summaryrefslogtreecommitdiff
path: root/chromium/third_party/dawn/src
diff options
context:
space:
mode:
author: Allan Sandfeld Jensen <allan.jensen@qt.io> 2022-05-12 15:59:20 +0200
committer: Allan Sandfeld Jensen <allan.jensen@qt.io> 2022-05-25 06:57:22 +0000
commitf7eaed5286974984ba5f9e3189d8f49d03e99f81 (patch)
treecaed19b2af2024f35449fb0b781d0a25e09d4f8f /chromium/third_party/dawn/src
parent9729c4479fe23554eae6e6dd1f30ff488f470c84 (diff)
downloadqtwebengine-chromium-f7eaed5286974984ba5f9e3189d8f49d03e99f81.tar.gz
BASELINE: Update Chromium to 100.0.4896.167
Change-Id: I98cbeb5d7543d966ffe04d8cefded0c493a11333
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r--chromium/third_party/dawn/src/common/Assert.cpp31
-rw-r--r--chromium/third_party/dawn/src/common/Assert.h80
-rw-r--r--chromium/third_party/dawn/src/common/BUILD.gn243
-rw-r--r--chromium/third_party/dawn/src/common/BitSetIterator.h139
-rw-r--r--chromium/third_party/dawn/src/common/Compiler.h121
-rw-r--r--chromium/third_party/dawn/src/common/ConcurrentCache.h54
-rw-r--r--chromium/third_party/dawn/src/common/Constants.h65
-rw-r--r--chromium/third_party/dawn/src/common/CoreFoundationRef.h46
-rw-r--r--chromium/third_party/dawn/src/common/DynamicLib.cpp106
-rw-r--r--chromium/third_party/dawn/src/common/DynamicLib.h54
-rw-r--r--chromium/third_party/dawn/src/common/GPUInfo.cpp105
-rw-r--r--chromium/third_party/dawn/src/common/HashUtils.h101
-rw-r--r--chromium/third_party/dawn/src/common/IOKitRef.h46
-rw-r--r--chromium/third_party/dawn/src/common/LinkedList.h274
-rw-r--r--chromium/third_party/dawn/src/common/Log.cpp116
-rw-r--r--chromium/third_party/dawn/src/common/Math.cpp160
-rw-r--r--chromium/third_party/dawn/src/common/Math.h102
-rw-r--r--chromium/third_party/dawn/src/common/NSRef.h123
-rw-r--r--chromium/third_party/dawn/src/common/RefBase.h183
-rw-r--r--chromium/third_party/dawn/src/common/RefCounted.cpp86
-rw-r--r--chromium/third_party/dawn/src/common/RefCounted.h69
-rw-r--r--chromium/third_party/dawn/src/common/Result.cpp30
-rw-r--r--chromium/third_party/dawn/src/common/Result.h519
-rw-r--r--chromium/third_party/dawn/src/common/SerialMap.h76
-rw-r--r--chromium/third_party/dawn/src/common/SerialQueue.h86
-rw-r--r--chromium/third_party/dawn/src/common/SerialStorage.h322
-rw-r--r--chromium/third_party/dawn/src/common/SlabAllocator.cpp246
-rw-r--r--chromium/third_party/dawn/src/common/SlabAllocator.h184
-rw-r--r--chromium/third_party/dawn/src/common/StackContainer.h262
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils.cpp203
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils.h53
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils_mac.mm33
-rw-r--r--chromium/third_party/dawn/src/common/TypedInteger.h262
-rw-r--r--chromium/third_party/dawn/src/common/WindowsUtils.cpp45
-rw-r--r--chromium/third_party/dawn/src/common/ityp_array.h98
-rw-r--r--chromium/third_party/dawn/src/common/ityp_bitset.h134
-rw-r--r--chromium/third_party/dawn/src/common/ityp_span.h103
-rw-r--r--chromium/third_party/dawn/src/common/ityp_stack_vec.h103
-rw-r--r--chromium/third_party/dawn/src/common/ityp_vector.h108
-rw-r--r--chromium/third_party/dawn/src/common/vulkan_platform.h195
-rw-r--r--chromium/third_party/dawn/src/common/windows_with_undefs.h38
-rw-r--r--chromium/third_party/dawn/src/common/xlib_with_undefs.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/BUILD.gn92
-rw-r--r--chromium/third_party/dawn/src/dawn/CMakeLists.txt41
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Alloc.h (renamed from chromium/third_party/dawn/src/common/Alloc.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Assert.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Assert.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/common/BUILD.gn250
-rw-r--r--chromium/third_party/dawn/src/dawn/common/BitSetIterator.h139
-rw-r--r--chromium/third_party/dawn/src/dawn/common/CMakeLists.txt (renamed from chromium/third_party/dawn/src/common/CMakeLists.txt)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Compiler.h97
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Constants.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp106
-rw-r--r--chromium/third_party/dawn/src/dawn/common/DynamicLib.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn/common/GPUInfo.h (renamed from chromium/third_party/dawn/src/common/GPUInfo.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/HashUtils.h101
-rw-r--r--chromium/third_party/dawn/src/dawn/common/IOKitRef.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/common/LinkedList.h274
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Log.cpp116
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Log.h (renamed from chromium/third_party/dawn/src/common/Log.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Math.cpp160
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Math.h107
-rw-r--r--chromium/third_party/dawn/src/dawn/common/NSRef.h123
-rw-r--r--chromium/third_party/dawn/src/dawn/common/NonCopyable.h (renamed from chromium/third_party/dawn/src/common/NonCopyable.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/PlacementAllocated.h (renamed from chromium/third_party/dawn/src/common/PlacementAllocated.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Platform.h (renamed from chromium/third_party/dawn/src/common/Platform.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Preprocessor.h (renamed from chromium/third_party/dawn/src/common/Preprocessor.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefBase.h183
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefCounted.cpp86
-rw-r--r--chromium/third_party/dawn/src/dawn/common/RefCounted.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Result.cpp30
-rw-r--r--chromium/third_party/dawn/src/dawn/common/Result.h526
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialMap.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialQueue.h85
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SerialStorage.h322
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp247
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SlabAllocator.h184
-rw-r--r--chromium/third_party/dawn/src/dawn/common/StackContainer.h262
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SwapChainUtils.h (renamed from chromium/third_party/dawn/src/common/SwapChainUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp229
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SystemUtils.h57
-rw-r--r--chromium/third_party/dawn/src/dawn/common/SystemUtils_mac.mm33
-rw-r--r--chromium/third_party/dawn/src/dawn/common/TypeTraits.h (renamed from chromium/third_party/dawn/src/common/TypeTraits.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/TypedInteger.h262
-rw-r--r--chromium/third_party/dawn/src/dawn/common/UnderlyingType.h (renamed from chromium/third_party/dawn/src/common/UnderlyingType.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp43
-rw-r--r--chromium/third_party/dawn/src/dawn/common/WindowsUtils.h (renamed from chromium/third_party/dawn/src/common/WindowsUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_array.h98
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_bitset.h134
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_span.h103
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h103
-rw-r--r--chromium/third_party/dawn/src/dawn/common/ityp_vector.h108
-rw-r--r--chromium/third_party/dawn/src/dawn/common/vulkan_platform.h194
-rw-r--r--chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h38
-rw-r--r--chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/fuzzers/BUILD.gn124
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Adapter.cpp227
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Adapter.h99
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AsyncTask.h65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn/native/AttachmentState.h83
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BUILD.gn771
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BackendConnection.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroup.cpp543
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroup.h96
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp670
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h170
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h142
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp266
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BindingInfo.h108
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp264
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h117
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h74
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Buffer.cpp562
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Buffer.h135
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CMakeLists.txt554
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CachedObject.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CachedObject.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp228
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandAllocator.h273
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp245
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBuffer.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp407
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp1211
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandEncoder.h119
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp483
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CommandValidation.h88
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Commands.cpp365
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Commands.h290
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp201
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CompilationMessages.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp467
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp96
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ComputePipeline.h55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp601
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp202
-rw-r--r--chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h108
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DawnNative.cpp312
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Device.cpp1758
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Device.h547
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp129
-rw-r--r--chromium/third_party/dawn/src/dawn/native/DynamicUploader.h66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp217
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EncodingContext.h182
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Error.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Error.h192
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorData.cpp103
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorData.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorInjector.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp92
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ErrorScope.h57
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp230
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ExternalTexture.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Features.cpp277
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Features.h83
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Format.cpp474
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Format.h151
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Forward.h71
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp193
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h126
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp385
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Instance.cpp435
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Instance.h129
-rw-r--r--chromium/third_party/dawn/src/dawn/native/IntegerTypes.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Limits.cpp213
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Limits.h43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp90
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectBase.h97
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h100
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp243
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PerStage.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PerStage.h87
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PersistentCache.h92
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Pipeline.cpp250
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Pipeline.h98
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp409
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PipelineLayout.h97
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp203
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp219
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QueryHelper.h43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QuerySet.cpp180
-rw-r--r--chromium/third_party/dawn/src/dawn/native/QuerySet.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Queue.cpp512
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Queue.h111
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundle.h73
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp167
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp414
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h87
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp398
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp1080
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RenderPipeline.h144
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceHeap.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp121
-rw-r--r--chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h63
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Sampler.cpp153
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Sampler.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp47
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp1333
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ShaderModule.h308
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp74
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SpirvValidation.h27
-rw-r--r--chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn/native/StagingBuffer.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Subresource.cpp132
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Subresource.h112
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h555
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface.cpp246
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface.h110
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Surface_metal.mm30
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SwapChain.cpp412
-rw-r--r--chromium/third_party/dawn/src/dawn/native/SwapChain.h169
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Texture.cpp781
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Texture.h157
-rw-r--r--chromium/third_party/dawn/src/dawn/native/TintUtils.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/TintUtils.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/ToBackend.h155
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Toggles.cpp324
-rw-r--r--chromium/third_party/dawn/src/dawn/native/Toggles.h98
-rw-r--r--chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/VertexFormat.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp425
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp209
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp268
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp191
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h94
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp493
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h91
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h47
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp72
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp1652
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h57
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp175
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp187
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h92
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp122
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp743
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h265
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp71
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp377
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h100
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp271
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h110
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp75
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp244
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h98
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp490
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp371
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp414
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h108
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp43
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp106
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp166
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h107
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp846
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp254
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h105
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp152
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h85
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp377
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp539
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h97
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp1381
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h162
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp308
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h74
-rw-r--r--chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/dawn_platform.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h33
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm646
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h67
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm240
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm1594
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm132
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h154
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm506
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/Forward.h68
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h54
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm139
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm506
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm106
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm278
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h67
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm155
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h96
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm866
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm288
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp520
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h340
-rw-r--r--chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp32
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp305
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h36
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h41
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp184
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp1472
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h49
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp315
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h131
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/Forward.h66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp88
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OWNERS (renamed from chromium/third_party/dawn/src/dawn_native/opengl/OWNERS)0
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp61
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp58
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp218
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp95
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp27
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h34
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp345
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h62
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp130
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp177
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h38
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp580
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h27
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/opengl_platform.h15
-rw-r--r--chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json (renamed from chromium/third_party/dawn/src/dawn_native/opengl/supported_extensions.json)0
-rw-r--r--chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp192
-rw-r--r--chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h123
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp353
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp444
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp199
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h80
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp413
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp1326
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h55
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp116
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h53
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp188
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h76
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp1017
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h213
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp183
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h69
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp225
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h71
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp84
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp117
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h47
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h38
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp302
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h106
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp623
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h59
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h39
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp292
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h66
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp248
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h67
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp664
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h98
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp1367
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h197
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp273
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h121
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp129
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp109
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp326
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h162
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp332
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h326
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp334
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h89
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h78
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp357
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp156
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp158
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h60
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp137
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp50
-rw-r--r--chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp135
-rw-r--r--chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp132
-rw-r--r--chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h72
-rw-r--r--chromium/third_party/dawn/src/dawn/node/CMakeLists.txt (renamed from chromium/third_party/dawn/src/dawn_node/CMakeLists.txt)0
-rw-r--r--chromium/third_party/dawn/src/dawn/node/Module.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn/node/OWNERS (renamed from chromium/third_party/dawn/src/dawn_node/OWNERS)0
-rw-r--r--chromium/third_party/dawn/src/dawn/node/README.md135
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h77
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt82
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp1241
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Converter.h395
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Errors.h60
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/Flags.h35
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPU.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp252
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h47
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h45
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp169
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp215
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h84
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp115
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp528
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h113
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp132
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h61
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp192
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h86
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp242
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h109
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h44
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp125
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h50
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h64
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h49
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Browser.idl (renamed from chromium/third_party/dawn/src/dawn_node/interop/Browser.idl)0
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt62
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Core.cpp160
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/Core.h692
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/WebGPU.cpp.tmpl393
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/WebGPU.h.tmpl282
-rw-r--r--chromium/third_party/dawn/src/dawn/node/interop/WebGPUCommon.tmpl127
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/go.mod9
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/go.sum (renamed from chromium/third_party/dawn/src/dawn_node/tools/go.sum)0
-rwxr-xr-xchromium/third_party/dawn/src/dawn/node/tools/run-cts (renamed from chromium/third_party/dawn/src/dawn_node/tools/run-cts)0
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go640
-rw-r--r--chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go1075
-rw-r--r--chromium/third_party/dawn/src/dawn/node/utils/Debug.h146
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/BUILD.gn41
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt32
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp63
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp97
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/WorkerThread.h32
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp58
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h51
-rw-r--r--chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h991
-rw-r--r--chromium/third_party/dawn/src/dawn/tests/BUILD.gn644
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/BUILD.gn193
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp109
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/BackendBinding.h46
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt (renamed from chromium/third_party/dawn/src/utils/CMakeLists.txt)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h35
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp145
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h64
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp88
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h (renamed from chromium/third_party/dawn/src/utils/GLFWUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm54
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp100
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm135
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp47
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h (renamed from chromium/third_party/dawn/src/utils/ObjCUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm25
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h (renamed from chromium/third_party/dawn/src/utils/PlatformDebugLogger.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp74
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp34
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h61
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm44
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/SystemUtils.h (renamed from chromium/third_party/dawn/src/utils/SystemUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp59
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h42
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp181
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TestUtils.h (renamed from chromium/third_party/dawn/src/utils/TestUtils.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp707
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/TextureUtils.h248
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/Timer.h (renamed from chromium/third_party/dawn/src/utils/Timer.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp57
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp374
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h180
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp111
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp89
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp178
-rw-r--r--chromium/third_party/dawn/src/dawn/utils/WireHelper.h (renamed from chromium/third_party/dawn/src/utils/WireHelper.h)0
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BUILD.gn111
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h85
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h73
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt83
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp79
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h71
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h114
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h26
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/Wire.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireClient.cpp82
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h43
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireResult.h38
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/WireServer.cpp83
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp133
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Adapter.h70
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h29
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp406
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Buffer.h109
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Client.cpp171
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Client.h95
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp133
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h99
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Device.cpp342
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Device.h112
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp101
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Instance.h56
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp63
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h40
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h110
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h51
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp98
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/Queue.h57
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h82
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h48
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h228
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/Server.cpp213
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/Server.h243
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp110
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp282
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp204
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp94
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp100
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h108
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp103
-rw-r--r--chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.cpp205
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.h92
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AsyncTask.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AsyncTask.h65
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AttachmentState.h83
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BUILD.gn771
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BackendConnection.h50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.cpp486
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.h92
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp568
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h147
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h142
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp221
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindingInfo.h103
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp264
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h117
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h74
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.cpp560
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.h135
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CMakeLists.txt561
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CachedObject.cpp44
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CachedObject.h53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp228
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandAllocator.h273
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp240
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.h76
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp407
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp1137
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandEncoder.h107
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp466
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandValidation.h84
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Commands.cpp365
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Commands.h290
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp188
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CompilationMessages.h62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp459
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h88
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp96
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePipeline.h55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp405
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp202
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h108
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DawnNative.cpp257
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.cpp1719
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.h544
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp129
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DynamicUploader.h66
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp217
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.h182
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Error.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Error.h197
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.cpp103
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.h72
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorInjector.h68
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp92
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorScope.h57
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp138
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ExternalTexture.h63
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Features.cpp188
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Features.h76
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.cpp421
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.h141
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Forward.h71
-rw-r--r--chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp194
-rw-r--r--chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h126
-rw-r--r--chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp386
-rw-r--r--chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Instance.cpp312
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Instance.h120
-rw-r--r--chromium/third_party/dawn/src/dawn_native/IntegerTypes.h75
-rw-r--r--chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Limits.cpp213
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Limits.h43
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp90
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectBase.h97
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.h82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h100
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp262
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PerStage.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PerStage.h90
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PersistentCache.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PersistentCache.h92
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.cpp250
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.h98
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp411
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PipelineLayout.h97
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.cpp203
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.h72
-rw-r--r--chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp187
-rw-r--r--chromium/third_party/dawn/src/dawn_native/QueryHelper.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/QuerySet.cpp180
-rw-r--r--chromium/third_party/dawn/src/dawn_native/QuerySet.h72
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.cpp512
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.h111
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundle.h73
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp167
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h56
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp410
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h87
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp393
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h90
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp1052
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPipeline.h144
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceHeap.h31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp121
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h63
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Sampler.cpp153
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Sampler.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp47
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp1312
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.h303
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SpirvValidation.cpp74
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SpirvValidation.h27
-rw-r--r--chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/StagingBuffer.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Subresource.cpp132
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Subresource.h112
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SubresourceStorage.h555
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface.cpp246
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface.h110
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface_metal.mm30
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.cpp412
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.h169
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Texture.cpp777
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Texture.h157
-rw-r--r--chromium/third_party/dawn/src/dawn_native/TintUtils.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/TintUtils.h37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ToBackend.h155
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.cpp317
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.h97
-rw-r--r--chromium/third_party/dawn/src/dawn_native/VertexFormat.cpp69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/VertexFormat.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp412
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h66
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp210
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp285
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h68
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp191
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h94
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp493
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h91
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp72
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp1651
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h57
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp126
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp164
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h89
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp181
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp122
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp745
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h266
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h44
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp71
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/IntegerTypes.h34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp76
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp367
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h100
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp271
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h110
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp75
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp54
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp238
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h98
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp490
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp371
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp410
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h107
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp43
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp106
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp166
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h107
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp837
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp254
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h105
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp152
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h85
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp377
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h89
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp539
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h97
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp1348
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h164
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp391
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h91
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/dawn_platform.h62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.h33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm605
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h67
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm240
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm1563
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm132
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h154
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm506
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/Forward.h68
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h54
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm139
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm506
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h44
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm106
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm263
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h67
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm155
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h95
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm784
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm293
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp515
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h340
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp32
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp306
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.h36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp184
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp1491
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp316
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h131
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/Forward.h66
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp120
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp88
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp61
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.cpp76
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.h44
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.cpp58
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.h45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp218
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp95
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp27
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp345
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp130
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp400
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h75
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.h51
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h38
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp580
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h27
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/opengl_platform.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.cpp192
-rw-r--r--chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.h123
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp339
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp410
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp200
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp181
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp413
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp1323
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h55
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp116
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocation.h31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp188
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.h76
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp1017
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h213
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h26
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp183
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h69
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp225
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h71
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp84
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp117
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h38
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp280
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h104
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp625
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp292
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h66
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp203
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h67
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp664
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h98
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp1333
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h191
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp259
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h126
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp129
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp109
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp326
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h162
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp332
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h326
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp332
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h89
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp270
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp63
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp154
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp156
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceFD.cpp137
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceNull.cpp50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp135
-rw-r--r--chromium/third_party/dawn/src/dawn_node/Module.cpp65
-rw-r--r--chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn_node/README.md122
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h77
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt88
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp1153
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Converter.h398
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp179
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Errors.h60
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Flags.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/Flags.h35
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp151
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPU.h42
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp223
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h45
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp169
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp40
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp196
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp135
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h76
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp530
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h113
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp132
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h61
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp39
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp192
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp262
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h115
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h44
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp125
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h50
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h64
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h49
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt68
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/Core.cpp160
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/Core.h692
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl393
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl282
-rw-r--r--chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl126
-rw-r--r--chromium/third_party/dawn/src/dawn_node/tools/go.mod9
-rw-r--r--chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go635
-rw-r--r--chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go894
-rw-r--r--chromium/third_party/dawn/src/dawn_node/utils/Debug.h146
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/BUILD.gn34
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt32
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/DawnPlatform.cpp63
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp97
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/WorkerThread.h32
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp58
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h51
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h991
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/BUILD.gn95
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h85
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h73
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt73
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.cpp79
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.h71
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h114
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/Wire.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireClient.cpp74
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.h43
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireResult.h38
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireServer.cpp79
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h27
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp405
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Buffer.h109
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Client.cpp157
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Client.h93
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp133
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp131
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.cpp105
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.h99
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.cpp328
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.h105
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h110
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h51
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp98
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Queue.h57
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h82
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp64
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h48
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h228
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.cpp192
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.h236
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp284
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp211
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp94
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.h108
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp104
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp51
-rw-r--r--chromium/third_party/dawn/src/fuzzers/BUILD.gn124
-rw-r--r--chromium/third_party/dawn/src/fuzzers/dawn/BUILD.gn22
-rw-r--r--chromium/third_party/dawn/src/include/README.md4
-rw-r--r--chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h157
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_proc.h37
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h34
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_wsi.h87
-rw-r--r--chromium/third_party/dawn/src/include/dawn/webgpu.h1
-rw-r--r--chromium/third_party/dawn/src/include/dawn/webgpu_cpp.h1
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h107
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/DawnNative.h275
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h72
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/NullBackend.h26
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h56
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h141
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h37
-rw-r--r--chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h117
-rw-r--r--chromium/third_party/dawn/src/include/dawn_platform/dawn_platform_export.h36
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/Wire.h77
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireClient.h176
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireServer.h149
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h37
-rw-r--r--chromium/third_party/dawn/src/tests/BUILD.gn612
-rw-r--r--chromium/third_party/dawn/src/utils/BUILD.gn193
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.cpp109
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.h46
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp28
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h35
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp145
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h64
-rw-r--r--chromium/third_party/dawn/src/utils/D3D12Binding.cpp55
-rw-r--r--chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp29
-rw-r--r--chromium/third_party/dawn/src/utils/GLFWUtils.cpp88
-rw-r--r--chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm54
-rw-r--r--chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp100
-rw-r--r--chromium/third_party/dawn/src/utils/MetalBinding.mm135
-rw-r--r--chromium/third_party/dawn/src/utils/NullBinding.cpp47
-rw-r--r--chromium/third_party/dawn/src/utils/OSXTimer.cpp77
-rw-r--r--chromium/third_party/dawn/src/utils/ObjCUtils.mm25
-rw-r--r--chromium/third_party/dawn/src/utils/OpenGLBinding.cpp55
-rw-r--r--chromium/third_party/dawn/src/utils/PosixTimer.cpp74
-rw-r--r--chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.cpp34
-rw-r--r--chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.h61
-rw-r--r--chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.mm44
-rw-r--r--chromium/third_party/dawn/src/utils/SystemUtils.cpp39
-rw-r--r--chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp59
-rw-r--r--chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h42
-rw-r--r--chromium/third_party/dawn/src/utils/TestUtils.cpp181
-rw-r--r--chromium/third_party/dawn/src/utils/TextureUtils.cpp684
-rw-r--r--chromium/third_party/dawn/src/utils/TextureUtils.h223
-rw-r--r--chromium/third_party/dawn/src/utils/VulkanBinding.cpp57
-rw-r--r--chromium/third_party/dawn/src/utils/WGPUHelpers.cpp374
-rw-r--r--chromium/third_party/dawn/src/utils/WGPUHelpers.h180
-rw-r--r--chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp111
-rw-r--r--chromium/third_party/dawn/src/utils/WindowsTimer.cpp89
-rw-r--r--chromium/third_party/dawn/src/utils/WireHelper.cpp178
1240 files changed, 100985 insertions, 99918 deletions
diff --git a/chromium/third_party/dawn/src/common/Assert.cpp b/chromium/third_party/dawn/src/common/Assert.cpp
deleted file mode 100644
index 8802c202f38..00000000000
--- a/chromium/third_party/dawn/src/common/Assert.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "common/Log.h"
-
-#include <cstdlib>
-
-void HandleAssertionFailure(const char* file,
- const char* function,
- int line,
- const char* condition) {
- dawn::ErrorLog() << "Assertion failure at " << file << ":" << line << " (" << function
- << "): " << condition;
-#if defined(DAWN_ABORT_ON_ASSERT)
- abort();
-#else
- DAWN_BREAKPOINT();
-#endif
-}
diff --git a/chromium/third_party/dawn/src/common/Assert.h b/chromium/third_party/dawn/src/common/Assert.h
deleted file mode 100644
index b9e36ca00bc..00000000000
--- a/chromium/third_party/dawn/src/common/Assert.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ASSERT_H_
-#define COMMON_ASSERT_H_
-
-#include "common/Compiler.h"
-
-// Dawn asserts to be used instead of the regular C stdlib assert function (if you don't use assert
-// yet, you should start now!). In debug ASSERT(condition) will trigger an error, otherwise in
-// release it does nothing at runtime.
-//
-// In case of name clashes (with for example a testing library), you can define the
-// DAWN_SKIP_ASSERT_SHORTHANDS to only define the DAWN_ prefixed macros.
-//
-// These asserts feature:
-// - Logging of the error with file, line and function information.
-// - Breaking in the debugger when an assert is triggered and a debugger is attached.
-// - Use the assert information to help the compiler optimizer in release builds.
-
-// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
-// points out that it looks like an owl face.
-#if defined(DAWN_COMPILER_MSVC)
-# define DAWN_ASSERT_LOOP_CONDITION (0, 0)
-#else
-# define DAWN_ASSERT_LOOP_CONDITION (0)
-#endif
-
-// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
-// expect of an assert and in release it tries to give hints to make the compiler generate better
-// code.
-#if defined(DAWN_ENABLE_ASSERTS)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
- do { \
- if (!(condition)) { \
- HandleAssertionFailure(file, func, line, #condition); \
- } \
- } while (DAWN_ASSERT_LOOP_CONDITION)
-#else
-# if defined(DAWN_COMPILER_MSVC)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
-# elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
-# else
-# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
- do { \
- DAWN_UNUSED(sizeof(condition)); \
- } while (DAWN_ASSERT_LOOP_CONDITION)
-# endif
-#endif
-
-#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
-#define DAWN_UNREACHABLE() \
- do { \
- DAWN_ASSERT(DAWN_ASSERT_LOOP_CONDITION && "Unreachable code hit"); \
- DAWN_BUILTIN_UNREACHABLE(); \
- } while (DAWN_ASSERT_LOOP_CONDITION)
-
-#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
-# define ASSERT DAWN_ASSERT
-# define UNREACHABLE DAWN_UNREACHABLE
-#endif
-
-void HandleAssertionFailure(const char* file,
- const char* function,
- int line,
- const char* condition);
-
-#endif // COMMON_ASSERT_H_
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
deleted file mode 100644
index 92da725881e..00000000000
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright 2019 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("//build_overrides/build.gni")
-import("${dawn_root}/scripts/dawn_features.gni")
-
-# Use Chromium's dcheck_always_on when available so that we respect it when
-# running tests on the GPU builders
-if (build_with_chromium) {
- import("//build/config/dcheck_always_on.gni")
-} else {
- dcheck_always_on = false
-}
-
-if (build_with_chromium) {
- import("//build/config/sanitizers/sanitizers.gni")
-} else {
- use_fuzzing_engine = false
-}
-
-###############################################################################
-# Common dawn configs
-###############################################################################
-
-config("dawn_public_include_dirs") {
- include_dirs = [
- "${target_gen_dir}/../../src/include",
- "${dawn_root}/src/include",
- ]
-}
-
-config("dawn_internal") {
- include_dirs = [
- "${target_gen_dir}/../../src",
- "${dawn_root}/src",
- ]
-
- defines = []
- if (dawn_always_assert || dcheck_always_on || is_debug ||
- use_fuzzing_engine) {
- defines += [ "DAWN_ENABLE_ASSERTS" ]
- }
-
- if (use_fuzzing_engine) {
- # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
- defines += [ "DAWN_ABORT_ON_ASSERT" ]
- }
-
- if (dawn_enable_d3d12) {
- defines += [ "DAWN_ENABLE_BACKEND_D3D12" ]
- }
- if (dawn_enable_metal) {
- defines += [ "DAWN_ENABLE_BACKEND_METAL" ]
- }
- if (dawn_enable_null) {
- defines += [ "DAWN_ENABLE_BACKEND_NULL" ]
- }
- if (dawn_enable_opengl) {
- defines += [ "DAWN_ENABLE_BACKEND_OPENGL" ]
- }
- if (dawn_enable_desktop_gl) {
- defines += [ "DAWN_ENABLE_BACKEND_DESKTOP_GL" ]
- }
- if (dawn_enable_opengles) {
- defines += [ "DAWN_ENABLE_BACKEND_OPENGLES" ]
- }
- if (dawn_enable_vulkan) {
- defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
- }
-
- if (dawn_use_x11) {
- defines += [ "DAWN_USE_X11" ]
- }
-
- if (dawn_enable_error_injection) {
- defines += [ "DAWN_ENABLE_ERROR_INJECTION" ]
- }
-
- # Only internal Dawn targets can use this config, this means only targets in
- # this BUILD.gn file and related subdirs.
- visibility = [ "../*" ]
-
- cflags = []
- if (is_clang) {
- cflags += [ "-Wno-shadow" ]
- }
-
- # Enable more warnings that were found when using Dawn in other projects.
- # Add them only when building in standalone because we control which clang
- # version we use. Otherwise we risk breaking projects depending on Dawn when
- # the use a different clang version.
- if (dawn_standalone && is_clang) {
- cflags += [
- "-Wconditional-uninitialized",
- "-Wcstring-format-directive",
- "-Wc++11-narrowing",
- "-Wdeprecated-copy",
- "-Wdeprecated-copy-dtor",
- "-Wduplicate-enum",
- "-Wextra-semi-stmt",
- "-Wimplicit-fallthrough",
- "-Winconsistent-missing-destructor-override",
- "-Winvalid-offsetof",
- "-Wmissing-field-initializers",
- "-Wnon-c-typedef-for-linkage",
- "-Wpessimizing-move",
- "-Wrange-loop-analysis",
- "-Wredundant-move",
- "-Wshadow-field",
- "-Wstrict-prototypes",
- "-Wtautological-unsigned-zero-compare",
- "-Wunreachable-code-aggressive",
- "-Wunused-but-set-variable",
- ]
-
- if (is_win) {
- cflags += [
- # clang-cl doesn't know -pedantic, pass it explicitly to the clang driver
- "/clang:-pedantic",
-
- # Allow the use of __uuidof()
- "-Wno-language-extension-token",
- ]
- } else {
- cflags += [ "-pedantic" ]
- }
- }
-
- if (!is_clang && is_win) {
- # Dawn extends wgpu enums with internal enums.
- # MSVC considers these invalid switch values. crbug.com/dawn/397.
- cflags += [ "/wd4063" ]
- if (dawn_is_winuwp) {
- # /ZW makes sure we don't add calls that are forbidden in UWP.
- # and /EHsc is required to be used in combination with it,
- # even if it is already added by the windows GN defaults,
- # we still add it to make every /ZW paired with a /EHsc
- cflags_cc = [
- "/ZW:nostdlib",
- "/EHsc",
- ]
- }
- }
-}
-
-###############################################################################
-# Common dawn library
-###############################################################################
-
-# This GN file is discovered by all Chromium builds, but common doesn't support
-# all of Chromium's OSes so we explicitly make the target visible only on
-# systems we know Dawn is able to compile on.
-if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
- static_library("common") {
- sources = [
- "Alloc.h",
- "Assert.cpp",
- "Assert.h",
- "BitSetIterator.h",
- "Compiler.h",
- "ConcurrentCache.h",
- "Constants.h",
- "CoreFoundationRef.h",
- "DynamicLib.cpp",
- "DynamicLib.h",
- "GPUInfo.cpp",
- "GPUInfo.h",
- "HashUtils.h",
- "IOKitRef.h",
- "LinkedList.h",
- "Log.cpp",
- "Log.h",
- "Math.cpp",
- "Math.h",
- "NSRef.h",
- "NonCopyable.h",
- "PlacementAllocated.h",
- "Platform.h",
- "Preprocessor.h",
- "RefBase.h",
- "RefCounted.cpp",
- "RefCounted.h",
- "Result.cpp",
- "Result.h",
- "SerialMap.h",
- "SerialQueue.h",
- "SerialStorage.h",
- "SlabAllocator.cpp",
- "SlabAllocator.h",
- "StackContainer.h",
- "SwapChainUtils.h",
- "SystemUtils.cpp",
- "SystemUtils.h",
- "TypeTraits.h",
- "TypedInteger.h",
- "UnderlyingType.h",
- "ityp_array.h",
- "ityp_bitset.h",
- "ityp_span.h",
- "ityp_stack_vec.h",
- "ityp_vector.h",
- "vulkan_platform.h",
- "xlib_with_undefs.h",
- ]
-
- if (is_mac) {
- sources += [ "SystemUtils_mac.mm" ]
- }
-
- public_configs = [ ":dawn_internal" ]
- deps = [
- "${dawn_root}/src/dawn:dawn_headers",
- "${dawn_root}/src/dawn:dawncpp_headers",
- ]
-
- if (is_win) {
- sources += [
- "WindowsUtils.cpp",
- "WindowsUtils.h",
- "windows_with_undefs.h",
- ]
- }
- if (dawn_enable_vulkan) {
- public_deps = [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
- }
- if (is_android) {
- libs = [ "log" ]
- }
- }
-}
diff --git a/chromium/third_party/dawn/src/common/BitSetIterator.h b/chromium/third_party/dawn/src/common/BitSetIterator.h
deleted file mode 100644
index 5a1c10ff69c..00000000000
--- a/chromium/third_party/dawn/src/common/BitSetIterator.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_BITSETITERATOR_H_
-#define COMMON_BITSETITERATOR_H_
-
-#include "common/Assert.h"
-#include "common/Math.h"
-#include "common/UnderlyingType.h"
-
-#include <bitset>
-#include <limits>
-
-// This is ANGLE's BitSetIterator class with a customizable return type
-// TODO(crbug.com/dawn/306): it could be optimized, in particular when N <= 64
-
-template <typename T>
-T roundUp(const T value, const T alignment) {
- auto temp = value + alignment - static_cast<T>(1);
- return temp - temp % alignment;
-}
-
-template <size_t N, typename T>
-class BitSetIterator final {
- public:
- BitSetIterator(const std::bitset<N>& bitset);
- BitSetIterator(const BitSetIterator& other);
- BitSetIterator& operator=(const BitSetIterator& other);
-
- class Iterator final {
- public:
- Iterator(const std::bitset<N>& bits);
- Iterator& operator++();
-
- bool operator==(const Iterator& other) const;
- bool operator!=(const Iterator& other) const;
-
- T operator*() const {
- using U = UnderlyingType<T>;
- ASSERT(static_cast<U>(mCurrentBit) <= std::numeric_limits<U>::max());
- return static_cast<T>(static_cast<U>(mCurrentBit));
- }
-
- private:
- unsigned long getNextBit();
-
- static constexpr size_t kBitsPerWord = sizeof(uint32_t) * 8;
- std::bitset<N> mBits;
- unsigned long mCurrentBit;
- unsigned long mOffset;
- };
-
- Iterator begin() const {
- return Iterator(mBits);
- }
- Iterator end() const {
- return Iterator(std::bitset<N>(0));
- }
-
- private:
- const std::bitset<N> mBits;
-};
-
-template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
-
-template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
-
-template <size_t N, typename T>
-BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
- mBits = other.mBits;
- return *this;
-}
-
-template <size_t N, typename T>
-BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
- : mBits(bits), mCurrentBit(0), mOffset(0) {
- if (bits.any()) {
- mCurrentBit = getNextBit();
- } else {
- mOffset = static_cast<unsigned long>(roundUp(N, kBitsPerWord));
- }
-}
-
-template <size_t N, typename T>
-typename BitSetIterator<N, T>::Iterator& BitSetIterator<N, T>::Iterator::operator++() {
- DAWN_ASSERT(mBits.any());
- mBits.set(mCurrentBit - mOffset, 0);
- mCurrentBit = getNextBit();
- return *this;
-}
-
-template <size_t N, typename T>
-bool BitSetIterator<N, T>::Iterator::operator==(const Iterator& other) const {
- return mOffset == other.mOffset && mBits == other.mBits;
-}
-
-template <size_t N, typename T>
-bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
- return !(*this == other);
-}
-
-template <size_t N, typename T>
-unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
- static std::bitset<N> wordMask(std::numeric_limits<uint32_t>::max());
-
- while (mOffset < N) {
- uint32_t wordBits = static_cast<uint32_t>((mBits & wordMask).to_ulong());
- if (wordBits != 0ul) {
- return ScanForward(wordBits) + mOffset;
- }
-
- mBits >>= kBitsPerWord;
- mOffset += kBitsPerWord;
- }
- return 0;
-}
-
-// Helper to avoid needing to specify the template parameter size
-template <size_t N>
-BitSetIterator<N, uint32_t> IterateBitSet(const std::bitset<N>& bitset) {
- return BitSetIterator<N, uint32_t>(bitset);
-}
-
-#endif // COMMON_BITSETITERATOR_H_
diff --git a/chromium/third_party/dawn/src/common/Compiler.h b/chromium/third_party/dawn/src/common/Compiler.h
deleted file mode 100644
index bb2d6698578..00000000000
--- a/chromium/third_party/dawn/src/common/Compiler.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_COMPILER_H_
-#define COMMON_COMPILER_H_
-
-// Defines macros for compiler-specific functionality
-// - DAWN_COMPILER_[CLANG|GCC|MSVC]: Compiler detection
-// - DAWN_BREAKPOINT(): Raises an exception and breaks in the debugger
-// - DAWN_BUILTIN_UNREACHABLE(): Hints the compiler that a code path is unreachable
-// - DAWN_NO_DISCARD: An attribute that is C++17 [[nodiscard]] where available
-// - DAWN_(UN)?LIKELY(EXPR): Where available, hints the compiler that the expression will be true
-// (resp. false) to help it generate code that leads to better branch prediction.
-// - DAWN_UNUSED(EXPR): Prevents unused variable/expression warnings on EXPR.
-// - DAWN_UNUSED_FUNC(FUNC): Prevents unused function warnings on FUNC.
-// - DAWN_DECLARE_UNUSED: Prevents unused function warnings a subsequent declaration.
-// Both DAWN_UNUSED_FUNC and DAWN_DECLARE_UNUSED may be necessary, e.g. to suppress clang's
-// unneeded-internal-declaration warning.
-
-// Clang and GCC, check for __clang__ too to catch clang-cl masquarading as MSVC
-#if defined(__GNUC__) || defined(__clang__)
-# if defined(__clang__)
-# define DAWN_COMPILER_CLANG
-# else
-# define DAWN_COMPILER_GCC
-# endif
-
-# if defined(__i386__) || defined(__x86_64__)
-# define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
-# else
-// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
-# define DAWN_BREAKPOINT()
-# endif
-
-# define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
-# define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
-# define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
-
-# if !defined(__has_cpp_attribute)
-# define __has_cpp_attribute(name) 0
-# endif
-
-// Use warn_unused_result on clang otherwise we can a c++1z extension warning in C++14 mode
-// Also avoid warn_unused_result with GCC because it is only a function attribute and not a type
-// attribute.
-# if __has_cpp_attribute(warn_unused_result) && defined(__clang__)
-# define DAWN_NO_DISCARD __attribute__((warn_unused_result))
-# elif DAWN_CPP_VERSION >= 17 && __has_cpp_attribute(nodiscard)
-# define DAWN_NO_DISCARD [[nodiscard]]
-# endif
-
-# define DAWN_DECLARE_UNUSED __attribute__((unused))
-# if defined(NDEBUG)
-# define DAWN_FORCE_INLINE inline __attribute__((always_inline))
-# endif
-# define DAWN_NOINLINE __attribute__((noinline))
-
-// MSVC
-#elif defined(_MSC_VER)
-# define DAWN_COMPILER_MSVC
-
-extern void __cdecl __debugbreak(void);
-# define DAWN_BREAKPOINT() __debugbreak()
-
-# define DAWN_BUILTIN_UNREACHABLE() __assume(false)
-
-// Visual Studio 2017 15.3 adds support for [[nodiscard]]
-# if _MSC_VER >= 1911 && DAWN_CPP_VERSION >= 17
-# define DAWN_NO_DISCARD [[nodiscard]]
-# endif
-
-# define DAWN_DECLARE_UNUSED
-# if defined(NDEBUG)
-# define DAWN_FORCE_INLINE __forceinline
-# endif
-# define DAWN_NOINLINE __declspec(noinline)
-
-#else
-# error "Unsupported compiler"
-#endif
-
-// It seems that (void) EXPR works on all compilers to silence the unused variable warning.
-#define DAWN_UNUSED(EXPR) (void)EXPR
-// Likewise using static asserting on sizeof(&FUNC) seems to make it tagged as used
-#define DAWN_UNUSED_FUNC(FUNC) static_assert(sizeof(&FUNC) == sizeof(void (*)()), "")
-
-// Add noop replacements for macros for features that aren't supported by the compiler.
-#if !defined(DAWN_LIKELY)
-# define DAWN_LIKELY(X) X
-#endif
-#if !defined(DAWN_UNLIKELY)
-# define DAWN_UNLIKELY(X) X
-#endif
-#if !defined(DAWN_NO_DISCARD)
-# define DAWN_NO_DISCARD
-#endif
-#if !defined(DAWN_FORCE_INLINE)
-# define DAWN_FORCE_INLINE inline
-#endif
-#if !defined(DAWN_NOINLINE)
-# define DAWN_NOINLINE
-#endif
-
-#if defined(__clang__)
-# define DAWN_FALLTHROUGH [[clang::fallthrough]]
-#else
-# define DAWN_FALLTHROUGH
-#endif
-
-#endif // COMMON_COMPILER_H_
diff --git a/chromium/third_party/dawn/src/common/ConcurrentCache.h b/chromium/third_party/dawn/src/common/ConcurrentCache.h
deleted file mode 100644
index 0e93dddf126..00000000000
--- a/chromium/third_party/dawn/src/common/ConcurrentCache.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_CONCURRENT_CACHE_H_
-#define COMMON_CONCURRENT_CACHE_H_
-
-#include "common/NonCopyable.h"
-
-#include <mutex>
-#include <unordered_set>
-#include <utility>
-
-template <typename T>
-class ConcurrentCache : public NonMovable {
- public:
- ConcurrentCache() = default;
-
- T* Find(T* object) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto iter = mCache.find(object);
- if (iter == mCache.end()) {
- return nullptr;
- }
- return *iter;
- }
-
- std::pair<T*, bool> Insert(T* object) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto insertion = mCache.insert(object);
- return std::make_pair(*(insertion.first), insertion.second);
- }
-
- size_t Erase(T* object) {
- std::lock_guard<std::mutex> lock(mMutex);
- return mCache.erase(object);
- }
-
- private:
- std::mutex mMutex;
- std::unordered_set<T*, typename T::HashFunc, typename T::EqualityFunc> mCache;
-};
-
-#endif
diff --git a/chromium/third_party/dawn/src/common/Constants.h b/chromium/third_party/dawn/src/common/Constants.h
deleted file mode 100644
index f5a521e6d2a..00000000000
--- a/chromium/third_party/dawn/src/common/Constants.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_CONSTANTS_H_
-#define COMMON_CONSTANTS_H_
-
-#include <cstdint>
-
-static constexpr uint32_t kMaxBindGroups = 4u;
-static constexpr uint8_t kMaxVertexAttributes = 16u;
-static constexpr uint8_t kMaxVertexBuffers = 8u;
-static constexpr uint32_t kMaxVertexBufferArrayStride = 2048u;
-static constexpr uint32_t kNumStages = 3;
-static constexpr uint8_t kMaxColorAttachments = 8u;
-static constexpr uint32_t kTextureBytesPerRowAlignment = 256u;
-static constexpr uint32_t kMaxInterStageShaderComponents = 60u;
-static constexpr uint32_t kMaxInterStageShaderVariables = kMaxInterStageShaderComponents / 4;
-
-// Per stage limits
-static constexpr uint32_t kMaxSampledTexturesPerShaderStage = 16;
-static constexpr uint32_t kMaxSamplersPerShaderStage = 16;
-static constexpr uint32_t kMaxStorageBuffersPerShaderStage = 8;
-static constexpr uint32_t kMaxStorageTexturesPerShaderStage = 4;
-static constexpr uint32_t kMaxUniformBuffersPerShaderStage = 12;
-
-// Per pipeline layout limits
-static constexpr uint32_t kMaxDynamicUniformBuffersPerPipelineLayout = 8u;
-static constexpr uint32_t kMaxDynamicStorageBuffersPerPipelineLayout = 4u;
-
-// Indirect command sizes
-static constexpr uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);
-static constexpr uint64_t kDrawIndirectSize = 4 * sizeof(uint32_t);
-static constexpr uint64_t kDrawIndexedIndirectSize = 5 * sizeof(uint32_t);
-
-// Non spec defined constants.
-static constexpr float kLodMin = 0.0;
-static constexpr float kLodMax = 1000.0;
-
-// Offset alignment for CopyB2B. Strictly speaking this alignment is required only
-// on macOS, but we decide to do it on all platforms.
-static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
-
-// The maximum size of visibilityResultBuffer is 256KB on Metal, to fit the restriction, limit the
-// maximum size of query set to 64KB. The size of a query is 8-bytes, the maximum query count is 64
-// * 1024 / 8.
-static constexpr uint32_t kMaxQueryCount = 8192u;
-
-// An external texture occupies multiple binding slots. These are the per-external-texture bindings
-// needed.
-static constexpr uint8_t kSampledTexturesPerExternalTexture = 3u;
-static constexpr uint8_t kSamplersPerExternalTexture = 1u;
-static constexpr uint8_t kUniformsPerExternalTexture = 1u;
-
-#endif // COMMON_CONSTANTS_H_
diff --git a/chromium/third_party/dawn/src/common/CoreFoundationRef.h b/chromium/third_party/dawn/src/common/CoreFoundationRef.h
deleted file mode 100644
index a4a637b6897..00000000000
--- a/chromium/third_party/dawn/src/common/CoreFoundationRef.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_COREFOUNDATIONREF_H_
-#define COMMON_COREFOUNDATIONREF_H_
-
-#include "common/RefBase.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-
-template <typename T>
-struct CoreFoundationRefTraits {
- static constexpr T kNullValue = nullptr;
- static void Reference(T value) {
- CFRetain(value);
- }
- static void Release(T value) {
- CFRelease(value);
- }
-};
-
-template <typename T>
-class CFRef : public RefBase<T, CoreFoundationRefTraits<T>> {
- public:
- using RefBase<T, CoreFoundationRefTraits<T>>::RefBase;
-};
-
-template <typename T>
-CFRef<T> AcquireCFRef(T pointee) {
- CFRef<T> ref;
- ref.Acquire(pointee);
- return ref;
-}
-
-#endif // COMMON_COREFOUNDATIONREF_H_
diff --git a/chromium/third_party/dawn/src/common/DynamicLib.cpp b/chromium/third_party/dawn/src/common/DynamicLib.cpp
deleted file mode 100644
index 119ec42f4ac..00000000000
--- a/chromium/third_party/dawn/src/common/DynamicLib.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/DynamicLib.h"
-
-#include "common/Platform.h"
-
-#if DAWN_PLATFORM_WINDOWS
-# include "common/windows_with_undefs.h"
-# if DAWN_PLATFORM_WINUWP
-# include "common/WindowsUtils.h"
-# endif
-#elif DAWN_PLATFORM_POSIX
-# include <dlfcn.h>
-#else
-# error "Unsupported platform for DynamicLib"
-#endif
-
-DynamicLib::~DynamicLib() {
- Close();
-}
-
-DynamicLib::DynamicLib(DynamicLib&& other) {
- std::swap(mHandle, other.mHandle);
-}
-
-DynamicLib& DynamicLib::operator=(DynamicLib&& other) {
- std::swap(mHandle, other.mHandle);
- return *this;
-}
-
-bool DynamicLib::Valid() const {
- return mHandle != nullptr;
-}
-
-bool DynamicLib::Open(const std::string& filename, std::string* error) {
-#if DAWN_PLATFORM_WINDOWS
-# if DAWN_PLATFORM_WINUWP
- mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
-# else
- mHandle = LoadLibraryA(filename.c_str());
-# endif
- if (mHandle == nullptr && error != nullptr) {
- *error = "Windows Error: " + std::to_string(GetLastError());
- }
-#elif DAWN_PLATFORM_POSIX
- mHandle = dlopen(filename.c_str(), RTLD_NOW);
-
- if (mHandle == nullptr && error != nullptr) {
- *error = dlerror();
- }
-#else
-# error "Unsupported platform for DynamicLib"
-#endif
-
- return mHandle != nullptr;
-}
-
-void DynamicLib::Close() {
- if (mHandle == nullptr) {
- return;
- }
-
-#if DAWN_PLATFORM_WINDOWS
- FreeLibrary(static_cast<HMODULE>(mHandle));
-#elif DAWN_PLATFORM_POSIX
- dlclose(mHandle);
-#else
-# error "Unsupported platform for DynamicLib"
-#endif
-
- mHandle = nullptr;
-}
-
-void* DynamicLib::GetProc(const std::string& procName, std::string* error) const {
- void* proc = nullptr;
-
-#if DAWN_PLATFORM_WINDOWS
- proc = reinterpret_cast<void*>(GetProcAddress(static_cast<HMODULE>(mHandle), procName.c_str()));
-
- if (proc == nullptr && error != nullptr) {
- *error = "Windows Error: " + std::to_string(GetLastError());
- }
-#elif DAWN_PLATFORM_POSIX
- proc = reinterpret_cast<void*>(dlsym(mHandle, procName.c_str()));
-
- if (proc == nullptr && error != nullptr) {
- *error = dlerror();
- }
-#else
-# error "Unsupported platform for DynamicLib"
-#endif
-
- return proc;
-}
diff --git a/chromium/third_party/dawn/src/common/DynamicLib.h b/chromium/third_party/dawn/src/common/DynamicLib.h
deleted file mode 100644
index 09a08198f82..00000000000
--- a/chromium/third_party/dawn/src/common/DynamicLib.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_DYNAMICLIB_H_
-#define COMMON_DYNAMICLIB_H_
-
-#include "common/Assert.h"
-
-#include <string>
-#include <type_traits>
-
-class DynamicLib {
- public:
- DynamicLib() = default;
- ~DynamicLib();
-
- DynamicLib(const DynamicLib&) = delete;
- DynamicLib& operator=(const DynamicLib&) = delete;
-
- DynamicLib(DynamicLib&& other);
- DynamicLib& operator=(DynamicLib&& other);
-
- bool Valid() const;
-
- bool Open(const std::string& filename, std::string* error = nullptr);
- void Close();
-
- void* GetProc(const std::string& procName, std::string* error = nullptr) const;
-
- template <typename T>
- bool GetProc(T** proc, const std::string& procName, std::string* error = nullptr) const {
- ASSERT(proc != nullptr);
- static_assert(std::is_function<T>::value, "");
-
- *proc = reinterpret_cast<T*>(GetProc(procName, error));
- return *proc != nullptr;
- }
-
- private:
- void* mHandle = nullptr;
-};
-
-#endif // COMMON_DYNAMICLIB_H_
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.cpp b/chromium/third_party/dawn/src/common/GPUInfo.cpp
deleted file mode 100644
index 6068b9da041..00000000000
--- a/chromium/third_party/dawn/src/common/GPUInfo.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/GPUInfo.h"
-
-#include "common/Assert.h"
-
-#include <algorithm>
-#include <array>
-
-namespace gpu_info {
- namespace {
- // Intel
- // Referenced from the following Mesa source code:
- // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
- // gen9
- const std::array<uint32_t, 25> Skylake = {
- {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
- 0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
- 0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
- // gen9p5
- const std::array<uint32_t, 20> Kabylake = {
- {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
- 0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
- const std::array<uint32_t, 17> Coffeelake = {
- {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
- 0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
- const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
- const std::array<uint32_t, 21> Cometlake = {
- {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
- 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
-
- // According to Intel graphics driver version schema, build number is generated from the
- // last two fields.
- // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
- // more details.
- uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
- return driverVersion[2] * 10000 + driverVersion[3];
- }
-
- } // anonymous namespace
-
- bool IsAMD(PCIVendorID vendorId) {
- return vendorId == kVendorID_AMD;
- }
- bool IsARM(PCIVendorID vendorId) {
- return vendorId == kVendorID_ARM;
- }
- bool IsImgTec(PCIVendorID vendorId) {
- return vendorId == kVendorID_ImgTec;
- }
- bool IsIntel(PCIVendorID vendorId) {
- return vendorId == kVendorID_Intel;
- }
- bool IsNvidia(PCIVendorID vendorId) {
- return vendorId == kVendorID_Nvidia;
- }
- bool IsQualcomm(PCIVendorID vendorId) {
- return vendorId == kVendorID_Qualcomm;
- }
- bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
- return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
- }
- bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
- return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
- }
-
- int CompareD3DDriverVersion(PCIVendorID vendorId,
- const D3DDriverVersion& version1,
- const D3DDriverVersion& version2) {
- if (IsIntel(vendorId)) {
- uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
- uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
- return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
- }
-
- // TODO(crbug.com/dawn/823): support other GPU vendors
- UNREACHABLE();
- return 0;
- }
-
- // Intel GPUs
- bool IsSkylake(PCIDeviceID deviceId) {
- return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
- }
- bool IsKabylake(PCIDeviceID deviceId) {
- return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
- }
- bool IsCoffeelake(PCIDeviceID deviceId) {
- return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
- (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
- (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
- }
-} // namespace gpu_info
diff --git a/chromium/third_party/dawn/src/common/HashUtils.h b/chromium/third_party/dawn/src/common/HashUtils.h
deleted file mode 100644
index 1c33a3f2c1c..00000000000
--- a/chromium/third_party/dawn/src/common/HashUtils.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_HASHUTILS_H_
-#define COMMON_HASHUTILS_H_
-
-#include "common/Platform.h"
-#include "common/TypedInteger.h"
-#include "common/ityp_bitset.h"
-
-#include <bitset>
-#include <functional>
-
-// Wrapper around std::hash to make it a templated function instead of a functor. It is marginally
-// nicer, and avoids adding to the std namespace to add hashing of other types.
-template <typename T>
-size_t Hash(const T& value) {
- return std::hash<T>()(value);
-}
-
-// Add hashing of TypedIntegers
-template <typename Tag, typename T>
-size_t Hash(const TypedInteger<Tag, T>& value) {
- return Hash(static_cast<T>(value));
-}
-
-// When hashing sparse structures we want to iteratively build a hash value with only parts of the
-// data. HashCombine "hashes" together an existing hash and hashable values.
-//
-// Example usage to compute the hash of a mask and values corresponding to the mask:
-//
-// size_t hash = Hash(mask):
-// for (uint32_t i : IterateBitSet(mask)) { HashCombine(&hash, hashables[i]); }
-// return hash;
-template <typename T>
-void HashCombine(size_t* hash, const T& value) {
-#if defined(DAWN_PLATFORM_64_BIT)
- const size_t offset = 0x9e3779b97f4a7c16;
-#elif defined(DAWN_PLATFORM_32_BIT)
- const size_t offset = 0x9e3779b9;
-#else
-# error "Unsupported platform"
-#endif
- *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
-}
-
-template <typename T, typename... Args>
-void HashCombine(size_t* hash, const T& value, const Args&... args) {
- HashCombine(hash, value);
- HashCombine(hash, args...);
-}
-
-// Workaround a bug between clang++ and libstdlibc++ by defining our own hashing for bitsets.
-// When _GLIBCXX_DEBUG is enabled libstdc++ wraps containers into debug containers. For bitset this
-// means what is normally std::bitset is defined as std::__cxx1988::bitset and is replaced by the
-// debug version of bitset.
-// When hashing, std::hash<std::bitset> proxies the call to std::hash<std::__cxx1998::bitset> and
-// fails on clang because the latter tries to access the private _M_getdata member of the bitset.
-// It looks like it should work because the non-debug bitset declares
-//
-// friend struct std::hash<bitset> // bitset is the name of the class itself
-//
-// which should friend std::hash<std::__cxx1998::bitset> but somehow doesn't work on clang.
-#if defined(_GLIBCXX_DEBUG)
-template <size_t N>
-size_t Hash(const std::bitset<N>& value) {
- constexpr size_t kWindowSize = sizeof(unsigned long long);
-
- std::bitset<N> bits = value;
- size_t hash = 0;
- for (size_t processedBits = 0; processedBits < N; processedBits += kWindowSize) {
- HashCombine(&hash, bits.to_ullong());
- bits >>= kWindowSize;
- }
-
- return hash;
-}
-#endif
-
-namespace std {
- template <typename Index, size_t N>
- struct hash<ityp::bitset<Index, N>> {
- public:
- size_t operator()(const ityp::bitset<Index, N>& value) const {
- return Hash(static_cast<const std::bitset<N>&>(value));
- }
- };
-} // namespace std
-
-#endif // COMMON_HASHUTILS_H_
diff --git a/chromium/third_party/dawn/src/common/IOKitRef.h b/chromium/third_party/dawn/src/common/IOKitRef.h
deleted file mode 100644
index cba037665af..00000000000
--- a/chromium/third_party/dawn/src/common/IOKitRef.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_IOKITREF_H_
-#define COMMON_IOKITREF_H_
-
-#include "common/RefBase.h"
-
-#include <IOKit/IOKitLib.h>
-
-template <typename T>
-struct IOKitRefTraits {
- static constexpr T kNullValue = IO_OBJECT_NULL;
- static void Reference(T value) {
- IOObjectRetain(value);
- }
- static void Release(T value) {
- IOObjectRelease(value);
- }
-};
-
-template <typename T>
-class IORef : public RefBase<T, IOKitRefTraits<T>> {
- public:
- using RefBase<T, IOKitRefTraits<T>>::RefBase;
-};
-
-template <typename T>
-IORef<T> AcquireIORef(T pointee) {
- IORef<T> ref;
- ref.Acquire(pointee);
- return ref;
-}
-
-#endif // COMMON_IOKITREF_H_
diff --git a/chromium/third_party/dawn/src/common/LinkedList.h b/chromium/third_party/dawn/src/common/LinkedList.h
deleted file mode 100644
index 881aa82c70a..00000000000
--- a/chromium/third_party/dawn/src/common/LinkedList.h
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is a copy of Chromium's /src/base/containers/linked_list.h with the following
-// modifications:
-// - Added iterators for ranged based iterations
-// - Added in list check before removing node to prevent segfault, now returns true iff removed
-// - Added MoveInto functionality for moving list elements to another list
-
-#ifndef COMMON_LINKED_LIST_H
-#define COMMON_LINKED_LIST_H
-
-#include "common/Assert.h"
-
-// Simple LinkedList type. (See the Q&A section to understand how this
-// differs from std::list).
-//
-// To use, start by declaring the class which will be contained in the linked
-// list, as extending LinkNode (this gives it next/previous pointers).
-//
-// class MyNodeType : public LinkNode<MyNodeType> {
-// ...
-// };
-//
-// Next, to keep track of the list's head/tail, use a LinkedList instance:
-//
-// LinkedList<MyNodeType> list;
-//
-// To add elements to the list, use any of LinkedList::Append,
-// LinkNode::InsertBefore, or LinkNode::InsertAfter:
-//
-// LinkNode<MyNodeType>* n1 = ...;
-// LinkNode<MyNodeType>* n2 = ...;
-// LinkNode<MyNodeType>* n3 = ...;
-//
-// list.Append(n1);
-// list.Append(n3);
-// n3->InsertBefore(n3);
-//
-// Lastly, to iterate through the linked list forwards:
-//
-// for (LinkNode<MyNodeType>* node = list.head();
-// node != list.end();
-// node = node->next()) {
-// MyNodeType* value = node->value();
-// ...
-// }
-//
-// for (LinkNode<MyNodeType*> node : list) {
-// MyNodeType* value = node->value();
-// ...
-// }
-//
-// Or to iterate the linked list backwards:
-//
-// for (LinkNode<MyNodeType>* node = list.tail();
-// node != list.end();
-// node = node->previous()) {
-// MyNodeType* value = node->value();
-// ...
-// }
-//
-// Questions and Answers:
-//
-// Q. Should I use std::list or base::LinkedList?
-//
-// A. The main reason to use base::LinkedList over std::list is
-// performance. If you don't care about the performance differences
-// then use an STL container, as it makes for better code readability.
-//
-// Comparing the performance of base::LinkedList<T> to std::list<T*>:
-//
-// * Erasing an element of type T* from base::LinkedList<T> is
-// an O(1) operation. Whereas for std::list<T*> it is O(n).
-// That is because with std::list<T*> you must obtain an
-// iterator to the T* element before you can call erase(iterator).
-//
-// * Insertion operations with base::LinkedList<T> never require
-// heap allocations.
-//
-// Q. How does base::LinkedList implementation differ from std::list?
-//
-// A. Doubly-linked lists are made up of nodes that contain "next" and
-// "previous" pointers that reference other nodes in the list.
-//
-// With base::LinkedList<T>, the type being inserted already reserves
-// space for the "next" and "previous" pointers (base::LinkNode<T>*).
-// Whereas with std::list<T> the type can be anything, so the implementation
-// needs to glue on the "next" and "previous" pointers using
-// some internal node type.
-
-// Forward declarations of the types in order for recursive referencing and friending.
-template <typename T>
-class LinkNode;
-template <typename T>
-class LinkedList;
-
-template <typename T>
-class LinkNode {
- public:
- LinkNode() : previous_(nullptr), next_(nullptr) {
- }
- LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
- }
-
- LinkNode(LinkNode<T>&& rhs) {
- next_ = rhs.next_;
- rhs.next_ = nullptr;
- previous_ = rhs.previous_;
- rhs.previous_ = nullptr;
-
- // If the node belongs to a list, next_ and previous_ are both non-null.
- // Otherwise, they are both null.
- if (next_) {
- next_->previous_ = this;
- previous_->next_ = this;
- }
- }
-
- // Insert |this| into the linked list, before |e|.
- void InsertBefore(LinkNode<T>* e) {
- this->next_ = e;
- this->previous_ = e->previous_;
- e->previous_->next_ = this;
- e->previous_ = this;
- }
-
- // Insert |this| into the linked list, after |e|.
- void InsertAfter(LinkNode<T>* e) {
- this->next_ = e->next_;
- this->previous_ = e;
- e->next_->previous_ = this;
- e->next_ = this;
- }
-
- // Check if |this| is in a list.
- bool IsInList() const {
- ASSERT((this->previous_ == nullptr) == (this->next_ == nullptr));
- return this->next_ != nullptr;
- }
-
- // Remove |this| from the linked list. Returns true iff removed from a list.
- bool RemoveFromList() {
- if (!IsInList()) {
- return false;
- }
-
- this->previous_->next_ = this->next_;
- this->next_->previous_ = this->previous_;
- // next() and previous() return non-null if and only this node is not in any list.
- this->next_ = nullptr;
- this->previous_ = nullptr;
- return true;
- }
-
- LinkNode<T>* previous() const {
- return previous_;
- }
-
- LinkNode<T>* next() const {
- return next_;
- }
-
- // Cast from the node-type to the value type.
- const T* value() const {
- return static_cast<const T*>(this);
- }
-
- T* value() {
- return static_cast<T*>(this);
- }
-
- private:
- friend class LinkedList<T>;
- LinkNode<T>* previous_;
- LinkNode<T>* next_;
-};
-
-template <typename T>
-class LinkedList {
- public:
- // The "root" node is self-referential, and forms the basis of a circular
- // list (root_.next() will point back to the start of the list,
- // and root_->previous() wraps around to the end of the list).
- LinkedList() : root_(&root_, &root_) {
- }
-
- ~LinkedList() {
- // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
- // root_ even after it has been freed. We should remove root_ from the list to prevent any
- // future access.
- root_.RemoveFromList();
- }
-
- // Appends |e| to the end of the linked list.
- void Append(LinkNode<T>* e) {
- e->InsertBefore(&root_);
- }
-
- // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
- void MoveInto(LinkedList<T>* l) {
- if (empty()) {
- return;
- }
- l->root_.previous_->next_ = root_.next_;
- root_.next_->previous_ = l->root_.previous_;
- l->root_.previous_ = root_.previous_;
- root_.previous_->next_ = &l->root_;
-
- root_.next_ = &root_;
- root_.previous_ = &root_;
- }
-
- LinkNode<T>* head() const {
- return root_.next();
- }
-
- LinkNode<T>* tail() const {
- return root_.previous();
- }
-
- const LinkNode<T>* end() const {
- return &root_;
- }
-
- bool empty() const {
- return head() == end();
- }
-
- private:
- LinkNode<T> root_;
-};
-
-template <typename T>
-class LinkedListIterator {
- public:
- LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
- }
-
- // We keep an early reference to the next node in the list so that even if the current element
- // is modified or removed from the list, we have a valid next node.
- LinkedListIterator<T> const& operator++() {
- current_ = next_;
- next_ = current_->next();
- return *this;
- }
-
- bool operator!=(const LinkedListIterator<T>& other) const {
- return current_ != other.current_;
- }
-
- LinkNode<T>* operator*() const {
- return current_;
- }
-
- private:
- LinkNode<T>* current_;
- LinkNode<T>* next_;
-};
-
-template <typename T>
-LinkedListIterator<T> begin(LinkedList<T>& l) {
- return LinkedListIterator<T>(l.head());
-}
-
-// Free end function does't use LinkedList<T>::end because of it's const nature. Instead we wrap
-// around from tail.
-template <typename T>
-LinkedListIterator<T> end(LinkedList<T>& l) {
- return LinkedListIterator<T>(l.tail()->next());
-}
-
-#endif // COMMON_LINKED_LIST_H
diff --git a/chromium/third_party/dawn/src/common/Log.cpp b/chromium/third_party/dawn/src/common/Log.cpp
deleted file mode 100644
index 98b4b2bb689..00000000000
--- a/chromium/third_party/dawn/src/common/Log.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Log.h"
-
-#include "common/Assert.h"
-#include "common/Platform.h"
-
-#include <cstdio>
-
-#if defined(DAWN_PLATFORM_ANDROID)
-# include <android/log.h>
-#endif
-
-namespace dawn {
-
- namespace {
-
- const char* SeverityName(LogSeverity severity) {
- switch (severity) {
- case LogSeverity::Debug:
- return "Debug";
- case LogSeverity::Info:
- return "Info";
- case LogSeverity::Warning:
- return "Warning";
- case LogSeverity::Error:
- return "Error";
- default:
- UNREACHABLE();
- return "";
- }
- }
-
-#if defined(DAWN_PLATFORM_ANDROID)
- android_LogPriority AndroidLogPriority(LogSeverity severity) {
- switch (severity) {
- case LogSeverity::Debug:
- return ANDROID_LOG_INFO;
- case LogSeverity::Info:
- return ANDROID_LOG_INFO;
- case LogSeverity::Warning:
- return ANDROID_LOG_WARN;
- case LogSeverity::Error:
- return ANDROID_LOG_ERROR;
- default:
- UNREACHABLE();
- return ANDROID_LOG_ERROR;
- }
- }
-#endif // defined(DAWN_PLATFORM_ANDROID)
-
- } // anonymous namespace
-
- LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
- }
-
- LogMessage::~LogMessage() {
- std::string fullMessage = mStream.str();
-
- // If this message has been moved, its stream is empty.
- if (fullMessage.empty()) {
- return;
- }
-
- const char* severityName = SeverityName(mSeverity);
-
-#if defined(DAWN_PLATFORM_ANDROID)
- android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
- __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
-#else // defined(DAWN_PLATFORM_ANDROID)
- FILE* outputStream = stdout;
- if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
- outputStream = stderr;
- }
-
- // Note: we use fprintf because <iostream> includes static initializers.
- fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
- fflush(outputStream);
-#endif // defined(DAWN_PLATFORM_ANDROID)
- }
-
- LogMessage DebugLog() {
- return {LogSeverity::Debug};
- }
-
- LogMessage InfoLog() {
- return {LogSeverity::Info};
- }
-
- LogMessage WarningLog() {
- return {LogSeverity::Warning};
- }
-
- LogMessage ErrorLog() {
- return {LogSeverity::Error};
- }
-
- LogMessage DebugLog(const char* file, const char* function, int line) {
- LogMessage message = DebugLog();
- message << file << ":" << line << "(" << function << ")";
- return message;
- }
-
-} // namespace dawn
diff --git a/chromium/third_party/dawn/src/common/Math.cpp b/chromium/third_party/dawn/src/common/Math.cpp
deleted file mode 100644
index 053fa3e4c12..00000000000
--- a/chromium/third_party/dawn/src/common/Math.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Math.h"
-
-#include "common/Assert.h"
-#include "common/Platform.h"
-
-#include <algorithm>
-#include <cmath>
-#include <limits>
-
-#if defined(DAWN_COMPILER_MSVC)
-# include <intrin.h>
-#endif
-
-uint32_t ScanForward(uint32_t bits) {
- ASSERT(bits != 0);
-#if defined(DAWN_COMPILER_MSVC)
- unsigned long firstBitIndex = 0ul;
- unsigned char ret = _BitScanForward(&firstBitIndex, bits);
- ASSERT(ret != 0);
- return firstBitIndex;
-#else
- return static_cast<uint32_t>(__builtin_ctz(bits));
-#endif
-}
-
-uint32_t Log2(uint32_t value) {
- ASSERT(value != 0);
-#if defined(DAWN_COMPILER_MSVC)
- unsigned long firstBitIndex = 0ul;
- unsigned char ret = _BitScanReverse(&firstBitIndex, value);
- ASSERT(ret != 0);
- return firstBitIndex;
-#else
- return 31 - static_cast<uint32_t>(__builtin_clz(value));
-#endif
-}
-
-uint32_t Log2(uint64_t value) {
- ASSERT(value != 0);
-#if defined(DAWN_COMPILER_MSVC)
-# if defined(DAWN_PLATFORM_64_BIT)
- unsigned long firstBitIndex = 0ul;
- unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
- ASSERT(ret != 0);
- return firstBitIndex;
-# else // defined(DAWN_PLATFORM_64_BIT)
- unsigned long firstBitIndex = 0ul;
- if (_BitScanReverse(&firstBitIndex, value >> 32)) {
- return firstBitIndex + 32;
- }
- unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
- ASSERT(ret != 0);
- return firstBitIndex;
-# endif // defined(DAWN_PLATFORM_64_BIT)
-#else // defined(DAWN_COMPILER_MSVC)
- return 63 - static_cast<uint32_t>(__builtin_clzll(value));
-#endif // defined(DAWN_COMPILER_MSVC)
-}
-
-uint64_t NextPowerOfTwo(uint64_t n) {
- if (n <= 1) {
- return 1;
- }
-
- return 1ull << (Log2(n - 1) + 1);
-}
-
-bool IsPowerOfTwo(uint64_t n) {
- ASSERT(n != 0);
- return (n & (n - 1)) == 0;
-}
-
-bool IsPtrAligned(const void* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
-}
-
-bool IsAligned(uint32_t value, size_t alignment) {
- ASSERT(alignment <= UINT32_MAX);
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- uint32_t alignment32 = static_cast<uint32_t>(alignment);
- return (value & (alignment32 - 1)) == 0;
-}
-
-uint16_t Float32ToFloat16(float fp32) {
- uint32_t fp32i = BitCast<uint32_t>(fp32);
- uint32_t sign16 = (fp32i & 0x80000000) >> 16;
- uint32_t mantissaAndExponent = fp32i & 0x7FFFFFFF;
-
- if (mantissaAndExponent > 0x7F800000) { // NaN
- return 0x7FFF;
- } else if (mantissaAndExponent > 0x47FFEFFF) { // Infinity
- return static_cast<uint16_t>(sign16 | 0x7C00);
- } else if (mantissaAndExponent < 0x38800000) { // Denormal
- uint32_t mantissa = (mantissaAndExponent & 0x007FFFFF) | 0x00800000;
- int32_t exponent = 113 - (mantissaAndExponent >> 23);
-
- if (exponent < 24) {
- mantissaAndExponent = mantissa >> exponent;
- } else {
- mantissaAndExponent = 0;
- }
-
- return static_cast<uint16_t>(
- sign16 | (mantissaAndExponent + 0x00000FFF + ((mantissaAndExponent >> 13) & 1)) >> 13);
- } else {
- return static_cast<uint16_t>(sign16 | (mantissaAndExponent + 0xC8000000 + 0x00000FFF +
- ((mantissaAndExponent >> 13) & 1)) >>
- 13);
- }
-}
-
-float Float16ToFloat32(uint16_t fp16) {
- uint32_t tmp = (fp16 & 0x7fff) << 13 | (fp16 & 0x8000) << 16;
- float tmp2 = *reinterpret_cast<float*>(&tmp);
- return pow(2, 127 - 15) * tmp2;
-}
-
-bool IsFloat16NaN(uint16_t fp16) {
- return (fp16 & 0x7FFF) > 0x7C00;
-}
-
-// Based on the Khronos Data Format Specification 1.2 Section 13.3 sRGB transfer functions
-float SRGBToLinear(float srgb) {
- // sRGB is always used in unsigned normalized formats so clamp to [0.0, 1.0]
- if (srgb <= 0.0f) {
- return 0.0f;
- } else if (srgb > 1.0f) {
- return 1.0f;
- }
-
- if (srgb < 0.04045f) {
- return srgb / 12.92f;
- } else {
- return std::pow((srgb + 0.055f) / 1.055f, 2.4f);
- }
-}
-
-uint64_t RoundUp(uint64_t n, uint64_t m) {
- ASSERT(m > 0);
- ASSERT(n > 0);
- ASSERT(m <= std::numeric_limits<uint64_t>::max() - n);
- return ((n + m - 1) / m) * m;
-}
diff --git a/chromium/third_party/dawn/src/common/Math.h b/chromium/third_party/dawn/src/common/Math.h
deleted file mode 100644
index 69cab24deae..00000000000
--- a/chromium/third_party/dawn/src/common/Math.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_MATH_H_
-#define COMMON_MATH_H_
-
-#include "common/Assert.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-
-#include <limits>
-#include <type_traits>
-
-// The following are not valid for 0
-uint32_t ScanForward(uint32_t bits);
-uint32_t Log2(uint32_t value);
-uint32_t Log2(uint64_t value);
-bool IsPowerOfTwo(uint64_t n);
-uint64_t RoundUp(uint64_t n, uint64_t m);
-
-constexpr uint32_t ConstexprLog2(uint64_t v) {
- return v <= 1 ? 0 : 1 + ConstexprLog2(v / 2);
-}
-
-constexpr uint32_t ConstexprLog2Ceil(uint64_t v) {
- return v <= 1 ? 0 : ConstexprLog2(v - 1) + 1;
-}
-
-inline uint32_t Log2Ceil(uint32_t v) {
- return v <= 1 ? 0 : Log2(v - 1) + 1;
-}
-
-inline uint32_t Log2Ceil(uint64_t v) {
- return v <= 1 ? 0 : Log2(v - 1) + 1;
-}
-
-uint64_t NextPowerOfTwo(uint64_t n);
-bool IsPtrAligned(const void* ptr, size_t alignment);
-void* AlignVoidPtr(void* ptr, size_t alignment);
-bool IsAligned(uint32_t value, size_t alignment);
-
-template <typename T>
-T Align(T value, size_t alignment) {
- ASSERT(value <= std::numeric_limits<T>::max() - (alignment - 1));
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- T alignmentT = static_cast<T>(alignment);
- return (value + (alignmentT - 1)) & ~(alignmentT - 1);
-}
-
-template <typename T>
-DAWN_FORCE_INLINE T* AlignPtr(T* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return reinterpret_cast<T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
- ~(alignment - 1));
-}
-
-template <typename T>
-DAWN_FORCE_INLINE const T* AlignPtr(const T* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return reinterpret_cast<const T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
- ~(alignment - 1));
-}
-
-template <typename destType, typename sourceType>
-destType BitCast(const sourceType& source) {
- static_assert(sizeof(destType) == sizeof(sourceType), "BitCast: cannot lose precision.");
- destType output;
- std::memcpy(&output, &source, sizeof(destType));
- return output;
-}
-
-uint16_t Float32ToFloat16(float fp32);
-float Float16ToFloat32(uint16_t fp16);
-bool IsFloat16NaN(uint16_t fp16);
-
-float SRGBToLinear(float srgb);
-
-template <typename T1,
- typename T2,
- typename Enable = typename std::enable_if<sizeof(T1) == sizeof(T2)>::type>
-constexpr bool IsSubset(T1 subset, T2 set) {
- T2 bitsAlsoInSet = subset & set;
- return bitsAlsoInSet == subset;
-}
-
-#endif // COMMON_MATH_H_
diff --git a/chromium/third_party/dawn/src/common/NSRef.h b/chromium/third_party/dawn/src/common/NSRef.h
deleted file mode 100644
index 6423856e523..00000000000
--- a/chromium/third_party/dawn/src/common/NSRef.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_NSREF_H_
-#define COMMON_NSREF_H_
-
-#include "common/RefBase.h"
-
-#import <Foundation/NSObject.h>
-
-#if !defined(__OBJC__)
-# error "NSRef can only be used in Objective C/C++ code."
-#endif
-
-// This file contains smart pointers that automatically reference and release Objective C objects
-// and prototocals in a manner very similar to Ref<>. Note that NSRef<> and NSPRef's constructor add
-// a reference to the object by default, so the pattern to get a reference for a newly created
-// NSObject is the following:
-//
-// NSRef<NSFoo> foo = AcquireNSRef([NSFoo alloc]);
-//
-// NSRef overloads -> and * but these operators don't work extremely well with Objective C's
-// features. For example automatic dereferencing when doing the following doesn't work:
-//
-// NSFoo* foo;
-// foo.member = 1;
-// someVar = foo.member;
-//
-// Instead use the message passing syntax:
-//
-// NSRef<NSFoo> foo;
-// [*foo setMember: 1];
-// someVar = [*foo member];
-//
-// Also did you notive the extra '*' in the example above? That's because Objective C's message
-// passing doesn't automatically call a C++ operator to dereference smart pointers (like -> does) so
-// we have to dereference manually using '*'. In some cases the extra * or message passing syntax
-// can get a bit annoying so instead a local "naked" pointer can be borrowed from the NSRef. This
-// would change the syntax overload in the following:
-//
-// NSRef<NSFoo> foo;
-// [*foo setA:1];
-// [*foo setB:2];
-// [*foo setC:3];
-//
-// Into (note access to members of ObjC classes referenced via pointer is done with . and not ->):
-//
-// NSRef<NSFoo> fooRef;
-// NSFoo* foo = fooRef.Get();
-// foo.a = 1;
-// foo.b = 2;
-// boo.c = 3;
-//
-// Which can be subjectively easier to read.
-
-template <typename T>
-struct NSRefTraits {
- static constexpr T kNullValue = nullptr;
- static void Reference(T value) {
- [value retain];
- }
- static void Release(T value) {
- [value release];
- }
-};
-
-template <typename T>
-class NSRef : public RefBase<T*, NSRefTraits<T*>> {
- public:
- using RefBase<T*, NSRefTraits<T*>>::RefBase;
-
- const T* operator*() const {
- return this->Get();
- }
-
- T* operator*() {
- return this->Get();
- }
-};
-
-template <typename T>
-NSRef<T> AcquireNSRef(T* pointee) {
- NSRef<T> ref;
- ref.Acquire(pointee);
- return ref;
-}
-
-// This is a RefBase<> for an Objective C protocol (hence the P). Objective C protocols must always
-// be referenced with id<ProtocolName> and not just ProtocolName* so they cannot use NSRef<>
-// itself. That's what the P in NSPRef stands for: Protocol.
-template <typename T>
-class NSPRef : public RefBase<T, NSRefTraits<T>> {
- public:
- using RefBase<T, NSRefTraits<T>>::RefBase;
-
- const T operator*() const {
- return this->Get();
- }
-
- T operator*() {
- return this->Get();
- }
-};
-
-template <typename T>
-NSPRef<T> AcquireNSPRef(T pointee) {
- NSPRef<T> ref;
- ref.Acquire(pointee);
- return ref;
-}
-
-#endif // COMMON_NSREF_H_
diff --git a/chromium/third_party/dawn/src/common/RefBase.h b/chromium/third_party/dawn/src/common/RefBase.h
deleted file mode 100644
index 0127a26801b..00000000000
--- a/chromium/third_party/dawn/src/common/RefBase.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_REFBASE_H_
-#define COMMON_REFBASE_H_
-
-#include "common/Assert.h"
-#include "common/Compiler.h"
-
-#include <type_traits>
-#include <utility>
-
-// A common class for various smart-pointers acting on referenceable/releasable pointer-like
-// objects. Logic for each specialization can be customized using a Traits type that looks
-// like the following:
-//
-// struct {
-// static constexpr T kNullValue = ...;
-// static void Reference(T value) { ... }
-// static void Release(T value) { ... }
-// };
-//
-// RefBase supports
-template <typename T, typename Traits>
-class RefBase {
- public:
- // Default constructor and destructor.
- RefBase() : mValue(Traits::kNullValue) {
- }
-
- ~RefBase() {
- Release(mValue);
- }
-
- // Constructors from nullptr.
- constexpr RefBase(std::nullptr_t) : RefBase() {
- }
-
- RefBase<T, Traits>& operator=(std::nullptr_t) {
- Set(Traits::kNullValue);
- return *this;
- }
-
- // Constructors from a value T.
- RefBase(T value) : mValue(value) {
- Reference(value);
- }
-
- RefBase<T, Traits>& operator=(const T& value) {
- Set(value);
- return *this;
- }
-
- // Constructors from a RefBase<T>
- RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
- Reference(other.mValue);
- }
-
- RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
- Set(other.mValue);
- return *this;
- }
-
- RefBase(RefBase<T, Traits>&& other) {
- mValue = other.Detach();
- }
-
- RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
- if (&other != this) {
- Release(mValue);
- mValue = other.Detach();
- }
- return *this;
- }
-
- // Constructors from a RefBase<U>. Note that in the *-assignment operators this cannot be the
- // same as `other` because overload resolution rules would have chosen the *-assignement
- // operators defined with `other` == RefBase<T, Traits>.
- template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
- RefBase(const RefBase<U, UTraits>& other) : mValue(other.mValue) {
- Reference(other.mValue);
- }
-
- template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
- RefBase<T, Traits>& operator=(const RefBase<U, UTraits>& other) {
- Set(other.mValue);
- return *this;
- }
-
- template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
- RefBase(RefBase<U, UTraits>&& other) {
- mValue = other.Detach();
- }
-
- template <typename U, typename UTraits, typename = typename std::is_convertible<U, T>::type>
- RefBase<T, Traits>& operator=(RefBase<U, UTraits>&& other) {
- Release(mValue);
- mValue = other.Detach();
- return *this;
- }
-
- // Comparison operators.
- bool operator==(const T& other) const {
- return mValue == other;
- }
-
- bool operator!=(const T& other) const {
- return mValue != other;
- }
-
- const T operator->() const {
- return mValue;
- }
- T operator->() {
- return mValue;
- }
-
- // Smart pointer methods.
- const T& Get() const {
- return mValue;
- }
- T& Get() {
- return mValue;
- }
-
- T Detach() DAWN_NO_DISCARD {
- T value{std::move(mValue)};
- mValue = Traits::kNullValue;
- return value;
- }
-
- void Acquire(T value) {
- Release(mValue);
- mValue = value;
- }
-
- T* InitializeInto() DAWN_NO_DISCARD {
- ASSERT(mValue == Traits::kNullValue);
- return &mValue;
- }
-
- private:
- // Friend is needed so that instances of RefBase<U> can call Reference and Release on
- // RefBase<T>.
- template <typename U, typename UTraits>
- friend class RefBase;
-
- static void Reference(T value) {
- if (value != Traits::kNullValue) {
- Traits::Reference(value);
- }
- }
- static void Release(T value) {
- if (value != Traits::kNullValue) {
- Traits::Release(value);
- }
- }
-
- void Set(T value) {
- if (mValue != value) {
- // Ensure that the new value is referenced before the old is released to prevent any
- // transitive frees that may affect the new value.
- Reference(value);
- Release(mValue);
- mValue = value;
- }
- }
-
- T mValue;
-};
-
-#endif // COMMON_REFBASE_H_
diff --git a/chromium/third_party/dawn/src/common/RefCounted.cpp b/chromium/third_party/dawn/src/common/RefCounted.cpp
deleted file mode 100644
index f5596386491..00000000000
--- a/chromium/third_party/dawn/src/common/RefCounted.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/RefCounted.h"
-
-#include "common/Assert.h"
-
-#include <cstddef>
-
-static constexpr size_t kPayloadBits = 1;
-static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
-static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);
-
-RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
- ASSERT((payload & kPayloadMask) == payload);
-}
-
-uint64_t RefCounted::GetRefCountForTesting() const {
- return mRefCount >> kPayloadBits;
-}
-
-uint64_t RefCounted::GetRefCountPayload() const {
- // We only care about the payload bits of the refcount. These never change after
- // initialization so we can use the relaxed memory order. The order doesn't guarantee
- // anything except the atomicity of the load, which is enough since any past values of the
- // atomic will have the correct payload bits.
- return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
-}
-
-void RefCounted::Reference() {
- ASSERT((mRefCount & ~kPayloadMask) != 0);
-
- // The relaxed ordering guarantees only the atomicity of the update, which is enough here
- // because the reference we are copying from still exists and makes sure other threads
- // don't delete `this`.
- // See the explanation in the Boost documentation:
- // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
- mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
-}
-
-void RefCounted::Release() {
- ASSERT((mRefCount & ~kPayloadMask) != 0);
-
- // The release fence here is to make sure all accesses to the object on a thread A
- // happen-before the object is deleted on a thread B. The release memory order ensures that
- // all accesses on thread A happen-before the refcount is decreased and the atomic variable
- // makes sure the refcount decrease in A happens-before the refcount decrease in B. Finally
- // the acquire fence in the destruction case makes sure the refcount decrease in B
- // happens-before the `delete this`.
- //
- // See the explanation in the Boost documentation:
- // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
- uint64_t previousRefCount = mRefCount.fetch_sub(kRefCountIncrement, std::memory_order_release);
-
- // Check that the previous reference count was strictly less than 2, ignoring payload bits.
- if (previousRefCount < 2 * kRefCountIncrement) {
- // Note that on ARM64 this will generate a `dmb ish` instruction which is a global
- // memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
- // should be enough and could end up being faster.
- std::atomic_thread_fence(std::memory_order_acquire);
- DeleteThis();
- }
-}
-
-void RefCounted::APIReference() {
- Reference();
-}
-
-void RefCounted::APIRelease() {
- Release();
-}
-
-void RefCounted::DeleteThis() {
- delete this;
-}
diff --git a/chromium/third_party/dawn/src/common/RefCounted.h b/chromium/third_party/dawn/src/common/RefCounted.h
deleted file mode 100644
index 6b266e3b9e4..00000000000
--- a/chromium/third_party/dawn/src/common/RefCounted.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_REFCOUNTED_H_
-#define COMMON_REFCOUNTED_H_
-
-#include "common/RefBase.h"
-
-#include <atomic>
-#include <cstdint>
-
-class RefCounted {
- public:
- RefCounted(uint64_t payload = 0);
-
- uint64_t GetRefCountForTesting() const;
- uint64_t GetRefCountPayload() const;
-
- void Reference();
- void Release();
-
- void APIReference();
- void APIRelease();
-
- protected:
- virtual ~RefCounted() = default;
- // A Derived class may override this if they require a custom deleter.
- virtual void DeleteThis();
-
- private:
- std::atomic<uint64_t> mRefCount;
-};
-
-template <typename T>
-struct RefCountedTraits {
- static constexpr T* kNullValue = nullptr;
- static void Reference(T* value) {
- value->Reference();
- }
- static void Release(T* value) {
- value->Release();
- }
-};
-
-template <typename T>
-class Ref : public RefBase<T*, RefCountedTraits<T>> {
- public:
- using RefBase<T*, RefCountedTraits<T>>::RefBase;
-};
-
-template <typename T>
-Ref<T> AcquireRef(T* pointee) {
- Ref<T> ref;
- ref.Acquire(pointee);
- return ref;
-}
-
-#endif // COMMON_REFCOUNTED_H_
diff --git a/chromium/third_party/dawn/src/common/Result.cpp b/chromium/third_party/dawn/src/common/Result.cpp
deleted file mode 100644
index a4132cd0be1..00000000000
--- a/chromium/third_party/dawn/src/common/Result.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Result.h"
-
-// Implementation details of the tagged pointer Results
-namespace detail {
-
- intptr_t MakePayload(const void* pointer, PayloadType type) {
- intptr_t payload = reinterpret_cast<intptr_t>(pointer);
- ASSERT((payload & 3) == 0);
- return payload | type;
- }
-
- PayloadType GetPayloadType(intptr_t payload) {
- return static_cast<PayloadType>(payload & 3);
- }
-
-} // namespace detail
diff --git a/chromium/third_party/dawn/src/common/Result.h b/chromium/third_party/dawn/src/common/Result.h
deleted file mode 100644
index 4cea7f2f925..00000000000
--- a/chromium/third_party/dawn/src/common/Result.h
+++ /dev/null
@@ -1,519 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_RESULT_H_
-#define COMMON_RESULT_H_
-
-#include "common/Assert.h"
-#include "common/Compiler.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-#include <type_traits>
-#include <utility>
-
-// Result<T, E> is the following sum type (Haskell notation):
-//
-// data Result T E = Success T | Error E | Empty
-//
-// It is meant to be used as the return type of functions that might fail. The reason for the Empty
-// case is that a Result should never be discarded, only destructured (its error or success moved
-// out) or moved into a different Result. The Empty case tags Results that have been moved out and
-// Result's destructor should ASSERT on it being Empty.
-//
-// Since C++ doesn't have efficient sum types for the special cases we care about, we provide
-// template specializations for them.
-
-template <typename T, typename E>
-class Result;
-
-// The interface of Result<T, E> should look like the following.
-// public:
-// Result(T&& success);
-// Result(std::unique_ptr<E> error);
-//
-// Result(Result<T, E>&& other);
-// Result<T, E>& operator=(Result<T, E>&& other);
-//
-// ~Result();
-//
-// bool IsError() const;
-// bool IsSuccess() const;
-//
-// T&& AcquireSuccess();
-// std::unique_ptr<E> AcquireError();
-
-// Specialization of Result for returning errors only via pointers. It is basically a pointer
-// where nullptr is both Success and Empty.
-template <typename E>
-class DAWN_NO_DISCARD Result<void, E> {
- public:
- Result();
- Result(std::unique_ptr<E> error);
-
- Result(Result<void, E>&& other);
- Result<void, E>& operator=(Result<void, E>&& other);
-
- ~Result();
-
- bool IsError() const;
- bool IsSuccess() const;
-
- void AcquireSuccess();
- std::unique_ptr<E> AcquireError();
-
- private:
- std::unique_ptr<E> mError;
-};
-
-// Uses SFINAE to try to get alignof(T) but fallback to Default if T isn't defined.
-template <typename T, size_t Default, typename = size_t>
-constexpr size_t alignof_if_defined_else_default = Default;
-
-template <typename T, size_t Default>
-constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T))> = alignof(T);
-
-// Specialization of Result when both the error an success are pointers. It is implemented as a
-// tagged pointer. The tag for Success is 0 so that returning the value is fastest.
-
-namespace detail {
- // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
- // but we really want them inlined so we keep them in the headers
- enum PayloadType {
- Success = 0,
- Error = 1,
- Empty = 2,
- };
-
- intptr_t MakePayload(const void* pointer, PayloadType type);
- PayloadType GetPayloadType(intptr_t payload);
-
- template <typename T>
- static T* GetSuccessFromPayload(intptr_t payload);
- template <typename E>
- static E* GetErrorFromPayload(intptr_t payload);
-
- constexpr static intptr_t kEmptyPayload = Empty;
-} // namespace detail
-
-template <typename T, typename E>
-class DAWN_NO_DISCARD Result<T*, E> {
- public:
- static_assert(alignof_if_defined_else_default<T, 4> >= 4,
- "Result<T*, E*> reserves two bits for tagging pointers");
- static_assert(alignof_if_defined_else_default<E, 4> >= 4,
- "Result<T*, E*> reserves two bits for tagging pointers");
-
- Result(T* success);
- Result(std::unique_ptr<E> error);
-
- // Support returning a Result<T*, E*> from a Result<TChild*, E*>
- template <typename TChild>
- Result(Result<TChild*, E>&& other);
- template <typename TChild>
- Result<T*, E>& operator=(Result<TChild*, E>&& other);
-
- ~Result();
-
- bool IsError() const;
- bool IsSuccess() const;
-
- T* AcquireSuccess();
- std::unique_ptr<E> AcquireError();
-
- private:
- template <typename T2, typename E2>
- friend class Result;
-
- intptr_t mPayload = detail::kEmptyPayload;
-};
-
-template <typename T, typename E>
-class DAWN_NO_DISCARD Result<const T*, E> {
- public:
- static_assert(alignof_if_defined_else_default<T, 4> >= 4,
- "Result<T*, E*> reserves two bits for tagging pointers");
- static_assert(alignof_if_defined_else_default<E, 4> >= 4,
- "Result<T*, E*> reserves two bits for tagging pointers");
-
- Result(const T* success);
- Result(std::unique_ptr<E> error);
-
- Result(Result<const T*, E>&& other);
- Result<const T*, E>& operator=(Result<const T*, E>&& other);
-
- ~Result();
-
- bool IsError() const;
- bool IsSuccess() const;
-
- const T* AcquireSuccess();
- std::unique_ptr<E> AcquireError();
-
- private:
- intptr_t mPayload = detail::kEmptyPayload;
-};
-
-template <typename T>
-class Ref;
-
-template <typename T, typename E>
-class DAWN_NO_DISCARD Result<Ref<T>, E> {
- public:
- static_assert(alignof_if_defined_else_default<T, 4> >= 4,
- "Result<Ref<T>, E> reserves two bits for tagging pointers");
- static_assert(alignof_if_defined_else_default<E, 4> >= 4,
- "Result<Ref<T>, E> reserves two bits for tagging pointers");
-
- template <typename U>
- Result(Ref<U>&& success);
- Result(std::unique_ptr<E> error);
-
- template <typename U>
- Result(Result<Ref<U>, E>&& other);
- template <typename U>
- Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
-
- ~Result();
-
- bool IsError() const;
- bool IsSuccess() const;
-
- Ref<T> AcquireSuccess();
- std::unique_ptr<E> AcquireError();
-
- private:
- template <typename T2, typename E2>
- friend class Result;
-
- intptr_t mPayload = detail::kEmptyPayload;
-};
-
-// Catchall definition of Result<T, E> implemented as a tagged struct. It could be improved to use
-// a tagged union instead if it turns out to be a hotspot. T and E must be movable and default
-// constructible.
-template <typename T, typename E>
-class DAWN_NO_DISCARD Result {
- public:
- Result(T&& success);
- Result(std::unique_ptr<E> error);
-
- Result(Result<T, E>&& other);
- Result<T, E>& operator=(Result<T, E>&& other);
-
- ~Result();
-
- bool IsError() const;
- bool IsSuccess() const;
-
- T&& AcquireSuccess();
- std::unique_ptr<E> AcquireError();
-
- private:
- enum PayloadType {
- Success = 0,
- Error = 1,
- Acquired = 2,
- };
- PayloadType mType;
-
- std::unique_ptr<E> mError;
- T mSuccess;
-};
-
-// Implementation of Result<void, E>
-template <typename E>
-Result<void, E>::Result() {
-}
-
-template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
-
-template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
-
-template <typename E>
-Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
- ASSERT(mError == nullptr);
- mError = std::move(other.mError);
- return *this;
-}
-
-template <typename E>
-Result<void, E>::~Result() {
- ASSERT(mError == nullptr);
-}
-
-template <typename E>
-bool Result<void, E>::IsError() const {
- return mError != nullptr;
-}
-
-template <typename E>
-bool Result<void, E>::IsSuccess() const {
- return mError == nullptr;
-}
-
-template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
-
-template <typename E>
-std::unique_ptr<E> Result<void, E>::AcquireError() {
- return std::move(mError);
-}
-
-// Implementation details of the tagged pointer Results
-namespace detail {
-
- template <typename T>
- T* GetSuccessFromPayload(intptr_t payload) {
- ASSERT(GetPayloadType(payload) == Success);
- return reinterpret_cast<T*>(payload);
- }
-
- template <typename E>
- E* GetErrorFromPayload(intptr_t payload) {
- ASSERT(GetPayloadType(payload) == Error);
- return reinterpret_cast<E*>(payload ^ 1);
- }
-
-} // namespace detail
-
-// Implementation of Result<T*, E>
-template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
-
-template <typename T, typename E>
-Result<T*, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
-
-template <typename T, typename E>
-template <typename TChild>
-Result<T*, E>::Result(Result<TChild*, E>&& other) : mPayload(other.mPayload) {
- other.mPayload = detail::kEmptyPayload;
- static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
-}
-
-template <typename T, typename E>
-template <typename TChild>
-Result<T*, E>& Result<T*, E>::operator=(Result<TChild*, E>&& other) {
- ASSERT(mPayload == detail::kEmptyPayload);
- static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
- mPayload = other.mPayload;
- other.mPayload = detail::kEmptyPayload;
- return *this;
-}
-
-template <typename T, typename E>
-Result<T*, E>::~Result() {
- ASSERT(mPayload == detail::kEmptyPayload);
-}
-
-template <typename T, typename E>
-bool Result<T*, E>::IsError() const {
- return detail::GetPayloadType(mPayload) == detail::Error;
-}
-
-template <typename T, typename E>
-bool Result<T*, E>::IsSuccess() const {
- return detail::GetPayloadType(mPayload) == detail::Success;
-}
-
-template <typename T, typename E>
-T* Result<T*, E>::AcquireSuccess() {
- T* success = detail::GetSuccessFromPayload<T>(mPayload);
- mPayload = detail::kEmptyPayload;
- return success;
-}
-
-template <typename T, typename E>
-std::unique_ptr<E> Result<T*, E>::AcquireError() {
- std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
- mPayload = detail::kEmptyPayload;
- return std::move(error);
-}
-
-// Implementation of Result<const T*, E*>
-template <typename T, typename E>
-Result<const T*, E>::Result(const T* success)
- : mPayload(detail::MakePayload(success, detail::Success)) {
-}
-
-template <typename T, typename E>
-Result<const T*, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
-
-template <typename T, typename E>
-Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
- other.mPayload = detail::kEmptyPayload;
-}
-
-template <typename T, typename E>
-Result<const T*, E>& Result<const T*, E>::operator=(Result<const T*, E>&& other) {
- ASSERT(mPayload == detail::kEmptyPayload);
- mPayload = other.mPayload;
- other.mPayload = detail::kEmptyPayload;
- return *this;
-}
-
-template <typename T, typename E>
-Result<const T*, E>::~Result() {
- ASSERT(mPayload == detail::kEmptyPayload);
-}
-
-template <typename T, typename E>
-bool Result<const T*, E>::IsError() const {
- return detail::GetPayloadType(mPayload) == detail::Error;
-}
-
-template <typename T, typename E>
-bool Result<const T*, E>::IsSuccess() const {
- return detail::GetPayloadType(mPayload) == detail::Success;
-}
-
-template <typename T, typename E>
-const T* Result<const T*, E>::AcquireSuccess() {
- T* success = detail::GetSuccessFromPayload<T>(mPayload);
- mPayload = detail::kEmptyPayload;
- return success;
-}
-
-template <typename T, typename E>
-std::unique_ptr<E> Result<const T*, E>::AcquireError() {
- std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
- mPayload = detail::kEmptyPayload;
- return std::move(error);
-}
-
-// Implementation of Result<Ref<T>, E>
-template <typename T, typename E>
-template <typename U>
-Result<Ref<T>, E>::Result(Ref<U>&& success)
- : mPayload(detail::MakePayload(success.Detach(), detail::Success)) {
- static_assert(std::is_convertible<U*, T*>::value, "");
-}
-
-template <typename T, typename E>
-Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
- : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
-
-template <typename T, typename E>
-template <typename U>
-Result<Ref<T>, E>::Result(Result<Ref<U>, E>&& other) : mPayload(other.mPayload) {
- static_assert(std::is_convertible<U*, T*>::value, "");
- other.mPayload = detail::kEmptyPayload;
-}
-
-template <typename T, typename E>
-template <typename U>
-Result<Ref<U>, E>& Result<Ref<T>, E>::operator=(Result<Ref<U>, E>&& other) {
- static_assert(std::is_convertible<U*, T*>::value, "");
- ASSERT(mPayload == detail::kEmptyPayload);
- mPayload = other.mPayload;
- other.mPayload = detail::kEmptyPayload;
- return *this;
-}
-
-template <typename T, typename E>
-Result<Ref<T>, E>::~Result() {
- ASSERT(mPayload == detail::kEmptyPayload);
-}
-
-template <typename T, typename E>
-bool Result<Ref<T>, E>::IsError() const {
- return detail::GetPayloadType(mPayload) == detail::Error;
-}
-
-template <typename T, typename E>
-bool Result<Ref<T>, E>::IsSuccess() const {
- return detail::GetPayloadType(mPayload) == detail::Success;
-}
-
-template <typename T, typename E>
-Ref<T> Result<Ref<T>, E>::AcquireSuccess() {
- ASSERT(IsSuccess());
- Ref<T> success = AcquireRef(detail::GetSuccessFromPayload<T>(mPayload));
- mPayload = detail::kEmptyPayload;
- return success;
-}
-
-template <typename T, typename E>
-std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
- ASSERT(IsError());
- std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
- mPayload = detail::kEmptyPayload;
- return std::move(error);
-}
-
-// Implementation of Result<T, E>
-template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
-
-template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
-
-template <typename T, typename E>
-Result<T, E>::~Result() {
- ASSERT(mType == Acquired);
-}
-
-template <typename T, typename E>
-Result<T, E>::Result(Result<T, E>&& other)
- : mType(other.mType), mError(std::move(other.mError)), mSuccess(std::move(other.mSuccess)) {
- other.mType = Acquired;
-}
-template <typename T, typename E>
-Result<T, E>& Result<T, E>::operator=(Result<T, E>&& other) {
- mType = other.mType;
- mError = std::move(other.mError);
- mSuccess = std::move(other.mSuccess);
- other.mType = Acquired;
- return *this;
-}
-
-template <typename T, typename E>
-bool Result<T, E>::IsError() const {
- return mType == Error;
-}
-
-template <typename T, typename E>
-bool Result<T, E>::IsSuccess() const {
- return mType == Success;
-}
-
-template <typename T, typename E>
-T&& Result<T, E>::AcquireSuccess() {
- ASSERT(mType == Success);
- mType = Acquired;
- return std::move(mSuccess);
-}
-
-template <typename T, typename E>
-std::unique_ptr<E> Result<T, E>::AcquireError() {
- ASSERT(mType == Error);
- mType = Acquired;
- return std::move(mError);
-}
-
-#endif // COMMON_RESULT_H_
diff --git a/chromium/third_party/dawn/src/common/SerialMap.h b/chromium/third_party/dawn/src/common/SerialMap.h
deleted file mode 100644
index fe3ca188918..00000000000
--- a/chromium/third_party/dawn/src/common/SerialMap.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_SERIALMAP_H_
-#define COMMON_SERIALMAP_H_
-
-#include "common/SerialStorage.h"
-
-#include <map>
-#include <vector>
-
-template <typename Serial, typename Value>
-class SerialMap;
-
-template <typename SerialT, typename ValueT>
-struct SerialStorageTraits<SerialMap<SerialT, ValueT>> {
- using Serial = SerialT;
- using Value = ValueT;
- using Storage = std::map<Serial, std::vector<Value>>;
- using StorageIterator = typename Storage::iterator;
- using ConstStorageIterator = typename Storage::const_iterator;
-};
-
-// SerialMap stores a map from Serial to Value.
-// Unlike SerialQueue, items may be enqueued with Serials in any
-// arbitrary order. SerialMap provides useful iterators for iterating
-// through Value items in order of increasing Serial.
-template <typename Serial, typename Value>
-class SerialMap : public SerialStorage<SerialMap<Serial, Value>> {
- public:
- void Enqueue(const Value& value, Serial serial);
- void Enqueue(Value&& value, Serial serial);
- void Enqueue(const std::vector<Value>& values, Serial serial);
- void Enqueue(std::vector<Value>&& values, Serial serial);
-};
-
-// SerialMap
-
-template <typename Serial, typename Value>
-void SerialMap<Serial, Value>::Enqueue(const Value& value, Serial serial) {
- this->mStorage[serial].emplace_back(value);
-}
-
-template <typename Serial, typename Value>
-void SerialMap<Serial, Value>::Enqueue(Value&& value, Serial serial) {
- this->mStorage[serial].emplace_back(value);
-}
-
-template <typename Serial, typename Value>
-void SerialMap<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
- DAWN_ASSERT(values.size() > 0);
- for (const Value& value : values) {
- Enqueue(value, serial);
- }
-}
-
-template <typename Serial, typename Value>
-void SerialMap<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
- DAWN_ASSERT(values.size() > 0);
- for (const Value& value : values) {
- Enqueue(value, serial);
- }
-}
-
-#endif // COMMON_SERIALMAP_H_
diff --git a/chromium/third_party/dawn/src/common/SerialQueue.h b/chromium/third_party/dawn/src/common/SerialQueue.h
deleted file mode 100644
index 5f2494786b0..00000000000
--- a/chromium/third_party/dawn/src/common/SerialQueue.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_SERIALQUEUE_H_
-#define COMMON_SERIALQUEUE_H_
-
-#include "common/SerialStorage.h"
-
-#include <vector>
-
-template <typename Serial, typename Value>
-class SerialQueue;
-
-template <typename SerialT, typename ValueT>
-struct SerialStorageTraits<SerialQueue<SerialT, ValueT>> {
- using Serial = SerialT;
- using Value = ValueT;
- using SerialPair = std::pair<Serial, std::vector<Value>>;
- using Storage = std::vector<SerialPair>;
- using StorageIterator = typename Storage::iterator;
- using ConstStorageIterator = typename Storage::const_iterator;
-};
-
-// SerialQueue stores an associative list mapping a Serial to Value.
-// It enforces that the Serials enqueued are strictly non-decreasing.
-// This makes it very efficient iterate or clear all items added up
-// to some Serial value because they are stored contiguously in memory.
-template <typename Serial, typename Value>
-class SerialQueue : public SerialStorage<SerialQueue<Serial, Value>> {
- public:
-
- // The serial must be given in (not strictly) increasing order.
- void Enqueue(const Value& value, Serial serial);
- void Enqueue(Value&& value, Serial serial);
- void Enqueue(const std::vector<Value>& values, Serial serial);
- void Enqueue(std::vector<Value>&& values, Serial serial);
-};
-
-// SerialQueue
-
-template <typename Serial, typename Value>
-void SerialQueue<Serial, Value>::Enqueue(const Value& value, Serial serial) {
- DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
-
- if (this->Empty() || this->mStorage.back().first < serial) {
- this->mStorage.emplace_back(serial, std::vector<Value>{});
- }
- this->mStorage.back().second.push_back(value);
-}
-
-template <typename Serial, typename Value>
-void SerialQueue<Serial, Value>::Enqueue(Value&& value, Serial serial) {
- DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
-
- if (this->Empty() || this->mStorage.back().first < serial) {
- this->mStorage.emplace_back(serial, std::vector<Value>{});
- }
- this->mStorage.back().second.push_back(std::move(value));
-}
-
-template <typename Serial, typename Value>
-void SerialQueue<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
- DAWN_ASSERT(values.size() > 0);
- DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
- this->mStorage.emplace_back(serial, values);
-}
-
-template <typename Serial, typename Value>
-void SerialQueue<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
- DAWN_ASSERT(values.size() > 0);
- DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
- this->mStorage.emplace_back(serial, values);
-}
-
-#endif // COMMON_SERIALQUEUE_H_
diff --git a/chromium/third_party/dawn/src/common/SerialStorage.h b/chromium/third_party/dawn/src/common/SerialStorage.h
deleted file mode 100644
index 2999071191a..00000000000
--- a/chromium/third_party/dawn/src/common/SerialStorage.h
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_SERIALSTORAGE_H_
-#define COMMON_SERIALSTORAGE_H_
-
-#include "common/Assert.h"
-
-#include <cstdint>
-#include <utility>
-
-template <typename T>
-struct SerialStorageTraits {};
-
-template <typename Derived>
-class SerialStorage {
- protected:
- using Serial = typename SerialStorageTraits<Derived>::Serial;
- using Value = typename SerialStorageTraits<Derived>::Value;
- using Storage = typename SerialStorageTraits<Derived>::Storage;
- using StorageIterator = typename SerialStorageTraits<Derived>::StorageIterator;
- using ConstStorageIterator = typename SerialStorageTraits<Derived>::ConstStorageIterator;
-
- public:
- class Iterator {
- public:
- Iterator(StorageIterator start);
- Iterator& operator++();
-
- bool operator==(const Iterator& other) const;
- bool operator!=(const Iterator& other) const;
- Value& operator*() const;
-
- private:
- StorageIterator mStorageIterator;
- // Special case the mSerialIterator when it should be equal to mStorageIterator.begin()
- // otherwise we could ask mStorageIterator.begin() when mStorageIterator is mStorage.end()
- // which is invalid. mStorageIterator.begin() is tagged with a nullptr.
- Value* mSerialIterator;
- };
-
- class ConstIterator {
- public:
- ConstIterator(ConstStorageIterator start);
- ConstIterator& operator++();
-
- bool operator==(const ConstIterator& other) const;
- bool operator!=(const ConstIterator& other) const;
- const Value& operator*() const;
-
- private:
- ConstStorageIterator mStorageIterator;
- const Value* mSerialIterator;
- };
-
- class BeginEnd {
- public:
- BeginEnd(StorageIterator start, StorageIterator end);
-
- Iterator begin() const;
- Iterator end() const;
-
- private:
- StorageIterator mStartIt;
- StorageIterator mEndIt;
- };
-
- class ConstBeginEnd {
- public:
- ConstBeginEnd(ConstStorageIterator start, ConstStorageIterator end);
-
- ConstIterator begin() const;
- ConstIterator end() const;
-
- private:
- ConstStorageIterator mStartIt;
- ConstStorageIterator mEndIt;
- };
-
- // Derived classes may specialize constraits for elements stored
- // Ex.) SerialQueue enforces that the serial must be given in (not strictly)
- // increasing order
- template <typename... Params>
- void Enqueue(Params&&... args, Serial serial) {
- Derived::Enqueue(std::forward<Params>(args)..., serial);
- }
-
- bool Empty() const;
-
- // The UpTo variants of Iterate and Clear affect all values associated to a serial
- // that is smaller OR EQUAL to the given serial. Iterating is done like so:
- // for (const T& value : queue.IterateAll()) { stuff(T); }
- ConstBeginEnd IterateAll() const;
- ConstBeginEnd IterateUpTo(Serial serial) const;
- BeginEnd IterateAll();
- BeginEnd IterateUpTo(Serial serial);
-
- void Clear();
- void ClearUpTo(Serial serial);
-
- Serial FirstSerial() const;
- Serial LastSerial() const;
-
- protected:
- // Returns the first StorageIterator that a serial bigger than serial.
- ConstStorageIterator FindUpTo(Serial serial) const;
- StorageIterator FindUpTo(Serial serial);
- Storage mStorage;
-};
-
-// SerialStorage
-
-template <typename Derived>
-bool SerialStorage<Derived>::Empty() const {
- return mStorage.empty();
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateAll() const {
- return {mStorage.begin(), mStorage.end()};
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateUpTo(
- Serial serial) const {
- return {mStorage.begin(), FindUpTo(serial)};
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateAll() {
- return {mStorage.begin(), mStorage.end()};
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateUpTo(Serial serial) {
- return {mStorage.begin(), FindUpTo(serial)};
-}
-
-template <typename Derived>
-void SerialStorage<Derived>::Clear() {
- mStorage.clear();
-}
-
-template <typename Derived>
-void SerialStorage<Derived>::ClearUpTo(Serial serial) {
- mStorage.erase(mStorage.begin(), FindUpTo(serial));
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Serial SerialStorage<Derived>::FirstSerial() const {
- DAWN_ASSERT(!Empty());
- return mStorage.begin()->first;
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Serial SerialStorage<Derived>::LastSerial() const {
- DAWN_ASSERT(!Empty());
- return mStorage.back().first;
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstStorageIterator SerialStorage<Derived>::FindUpTo(
- Serial serial) const {
- auto it = mStorage.begin();
- while (it != mStorage.end() && it->first <= serial) {
- it++;
- }
- return it;
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpTo(Serial serial) {
- auto it = mStorage.begin();
- while (it != mStorage.end() && it->first <= serial) {
- it++;
- }
- return it;
-}
-
-// SerialStorage::BeginEnd
-
-template <typename Derived>
-SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
- typename SerialStorage<Derived>::StorageIterator end)
- : mStartIt(start), mEndIt(end) {
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
- return {mStartIt};
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end() const {
- return {mEndIt};
-}
-
-// SerialStorage::Iterator
-
-template <typename Derived>
-SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
- : mStorageIterator(start), mSerialIterator(nullptr) {
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
- Value* vectorData = mStorageIterator->second.data();
-
- if (mSerialIterator == nullptr) {
- mSerialIterator = vectorData + 1;
- } else {
- mSerialIterator++;
- }
-
- if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
- mSerialIterator = nullptr;
- mStorageIterator++;
- }
-
- return *this;
-}
-
-template <typename Derived>
-bool SerialStorage<Derived>::Iterator::operator==(
- const typename SerialStorage<Derived>::Iterator& other) const {
- return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
-}
-
-template <typename Derived>
-bool SerialStorage<Derived>::Iterator::operator!=(
- const typename SerialStorage<Derived>::Iterator& other) const {
- return !(*this == other);
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::Value& SerialStorage<Derived>::Iterator::operator*() const {
- if (mSerialIterator == nullptr) {
- return *mStorageIterator->second.begin();
- }
- return *mSerialIterator;
-}
-
-// SerialStorage::ConstBeginEnd
-
-template <typename Derived>
-SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
- typename SerialStorage<Derived>::ConstStorageIterator start,
- typename SerialStorage<Derived>::ConstStorageIterator end)
- : mStartIt(start), mEndIt(end) {
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
- const {
- return {mStartIt};
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::end() const {
- return {mEndIt};
-}
-
-// SerialStorage::ConstIterator
-
-template <typename Derived>
-SerialStorage<Derived>::ConstIterator::ConstIterator(
- typename SerialStorage<Derived>::ConstStorageIterator start)
- : mStorageIterator(start), mSerialIterator(nullptr) {
-}
-
-template <typename Derived>
-typename SerialStorage<Derived>::ConstIterator&
-SerialStorage<Derived>::ConstIterator::operator++() {
- const Value* vectorData = mStorageIterator->second.data();
-
- if (mSerialIterator == nullptr) {
- mSerialIterator = vectorData + 1;
- } else {
- mSerialIterator++;
- }
-
- if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
- mSerialIterator = nullptr;
- mStorageIterator++;
- }
-
- return *this;
-}
-
-template <typename Derived>
-bool SerialStorage<Derived>::ConstIterator::operator==(
- const typename SerialStorage<Derived>::ConstIterator& other) const {
- return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
-}
-
-template <typename Derived>
-bool SerialStorage<Derived>::ConstIterator::operator!=(
- const typename SerialStorage<Derived>::ConstIterator& other) const {
- return !(*this == other);
-}
-
-template <typename Derived>
-const typename SerialStorage<Derived>::Value& SerialStorage<Derived>::ConstIterator::operator*()
- const {
- if (mSerialIterator == nullptr) {
- return *mStorageIterator->second.begin();
- }
- return *mSerialIterator;
-}
-
-#endif // COMMON_SERIALSTORAGE_H_
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.cpp b/chromium/third_party/dawn/src/common/SlabAllocator.cpp
deleted file mode 100644
index 0ffb0731949..00000000000
--- a/chromium/third_party/dawn/src/common/SlabAllocator.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/SlabAllocator.h"
-
-#include "common/Assert.h"
-#include "common/Math.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <limits>
-#include <new>
-
-// IndexLinkNode
-
-SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
- : index(index), nextIndex(nextIndex) {
-}
-
-// Slab
-
-SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
- : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
-
-SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
-
-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
-
-SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
-
-SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
- Slab* slab = this->next;
- while (slab != nullptr) {
- Slab* next = slab->next;
- ASSERT(slab->blocksInUse == 0);
- // Delete the slab's allocation. The slab is allocated inside slab->allocation.
- delete[] slab->allocation;
- slab = next;
- }
-}
-
-// SlabAllocatorImpl
-
-SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
- std::numeric_limits<SlabAllocatorImpl::Index>::max();
-
-SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
- uint32_t objectSize,
- uint32_t objectAlignment)
- : mAllocationAlignment(std::max(static_cast<uint32_t>(alignof(Slab)), objectAlignment)),
- mSlabBlocksOffset(Align(sizeof(Slab), objectAlignment)),
- mIndexLinkNodeOffset(Align(objectSize, alignof(IndexLinkNode))),
- mBlockStride(Align(mIndexLinkNodeOffset + sizeof(IndexLinkNode), objectAlignment)),
- mBlocksPerSlab(blocksPerSlab),
- mTotalAllocationSize(
- // required allocation size
- static_cast<size_t>(mSlabBlocksOffset) + mBlocksPerSlab * mBlockStride +
- // Pad the allocation size by mAllocationAlignment so that the aligned allocation still
- // fulfills the required size.
- mAllocationAlignment) {
- ASSERT(IsPowerOfTwo(mAllocationAlignment));
-}
-
-SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
- : mAllocationAlignment(rhs.mAllocationAlignment),
- mSlabBlocksOffset(rhs.mSlabBlocksOffset),
- mIndexLinkNodeOffset(rhs.mIndexLinkNodeOffset),
- mBlockStride(rhs.mBlockStride),
- mBlocksPerSlab(rhs.mBlocksPerSlab),
- mTotalAllocationSize(rhs.mTotalAllocationSize),
- mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
- mFullSlabs(std::move(rhs.mFullSlabs)),
- mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
-
-SlabAllocatorImpl::~SlabAllocatorImpl() = default;
-
-SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::OffsetFrom(
- IndexLinkNode* node,
- std::make_signed_t<Index> offset) const {
- return reinterpret_cast<IndexLinkNode*>(reinterpret_cast<char*>(node) +
- static_cast<intptr_t>(mBlockStride) * offset);
-}
-
-SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::NodeFromObject(void* object) const {
- return reinterpret_cast<SlabAllocatorImpl::IndexLinkNode*>(static_cast<char*>(object) +
- mIndexLinkNodeOffset);
-}
-
-void* SlabAllocatorImpl::ObjectFromNode(IndexLinkNode* node) const {
- return static_cast<void*>(reinterpret_cast<char*>(node) - mIndexLinkNodeOffset);
-}
-
-bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const {
- char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset;
- IndexLinkNode* firstNode = NodeFromObject(firstObjectPtr);
- IndexLinkNode* lastNode = OffsetFrom(firstNode, mBlocksPerSlab - 1);
- return node >= firstNode && node <= lastNode && node->index < mBlocksPerSlab;
-}
-
-void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const {
- ASSERT(IsNodeInSlab(slab, node));
-
- IndexLinkNode* head = slab->freeList;
- if (head == nullptr) {
- node->nextIndex = kInvalidIndex;
- } else {
- ASSERT(IsNodeInSlab(slab, head));
- node->nextIndex = head->index;
- }
- slab->freeList = node;
-
- ASSERT(slab->blocksInUse != 0);
- slab->blocksInUse--;
-}
-
-SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::PopFront(Slab* slab) const {
- ASSERT(slab->freeList != nullptr);
-
- IndexLinkNode* head = slab->freeList;
- if (head->nextIndex == kInvalidIndex) {
- slab->freeList = nullptr;
- } else {
- ASSERT(IsNodeInSlab(slab, head));
- slab->freeList = OffsetFrom(head, head->nextIndex - head->index);
- ASSERT(IsNodeInSlab(slab, slab->freeList));
- }
-
- ASSERT(slab->blocksInUse < mBlocksPerSlab);
- slab->blocksInUse++;
- return head;
-}
-
-void SlabAllocatorImpl::SentinelSlab::Prepend(SlabAllocatorImpl::Slab* slab) {
- if (this->next != nullptr) {
- this->next->prev = slab;
- }
- slab->prev = this;
- slab->next = this->next;
- this->next = slab;
-}
-
-void SlabAllocatorImpl::Slab::Splice() {
- SlabAllocatorImpl::Slab* originalPrev = this->prev;
- SlabAllocatorImpl::Slab* originalNext = this->next;
-
- this->prev = nullptr;
- this->next = nullptr;
-
- ASSERT(originalPrev != nullptr);
-
- // Set the originalNext's prev pointer.
- if (originalNext != nullptr) {
- originalNext->prev = originalPrev;
- }
-
- // Now, set the originalNext as the originalPrev's new next.
- originalPrev->next = originalNext;
-}
-
-void* SlabAllocatorImpl::Allocate() {
- if (mAvailableSlabs.next == nullptr) {
- GetNewSlab();
- }
-
- Slab* slab = mAvailableSlabs.next;
- IndexLinkNode* node = PopFront(slab);
- ASSERT(node != nullptr);
-
- // Move full slabs to a separate list, so allocate can always return quickly.
- if (slab->blocksInUse == mBlocksPerSlab) {
- slab->Splice();
- mFullSlabs.Prepend(slab);
- }
-
- return ObjectFromNode(node);
-}
-
-void SlabAllocatorImpl::Deallocate(void* ptr) {
- IndexLinkNode* node = NodeFromObject(ptr);
-
- ASSERT(node->index < mBlocksPerSlab);
- void* firstAllocation = ObjectFromNode(OffsetFrom(node, -node->index));
- Slab* slab = reinterpret_cast<Slab*>(static_cast<char*>(firstAllocation) - mSlabBlocksOffset);
- ASSERT(slab != nullptr);
-
- bool slabWasFull = slab->blocksInUse == mBlocksPerSlab;
-
- ASSERT(slab->blocksInUse != 0);
- PushFront(slab, node);
-
- if (slabWasFull) {
- // Slab is in the full list. Move it to the recycled list.
- ASSERT(slab->freeList != nullptr);
- slab->Splice();
- mRecycledSlabs.Prepend(slab);
- }
-
- // TODO(crbug.com/dawn/825): Occasionally prune slabs if |blocksInUse == 0|.
- // Doing so eagerly hurts performance.
-}
-
-void SlabAllocatorImpl::GetNewSlab() {
- // Should only be called when there are no available slabs.
- ASSERT(mAvailableSlabs.next == nullptr);
-
- if (mRecycledSlabs.next != nullptr) {
- // If the recycled list is non-empty, swap their contents.
- std::swap(mAvailableSlabs.next, mRecycledSlabs.next);
-
- // We swapped the next pointers, so the prev pointer is wrong.
- // Update it here.
- mAvailableSlabs.next->prev = &mAvailableSlabs;
- ASSERT(mRecycledSlabs.next == nullptr);
- return;
- }
-
- // TODO(crbug.com/dawn/824): Use aligned_alloc with C++17.
- char* allocation = new char[mTotalAllocationSize];
- char* alignedPtr = AlignPtr(allocation, mAllocationAlignment);
-
- char* dataStart = alignedPtr + mSlabBlocksOffset;
-
- IndexLinkNode* node = NodeFromObject(dataStart);
- for (uint32_t i = 0; i < mBlocksPerSlab; ++i) {
- new (OffsetFrom(node, i)) IndexLinkNode(i, i + 1);
- }
-
- IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
- lastNode->nextIndex = kInvalidIndex;
-
- mAvailableSlabs.Prepend(new (alignedPtr) Slab(allocation, node));
-}
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.h b/chromium/third_party/dawn/src/common/SlabAllocator.h
deleted file mode 100644
index b02ed520269..00000000000
--- a/chromium/third_party/dawn/src/common/SlabAllocator.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_SLABALLOCATOR_H_
-#define COMMON_SLABALLOCATOR_H_
-
-#include "common/PlacementAllocated.h"
-
-#include <cstdint>
-#include <type_traits>
-#include <utility>
-
-// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
-// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
-// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
-// recycles memory from previous allocations, except multiple allocations are hosted contiguously in
-// one large slab.
-//
-// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
-// into an std::vector. To service an allocation request, the allocator only needs to know the first
-// currently available slab. There are three backing linked lists: AVAILABLE, FULL, and RECYCLED.
-// A slab that is AVAILABLE can be used to immediately service allocation requests. Once it has no
-// remaining space, it is moved to the FULL state. When a FULL slab sees any deallocations, it is
-// moved to the RECYCLED state. The RECYCLED state is separate from the AVAILABLE state so that
-// deallocations don't immediately prepend slabs to the AVAILABLE list, and change the current slab
-// servicing allocations. When the AVAILABLE list becomes empty is it swapped with the RECYCLED
-// list.
-//
-// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
-// plus the extra bytes a "block") used to specify the constant index of the block in its parent
-// slab, as well as the index of the next available block. So, following the block next-indices
-// forms a linked list of free blocks.
-//
-// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
-// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
-// next-indices are initialized to form the free-list of blocks.
-//
-// Allocation: When an object is allocated, if there is no space available in an existing slab, a
-// new slab is created (or an old slab is recycled). The first block of the slab is removed and
-// returned.
-//
-// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
-// because it stores the index of its own allocation. That block is then prepended to the slab's
-// free list.
-class SlabAllocatorImpl {
- public:
- // Allocations host their current index and the index of the next free block.
- // Because this is an index, and not a byte offset, it can be much smaller than a size_t.
- // TODO(crbug.com/dawn/825): Is uint8_t sufficient?
- using Index = uint16_t;
-
- SlabAllocatorImpl(SlabAllocatorImpl&& rhs);
-
- protected:
- // This is essentially a singly linked list using indices instead of pointers,
- // so we store the index of "this" in |this->index|.
- struct IndexLinkNode : PlacementAllocated {
- IndexLinkNode(Index index, Index nextIndex);
-
- const Index index; // The index of this block in the slab.
- Index nextIndex; // The index of the next available block. kInvalidIndex, if none.
- };
-
- struct Slab : PlacementAllocated {
- // A slab is placement-allocated into an aligned pointer from a separate allocation.
- // Ownership of the allocation is transferred to the slab on creation.
- // | ---------- allocation --------- |
- // | pad | Slab | data ------------> |
- Slab(char allocation[], IndexLinkNode* head);
- Slab(Slab&& rhs);
-
- void Splice();
-
- char* allocation;
- IndexLinkNode* freeList;
- Slab* prev;
- Slab* next;
- Index blocksInUse;
- };
-
- SlabAllocatorImpl(Index blocksPerSlab, uint32_t objectSize, uint32_t objectAlignment);
- ~SlabAllocatorImpl();
-
- // Allocate a new block of memory.
- void* Allocate();
-
- // Deallocate a block of memory.
- void Deallocate(void* ptr);
-
- private:
- // The maximum value is reserved to indicate the end of the list.
- static Index kInvalidIndex;
-
- // Get the IndexLinkNode |offset| slots away.
- IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;
-
- // Compute the pointer to the IndexLinkNode from an allocated object.
- IndexLinkNode* NodeFromObject(void* object) const;
-
- // Compute the pointer to the object from an IndexLinkNode.
- void* ObjectFromNode(IndexLinkNode* node) const;
-
- bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
-
- // The Slab stores a linked-list of free allocations.
- // PushFront/PopFront adds/removes an allocation from the free list.
- void PushFront(Slab* slab, IndexLinkNode* node) const;
- IndexLinkNode* PopFront(Slab* slab) const;
-
- // Replace the current slab with a new one, and chain the old one off of it.
- // Both slabs may still be used for for allocation/deallocation, but older slabs
- // will be a little slower to get allocations from.
- void GetNewSlab();
-
- const uint32_t mAllocationAlignment;
-
- // | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
- // | -----------| mSlabBlocksOffset
- // | | ---------------------- | mBlockStride
- // | | ----------| mIndexLinkNodeOffset
- // | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)
-
- // A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset| is
- // the offset to the start of the aligned memory region.
- const uint32_t mSlabBlocksOffset;
-
- // The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
- const uint32_t mIndexLinkNodeOffset;
-
- // Because alignment of allocations may introduce padding, |mBlockStride| is the
- // distance between aligned blocks of (Allocation + IndexLinkNode)
- const uint32_t mBlockStride;
-
- const Index mBlocksPerSlab; // The total number of blocks in a slab.
-
- const size_t mTotalAllocationSize;
-
- struct SentinelSlab : Slab {
- SentinelSlab();
- ~SentinelSlab();
-
- SentinelSlab(SentinelSlab&& rhs);
-
- void Prepend(Slab* slab);
- };
-
- SentinelSlab mAvailableSlabs; // Available slabs to service allocations.
- SentinelSlab mFullSlabs; // Full slabs. Stored here so we can skip checking them.
- SentinelSlab mRecycledSlabs; // Recycled slabs. Not immediately added to |mAvailableSlabs| so
- // we don't thrash the current "active" slab.
-};
-
-template <typename T>
-class SlabAllocator : public SlabAllocatorImpl {
- public:
- SlabAllocator(size_t totalObjectBytes,
- uint32_t objectSize = sizeof(T),
- uint32_t objectAlignment = alignof(T))
- : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
- }
-
- template <typename... Args>
- T* Allocate(Args&&... args) {
- void* ptr = SlabAllocatorImpl::Allocate();
- return new (ptr) T(std::forward<Args>(args)...);
- }
-
- void Deallocate(T* object) {
- SlabAllocatorImpl::Deallocate(object);
- }
-};
-
-#endif // COMMON_SLABALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/common/StackContainer.h b/chromium/third_party/dawn/src/common/StackContainer.h
deleted file mode 100644
index be3cf32d0de..00000000000
--- a/chromium/third_party/dawn/src/common/StackContainer.h
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is a modified copy of Chromium's /src/base/containers/stack_container.h
-
-#ifndef COMMON_STACKCONTAINER_H_
-#define COMMON_STACKCONTAINER_H_
-
-#include "common/Compiler.h"
-
-#include <cstddef>
-#include <vector>
-
-// This allocator can be used with STL containers to provide a stack buffer
-// from which to allocate memory and overflows onto the heap. This stack buffer
-// would be allocated on the stack and allows us to avoid heap operations in
-// some situations.
-//
-// STL likes to make copies of allocators, so the allocator itself can't hold
-// the data. Instead, we make the creator responsible for creating a
-// StackAllocator::Source which contains the data. Copying the allocator
-// merely copies the pointer to this shared source, so all allocators created
-// based on our allocator will share the same stack buffer.
-//
-// This stack buffer implementation is very simple. The first allocation that
-// fits in the stack buffer will use the stack buffer. Any subsequent
-// allocations will not use the stack buffer, even if there is unused room.
-// This makes it appropriate for array-like containers, but the caller should
-// be sure to reserve() in the container up to the stack buffer size. Otherwise
-// the container will allocate a small array which will "use up" the stack
-// buffer.
-template <typename T, size_t stack_capacity>
-class StackAllocator : public std::allocator<T> {
- public:
- typedef typename std::allocator<T>::pointer pointer;
- typedef typename std::allocator<T>::size_type size_type;
-
- // Backing store for the allocator. The container owner is responsible for
- // maintaining this for as long as any containers using this allocator are
- // live.
- struct Source {
- Source() : used_stack_buffer_(false) {
- }
-
- // Casts the buffer in its right type.
- T* stack_buffer() {
- return reinterpret_cast<T*>(stack_buffer_);
- }
- const T* stack_buffer() const {
- return reinterpret_cast<const T*>(&stack_buffer_);
- }
-
- // The buffer itself. It is not of type T because we don't want the
- // constructors and destructors to be automatically called. Define a POD
- // buffer of the right size instead.
- alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
-#if defined(DAWN_COMPILER_GCC) && !defined(__x86_64__) && !defined(__i386__)
- static_assert(alignof(T) <= 16, "http://crbug.com/115612");
-#endif
-
- // Set when the stack buffer is used for an allocation. We do not track
- // how much of the buffer is used, only that somebody is using it.
- bool used_stack_buffer_;
- };
-
- // Used by containers when they want to refer to an allocator of type U.
- template <typename U>
- struct rebind {
- typedef StackAllocator<U, stack_capacity> other;
- };
-
- // For the straight up copy c-tor, we can share storage.
- StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
- : std::allocator<T>(), source_(rhs.source_) {
- }
-
- // ISO C++ requires the following constructor to be defined,
- // and std::vector in VC++2008SP1 Release fails with an error
- // in the class _Container_base_aux_alloc_real (from <xutility>)
- // if the constructor does not exist.
- // For this constructor, we cannot share storage; there's
- // no guarantee that the Source buffer of Ts is large enough
- // for Us.
- // TODO: If we were fancy pants, perhaps we could share storage
- // iff sizeof(T) == sizeof(U).
- template <typename U, size_t other_capacity>
- StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
- }
-
- // This constructor must exist. It creates a default allocator that doesn't
- // actually have a stack buffer. glibc's std::string() will compare the
- // current allocator against the default-constructed allocator, so this
- // should be fast.
- StackAllocator() : source_(nullptr) {
- }
-
- explicit StackAllocator(Source* source) : source_(source) {
- }
-
- // Actually do the allocation. Use the stack buffer if nobody has used it yet
- // and the size requested fits. Otherwise, fall through to the standard
- // allocator.
- pointer allocate(size_type n) {
- if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
- source_->used_stack_buffer_ = true;
- return source_->stack_buffer();
- } else {
- return std::allocator<T>::allocate(n);
- }
- }
-
- // Free: when trying to free the stack buffer, just mark it as free. For
- // non-stack-buffer pointers, just fall though to the standard allocator.
- void deallocate(pointer p, size_type n) {
- if (source_ && p == source_->stack_buffer())
- source_->used_stack_buffer_ = false;
- else
- std::allocator<T>::deallocate(p, n);
- }
-
- private:
- Source* source_;
-};
-
-// A wrapper around STL containers that maintains a stack-sized buffer that the
-// initial capacity of the vector is based on. Growing the container beyond the
-// stack capacity will transparently overflow onto the heap. The container must
-// support reserve().
-//
-// This will not work with std::string since some implementations allocate
-// more bytes than requested in calls to reserve(), forcing the allocation onto
-// the heap. http://crbug.com/709273
-//
-// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
-// type. This object is really intended to be used only internally. You'll want
-// to use the wrappers below for different types.
-template <typename TContainerType, size_t stack_capacity>
-class StackContainer {
- public:
- typedef TContainerType ContainerType;
- typedef typename ContainerType::value_type ContainedType;
- typedef StackAllocator<ContainedType, stack_capacity> Allocator;
-
- // Allocator must be constructed before the container!
- StackContainer() : allocator_(&stack_data_), container_(allocator_) {
- // Make the container use the stack allocation by reserving our buffer size
- // before doing anything else.
- container_.reserve(stack_capacity);
- }
-
- // Getters for the actual container.
- //
- // Danger: any copies of this made using the copy constructor must have
- // shorter lifetimes than the source. The copy will share the same allocator
- // and therefore the same stack buffer as the original. Use std::copy to
- // copy into a "real" container for longer-lived objects.
- ContainerType& container() {
- return container_;
- }
- const ContainerType& container() const {
- return container_;
- }
-
- // Support operator-> to get to the container. This allows nicer syntax like:
- // StackContainer<...> foo;
- // std::sort(foo->begin(), foo->end());
- ContainerType* operator->() {
- return &container_;
- }
- const ContainerType* operator->() const {
- return &container_;
- }
-
- // Retrieves the stack source so that that unit tests can verify that the
- // buffer is being used properly.
- const typename Allocator::Source& stack_data() const {
- return stack_data_;
- }
-
- protected:
- typename Allocator::Source stack_data_;
- Allocator allocator_;
- ContainerType container_;
-
- private:
- StackContainer(const StackContainer& rhs) = delete;
- StackContainer& operator=(const StackContainer& rhs) = delete;
- StackContainer(StackContainer&& rhs) = delete;
- StackContainer& operator=(StackContainer&& rhs) = delete;
-};
-
-// Range-based iteration support for StackContainer.
-template <typename TContainerType, size_t stack_capacity>
-auto begin(const StackContainer<TContainerType, stack_capacity>& stack_container)
- -> decltype(begin(stack_container.container())) {
- return begin(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
- -> decltype(begin(stack_container.container())) {
- return begin(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
- -> decltype(end(stack_container.container())) {
- return end(stack_container.container());
-}
-
-template <typename TContainerType, size_t stack_capacity>
-auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
- -> decltype(end(stack_container.container())) {
- return end(stack_container.container());
-}
-
-// StackVector -----------------------------------------------------------------
-
-// Example:
-// StackVector<int, 16> foo;
-// foo->push_back(22); // we have overloaded operator->
-// foo[0] = 10; // as well as operator[]
-template <typename T, size_t stack_capacity>
-class StackVector
- : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
- public:
- StackVector()
- : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
- }
-
- // We need to put this in STL containers sometimes, which requires a copy
- // constructor. We can't call the regular copy constructor because that will
- // take the stack buffer from the original. Here, we create an empty object
- // and make a stack buffer of its own.
- StackVector(const StackVector<T, stack_capacity>& other)
- : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
- this->container().assign(other->begin(), other->end());
- }
-
- StackVector<T, stack_capacity>& operator=(const StackVector<T, stack_capacity>& other) {
- this->container().assign(other->begin(), other->end());
- return *this;
- }
-
- // Vectors are commonly indexed, which isn't very convenient even with
- // operator-> (using "->at()" does exception stuff we don't want).
- T& operator[](size_t i) {
- return this->container().operator[](i);
- }
- const T& operator[](size_t i) const {
- return this->container().operator[](i);
- }
-
- private:
- // StackVector(const StackVector& rhs) = delete;
- // StackVector& operator=(const StackVector& rhs) = delete;
- StackVector(StackVector&& rhs) = delete;
- StackVector& operator=(StackVector&& rhs) = delete;
-};
-
-#endif // COMMON_STACKCONTAINER_H_
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.cpp b/chromium/third_party/dawn/src/common/SystemUtils.cpp
deleted file mode 100644
index 9dc066aaf29..00000000000
--- a/chromium/third_party/dawn/src/common/SystemUtils.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/SystemUtils.h"
-
-#include "common/Assert.h"
-#include "common/Log.h"
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <Windows.h>
-# include <vector>
-#elif defined(DAWN_PLATFORM_LINUX)
-# include <dlfcn.h>
-# include <limits.h>
-# include <unistd.h>
-# include <cstdlib>
-#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-# include <dlfcn.h>
-# include <mach-o/dyld.h>
-# include <vector>
-#endif
-
-#include <array>
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-const char* GetPathSeparator() {
- return "\\";
-}
-
-std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
- // First pass a size of 0 to get the size of variable value.
- DWORD sizeWithNullTerminator = GetEnvironmentVariableA(variableName, nullptr, 0);
- if (sizeWithNullTerminator == 0) {
- DWORD err = GetLastError();
- if (err != ERROR_ENVVAR_NOT_FOUND) {
- dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
- }
- return std::make_pair(std::string(), false);
- }
-
- // Then get variable value with its actual size.
- std::vector<char> buffer(sizeWithNullTerminator);
- DWORD sizeStored =
- GetEnvironmentVariableA(variableName, buffer.data(), static_cast<DWORD>(buffer.size()));
- if (sizeStored + 1 != sizeWithNullTerminator) {
- DWORD err = GetLastError();
- if (err) {
- dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
- }
- return std::make_pair(std::string(), false);
- }
- return std::make_pair(std::string(buffer.data(), sizeStored), true);
-}
-
-bool SetEnvironmentVar(const char* variableName, const char* value) {
- return SetEnvironmentVariableA(variableName, value) == TRUE;
-}
-#elif defined(DAWN_PLATFORM_POSIX)
-const char* GetPathSeparator() {
- return "/";
-}
-
-std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
- char* value = getenv(variableName);
- return value == nullptr ? std::make_pair(std::string(), false)
- : std::make_pair(std::string(value), true);
-}
-
-bool SetEnvironmentVar(const char* variableName, const char* value) {
- if (value == nullptr) {
- return unsetenv(variableName) == 0;
- }
- return setenv(variableName, value, 1) == 0;
-}
-#else
-# error "Implement Get/SetEnvironmentVar for your platform."
-#endif
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-std::string GetExecutablePath() {
- std::array<char, MAX_PATH> executableFileBuf;
- DWORD executablePathLen = GetModuleFileNameA(nullptr, executableFileBuf.data(),
- static_cast<DWORD>(executableFileBuf.size()));
- return executablePathLen > 0 ? std::string(executableFileBuf.data()) : "";
-}
-#elif defined(DAWN_PLATFORM_LINUX)
-std::string GetExecutablePath() {
- std::array<char, PATH_MAX> path;
- ssize_t result = readlink("/proc/self/exe", path.data(), PATH_MAX - 1);
- if (result < 0 || static_cast<size_t>(result) >= PATH_MAX - 1) {
- return "";
- }
-
- path[result] = '\0';
- return path.data();
-}
-#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-std::string GetExecutablePath() {
- uint32_t size = 0;
- _NSGetExecutablePath(nullptr, &size);
-
- std::vector<char> buffer(size + 1);
- if (_NSGetExecutablePath(buffer.data(), &size) != 0) {
- return "";
- }
-
- buffer[size] = '\0';
- return buffer.data();
-}
-#elif defined(DAWN_PLATFORM_FUCHSIA)
-std::string GetExecutablePath() {
- // TODO: Implement on Fuchsia
- return "";
-}
-#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
-std::string GetExecutablePath() {
- UNREACHABLE();
- return "";
-}
-#else
-# error "Implement GetExecutablePath for your platform."
-#endif
-
-std::string GetExecutableDirectory() {
- std::string exePath = GetExecutablePath();
- size_t lastPathSepLoc = exePath.find_last_of(GetPathSeparator());
- return lastPathSepLoc != std::string::npos ? exePath.substr(0, lastPathSepLoc + 1) : "";
-}
-
-#if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-std::string GetModulePath() {
- static int placeholderSymbol = 0;
- Dl_info dlInfo;
- if (dladdr(&placeholderSymbol, &dlInfo) == 0) {
- return "";
- }
-
- std::array<char, PATH_MAX> absolutePath;
- if (realpath(dlInfo.dli_fname, absolutePath.data()) == NULL) {
- return "";
- }
- return absolutePath.data();
-}
-#elif defined(DAWN_PLATFORM_WINDOWS)
-std::string GetModulePath() {
- UNREACHABLE();
- return "";
-}
-#elif defined(DAWN_PLATFORM_FUCHSIA)
-std::string GetModulePath() {
- UNREACHABLE();
- return "";
-}
-#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
-std::string GetModulePath() {
- UNREACHABLE();
- return "";
-}
-#else
-# error "Implement GetModulePath for your platform."
-#endif
-
-std::string GetModuleDirectory() {
- std::string modPath = GetModulePath();
- size_t lastPathSepLoc = modPath.find_last_of(GetPathSeparator());
- return lastPathSepLoc != std::string::npos ? modPath.substr(0, lastPathSepLoc + 1) : "";
-}
-
-// ScopedEnvironmentVar
-
-ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
- : mName(variableName),
- mOriginalValue(GetEnvironmentVar(variableName)),
- mIsSet(SetEnvironmentVar(variableName, value)) {
-}
-
-ScopedEnvironmentVar::~ScopedEnvironmentVar() {
- if (mIsSet) {
- bool success = SetEnvironmentVar(
- mName.c_str(), mOriginalValue.second ? mOriginalValue.first.c_str() : nullptr);
- // If we set the environment variable in the constructor, we should never fail restoring it.
- ASSERT(success);
- }
-}
-
-bool ScopedEnvironmentVar::Set(const char* variableName, const char* value) {
- ASSERT(!mIsSet);
- mName = variableName;
- mOriginalValue = GetEnvironmentVar(variableName);
- mIsSet = SetEnvironmentVar(variableName, value);
- return mIsSet;
-}
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.h b/chromium/third_party/dawn/src/common/SystemUtils.h
deleted file mode 100644
index 875c5aae31a..00000000000
--- a/chromium/third_party/dawn/src/common/SystemUtils.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_SYSTEMUTILS_H_
-#define COMMON_SYSTEMUTILS_H_
-
-#include "common/Platform.h"
-
-#include <string>
-
-const char* GetPathSeparator();
-// Returns a pair of the environment variable's value, and a boolean indicating whether the variable
-// was present.
-std::pair<std::string, bool> GetEnvironmentVar(const char* variableName);
-bool SetEnvironmentVar(const char* variableName, const char* value);
-// Directories are always returned with a trailing path separator.
-std::string GetExecutableDirectory();
-std::string GetModuleDirectory();
-
-#ifdef DAWN_PLATFORM_MACOS
-void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion = nullptr);
-bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion = 0);
-#endif
-
-class ScopedEnvironmentVar {
- public:
- ScopedEnvironmentVar() = default;
- ScopedEnvironmentVar(const char* variableName, const char* value);
- ~ScopedEnvironmentVar();
-
- ScopedEnvironmentVar(const ScopedEnvironmentVar& rhs) = delete;
- ScopedEnvironmentVar& operator=(const ScopedEnvironmentVar& rhs) = delete;
-
- bool Set(const char* variableName, const char* value);
-
- private:
- std::string mName;
- std::pair<std::string, bool> mOriginalValue;
- bool mIsSet = false;
-};
-
-#endif // COMMON_SYSTEMUTILS_H_
diff --git a/chromium/third_party/dawn/src/common/SystemUtils_mac.mm b/chromium/third_party/dawn/src/common/SystemUtils_mac.mm
deleted file mode 100644
index 9bd27d239a4..00000000000
--- a/chromium/third_party/dawn/src/common/SystemUtils_mac.mm
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/SystemUtils.h"
-
-#include "common/Assert.h"
-
-#import <Foundation/NSProcessInfo.h>
-
-void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion) {
- NSOperatingSystemVersion version = [[NSProcessInfo processInfo] operatingSystemVersion];
- ASSERT(majorVersion != nullptr);
- *majorVersion = version.majorVersion;
- if (minorVersion != nullptr) {
- *minorVersion = version.minorVersion;
- }
-}
-
-bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion) {
- return
- [NSProcessInfo.processInfo isOperatingSystemAtLeastVersion:{majorVersion, minorVersion, 0}];
-}
diff --git a/chromium/third_party/dawn/src/common/TypedInteger.h b/chromium/third_party/dawn/src/common/TypedInteger.h
deleted file mode 100644
index fd458952835..00000000000
--- a/chromium/third_party/dawn/src/common/TypedInteger.h
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_TYPEDINTEGER_H_
-#define COMMON_TYPEDINTEGER_H_
-
-#include "common/Assert.h"
-#include "common/UnderlyingType.h"
-
-#include <limits>
-#include <type_traits>
-
-// TypedInteger is helper class that provides additional type safety in Debug.
-// - Integers of different (Tag, BaseIntegerType) may not be used interoperably
-// - Allows casts only to the underlying type.
-// - Integers of the same (Tag, BaseIntegerType) may be compared or assigned.
-// This class helps ensure that the many types of indices in Dawn aren't mixed up and used
-// interchangably.
-// In Release builds, when DAWN_ENABLE_ASSERTS is not defined, TypedInteger is a passthrough
-// typedef of the underlying type.
-//
-// Example:
-// using UintA = TypedInteger<struct TypeA, uint32_t>;
-// using UintB = TypedInteger<struct TypeB, uint32_t>;
-//
-// in Release:
-// using UintA = uint32_t;
-// using UintB = uint32_t;
-//
-// in Debug:
-// using UintA = detail::TypedIntegerImpl<struct TypeA, uint32_t>;
-// using UintB = detail::TypedIntegerImpl<struct TypeB, uint32_t>;
-//
-// Assignment, construction, comparison, and arithmetic with TypedIntegerImpl are allowed
-// only for typed integers of exactly the same type. Further, they must be
-// created / cast explicitly; there is no implicit conversion.
-//
-// UintA a(2);
-// uint32_t aValue = static_cast<uint32_t>(a);
-//
-namespace detail {
- template <typename Tag, typename T>
- class TypedIntegerImpl;
-} // namespace detail
-
-template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
-#if defined(DAWN_ENABLE_ASSERTS)
-using TypedInteger = detail::TypedIntegerImpl<Tag, T>;
-#else
-using TypedInteger = T;
-#endif
-
-namespace detail {
- template <typename Tag, typename T>
- class alignas(T) TypedIntegerImpl {
- static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
- T mValue;
-
- public:
- constexpr TypedIntegerImpl() : mValue(0) {
- static_assert(alignof(TypedIntegerImpl) == alignof(T), "");
- static_assert(sizeof(TypedIntegerImpl) == sizeof(T), "");
- }
-
- // Construction from non-narrowing integral types.
- template <typename I,
- typename = std::enable_if_t<
- std::is_integral<I>::value &&
- std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
- std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
- explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
- }
-
- // Allow explicit casts only to the underlying type. If you're casting out of an
- // TypedInteger, you should know what what you're doing, and exactly what type you
- // expect.
- explicit constexpr operator T() const {
- return static_cast<T>(this->mValue);
- }
-
-// Same-tag TypedInteger comparison operators
-#define TYPED_COMPARISON(op) \
- constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
- return mValue op rhs.mValue; \
- }
- TYPED_COMPARISON(<)
- TYPED_COMPARISON(<=)
- TYPED_COMPARISON(>)
- TYPED_COMPARISON(>=)
- TYPED_COMPARISON(==)
- TYPED_COMPARISON(!=)
-#undef TYPED_COMPARISON
-
- // Increment / decrement operators for for-loop iteration
- constexpr TypedIntegerImpl& operator++() {
- ASSERT(this->mValue < std::numeric_limits<T>::max());
- ++this->mValue;
- return *this;
- }
-
- constexpr TypedIntegerImpl operator++(int) {
- TypedIntegerImpl ret = *this;
-
- ASSERT(this->mValue < std::numeric_limits<T>::max());
- ++this->mValue;
- return ret;
- }
-
- constexpr TypedIntegerImpl& operator--() {
- assert(this->mValue > std::numeric_limits<T>::min());
- --this->mValue;
- return *this;
- }
-
- constexpr TypedIntegerImpl operator--(int) {
- TypedIntegerImpl ret = *this;
-
- ASSERT(this->mValue > std::numeric_limits<T>::min());
- --this->mValue;
- return ret;
- }
-
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
- AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value, "");
-
- // Overflow would wrap around
- ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
- return lhs.mValue + rhs.mValue;
- }
-
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
- AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value, "");
-
- if (lhs.mValue > 0) {
- // rhs is positive: |rhs| is at most the distance between max and |lhs|.
- // rhs is negative: (positive + negative) won't overflow
- ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
- } else {
- // rhs is postive: (negative + positive) won't underflow
- // rhs is negative: |rhs| isn't less than the (negative) distance between min
- // and |lhs|
- ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
- }
- return lhs.mValue + rhs.mValue;
- }
-
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
- SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value, "");
-
- // Overflow would wrap around
- ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
- return lhs.mValue - rhs.mValue;
- }
-
- template <typename T2 = T>
- static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
- TypedIntegerImpl<Tag, T> lhs,
- TypedIntegerImpl<Tag, T2> rhs) {
- static_assert(std::is_same<T, T2>::value, "");
-
- if (lhs.mValue > 0) {
- // rhs is positive: positive minus positive won't overflow
- // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
- // and max.
- ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
- } else {
- // rhs is positive: |rhs| is at most the distance between min and |lhs|
- // rhs is negative: negative minus negative won't overflow
- ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
- }
- return lhs.mValue - rhs.mValue;
- }
-
- template <typename T2 = T>
- constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
- static_assert(std::is_same<T, T2>::value, "");
- // The negation of the most negative value cannot be represented.
- ASSERT(this->mValue != std::numeric_limits<T>::min());
- return TypedIntegerImpl(-this->mValue);
- }
-
- constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
- auto result = AddImpl(*this, rhs);
- static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
- return TypedIntegerImpl(result);
- }
-
- constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
- auto result = SubImpl(*this, rhs);
- static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
- return TypedIntegerImpl(result);
- }
- };
-
-} // namespace detail
-
-namespace std {
-
- template <typename Tag, typename T>
- class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
- public:
- static detail::TypedIntegerImpl<Tag, T> max() noexcept {
- return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
- }
- static detail::TypedIntegerImpl<Tag, T> min() noexcept {
- return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
- }
- };
-
-} // namespace std
-
-namespace ityp {
-
- // These helpers below are provided since the default arithmetic operators for small integer
- // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
- // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
- // ityp::Sub(a, b) instead.
-
- template <typename Tag, typename T>
- constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
- ::detail::TypedIntegerImpl<Tag, T> rhs) {
- return ::detail::TypedIntegerImpl<Tag, T>(
- static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
- }
-
- template <typename Tag, typename T>
- constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
- ::detail::TypedIntegerImpl<Tag, T> rhs) {
- return ::detail::TypedIntegerImpl<Tag, T>(
- static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
- }
-
- template <typename T>
- constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
- return static_cast<T>(lhs + rhs);
- }
-
- template <typename T>
- constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
- return static_cast<T>(lhs - rhs);
- }
-
-} // namespace ityp
-
-#endif // COMMON_TYPEDINTEGER_H_
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.cpp b/chromium/third_party/dawn/src/common/WindowsUtils.cpp
deleted file mode 100644
index f4aef7cb3dd..00000000000
--- a/chromium/third_party/dawn/src/common/WindowsUtils.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/WindowsUtils.h"
-
-#include "common/windows_with_undefs.h"
-
-#include <memory>
-
-std::string WCharToUTF8(const wchar_t* input) {
- // The -1 argument asks WideCharToMultiByte to use the null terminator to know the size of
- // input. It will return a size that includes the null terminator.
- int requiredSize = WideCharToMultiByte(CP_UTF8, 0, input, -1, nullptr, 0, nullptr, nullptr);
-
- // When we can use C++17 this can be changed to use string.data() instead.
- std::unique_ptr<char[]> result = std::make_unique<char[]>(requiredSize);
- WideCharToMultiByte(CP_UTF8, 0, input, -1, result.get(), requiredSize, nullptr, nullptr);
-
- // This will allocate the returned std::string and then destroy result.
- return std::string(result.get(), result.get() + (requiredSize - 1));
-}
-
-std::wstring UTF8ToWStr(const char* input) {
- // The -1 argument asks MultiByteToWideChar to use the null terminator to know the size of
- // input. It will return a size that includes the null terminator.
- int requiredSize = MultiByteToWideChar(CP_UTF8, 0, input, -1, nullptr, 0);
-
- // When we can use C++17 this can be changed to use string.data() instead.
- std::unique_ptr<wchar_t[]> result = std::make_unique<wchar_t[]>(requiredSize);
- MultiByteToWideChar(CP_UTF8, 0, input, -1, result.get(), requiredSize);
-
- // This will allocate the returned std::string and then destroy result.
- return std::wstring(result.get(), result.get() + (requiredSize - 1));
-}
diff --git a/chromium/third_party/dawn/src/common/ityp_array.h b/chromium/third_party/dawn/src/common/ityp_array.h
deleted file mode 100644
index 68b428d5b05..00000000000
--- a/chromium/third_party/dawn/src/common/ityp_array.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ITYP_ARRAY_H_
-#define COMMON_ITYP_ARRAY_H_
-
-#include "common/TypedInteger.h"
-#include "common/UnderlyingType.h"
-
-#include <array>
-#include <cstddef>
-#include <type_traits>
-
-namespace ityp {
-
- // ityp::array is a helper class that wraps std::array with the restriction that
- // indices must be a particular type |Index|. Dawn uses multiple flat maps of
- // index-->data, and this class helps ensure an indices cannot be passed interchangably
- // to a flat map of a different type.
- template <typename Index, typename Value, size_t Size>
- class array : private std::array<Value, Size> {
- using I = UnderlyingType<Index>;
- using Base = std::array<Value, Size>;
-
- static_assert(Size <= std::numeric_limits<I>::max(), "");
-
- public:
- constexpr array() = default;
-
- template <typename... Values>
- constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
- }
-
- Value& operator[](Index i) {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::operator[](index);
- }
-
- constexpr const Value& operator[](Index i) const {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::operator[](index);
- }
-
- Value& at(Index i) {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::at(index);
- }
-
- constexpr const Value& at(Index i) const {
- I index = static_cast<I>(i);
- ASSERT(index >= 0 && index < I(Size));
- return Base::at(index);
- }
-
- typename Base::iterator begin() noexcept {
- return Base::begin();
- }
-
- typename Base::const_iterator begin() const noexcept {
- return Base::begin();
- }
-
- typename Base::iterator end() noexcept {
- return Base::end();
- }
-
- typename Base::const_iterator end() const noexcept {
- return Base::end();
- }
-
- constexpr Index size() const {
- return Index(I(Size));
- }
-
- using Base::back;
- using Base::data;
- using Base::empty;
- using Base::fill;
- using Base::front;
- };
-
-} // namespace ityp
-
-#endif // COMMON_ITYP_ARRAY_H_
diff --git a/chromium/third_party/dawn/src/common/ityp_bitset.h b/chromium/third_party/dawn/src/common/ityp_bitset.h
deleted file mode 100644
index 339cf182937..00000000000
--- a/chromium/third_party/dawn/src/common/ityp_bitset.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ITYP_BITSET_H_
-#define COMMON_ITYP_BITSET_H_
-
-#include "common/BitSetIterator.h"
-#include "common/TypedInteger.h"
-#include "common/UnderlyingType.h"
-
-namespace ityp {
-
- // ityp::bitset is a helper class that wraps std::bitset with the restriction that
- // indices must be a particular type |Index|.
- template <typename Index, size_t N>
- class bitset : private std::bitset<N> {
- using I = UnderlyingType<Index>;
- using Base = std::bitset<N>;
-
- static_assert(sizeof(I) <= sizeof(size_t), "");
-
- constexpr bitset(const Base& rhs) : Base(rhs) {
- }
-
- public:
- constexpr bitset() noexcept : Base() {
- }
-
- constexpr bitset(unsigned long long value) noexcept : Base(value) {
- }
-
- constexpr bool operator[](Index i) const {
- return Base::operator[](static_cast<I>(i));
- }
-
- typename Base::reference operator[](Index i) {
- return Base::operator[](static_cast<I>(i));
- }
-
- bool test(Index i) const {
- return Base::test(static_cast<I>(i));
- }
-
- using Base::all;
- using Base::any;
- using Base::count;
- using Base::none;
- using Base::size;
-
- bool operator==(const bitset& other) const noexcept {
- return Base::operator==(static_cast<const Base&>(other));
- }
-
- bool operator!=(const bitset& other) const noexcept {
- return Base::operator!=(static_cast<const Base&>(other));
- }
-
- bitset& operator&=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
- }
-
- bitset& operator|=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
- }
-
- bitset& operator^=(const bitset& other) noexcept {
- return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
- }
-
- bitset operator~() const noexcept {
- return bitset(*this).flip();
- }
-
- bitset& set() noexcept {
- return static_cast<bitset&>(Base::set());
- }
-
- bitset& set(Index i, bool value = true) {
- return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
- }
-
- bitset& reset() noexcept {
- return static_cast<bitset&>(Base::reset());
- }
-
- bitset& reset(Index i) {
- return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
- }
-
- bitset& flip() noexcept {
- return static_cast<bitset&>(Base::flip());
- }
-
- bitset& flip(Index i) {
- return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
- }
-
- using Base::to_string;
- using Base::to_ullong;
- using Base::to_ulong;
-
- friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
- }
-
- friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
- }
-
- friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
- return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
- }
-
- friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
- return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
- }
-
- friend struct std::hash<bitset>;
- };
-
-} // namespace ityp
-
-#endif // COMMON_ITYP_BITSET_H_
diff --git a/chromium/third_party/dawn/src/common/ityp_span.h b/chromium/third_party/dawn/src/common/ityp_span.h
deleted file mode 100644
index 00ba93f7503..00000000000
--- a/chromium/third_party/dawn/src/common/ityp_span.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ITYP_SPAN_H_
-#define COMMON_ITYP_SPAN_H_
-
-#include "common/TypedInteger.h"
-#include "common/UnderlyingType.h"
-
-#include <type_traits>
-
-namespace ityp {
-
- // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
- // It stores the size and pointer to first element. It has the restriction that
- // indices must be a particular type |Index|. This provides a type-safe way to index
- // raw pointers.
- template <typename Index, typename Value>
- class span {
- using I = UnderlyingType<Index>;
-
- public:
- constexpr span() : mData(nullptr), mSize(0) {
- }
- constexpr span(Value* data, Index size) : mData(data), mSize(size) {
- }
-
- constexpr Value& operator[](Index i) const {
- ASSERT(i < mSize);
- return mData[static_cast<I>(i)];
- }
-
- Value* data() noexcept {
- return mData;
- }
-
- const Value* data() const noexcept {
- return mData;
- }
-
- Value* begin() noexcept {
- return mData;
- }
-
- const Value* begin() const noexcept {
- return mData;
- }
-
- Value* end() noexcept {
- return mData + static_cast<I>(mSize);
- }
-
- const Value* end() const noexcept {
- return mData + static_cast<I>(mSize);
- }
-
- Value& front() {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *mData;
- }
-
- const Value& front() const {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *mData;
- }
-
- Value& back() {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *(mData + static_cast<I>(mSize) - 1);
- }
-
- const Value& back() const {
- ASSERT(mData != nullptr);
- ASSERT(static_cast<I>(mSize) >= 0);
- return *(mData + static_cast<I>(mSize) - 1);
- }
-
- Index size() const {
- return mSize;
- }
-
- private:
- Value* mData;
- Index mSize;
- };
-
-} // namespace ityp
-
-#endif // COMMON_ITYP_SPAN_H_
diff --git a/chromium/third_party/dawn/src/common/ityp_stack_vec.h b/chromium/third_party/dawn/src/common/ityp_stack_vec.h
deleted file mode 100644
index b88888b7897..00000000000
--- a/chromium/third_party/dawn/src/common/ityp_stack_vec.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ITYP_STACK_VEC_H_
-#define COMMON_ITYP_STACK_VEC_H_
-
-#include "common/Assert.h"
-#include "common/StackContainer.h"
-#include "common/UnderlyingType.h"
-
-namespace ityp {
-
- template <typename Index, typename Value, size_t StaticCapacity>
- class stack_vec : private StackVector<Value, StaticCapacity> {
- using I = UnderlyingType<Index>;
- using Base = StackVector<Value, StaticCapacity>;
- using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
- static_assert(StaticCapacity <= std::numeric_limits<I>::max(), "");
-
- public:
- stack_vec() : Base() {
- }
- stack_vec(Index size) : Base() {
- this->container().resize(static_cast<I>(size));
- }
-
- Value& operator[](Index i) {
- ASSERT(i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- constexpr const Value& operator[](Index i) const {
- ASSERT(i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- void resize(Index size) {
- this->container().resize(static_cast<I>(size));
- }
-
- void reserve(Index size) {
- this->container().reserve(static_cast<I>(size));
- }
-
- Value* data() {
- return this->container().data();
- }
-
- const Value* data() const {
- return this->container().data();
- }
-
- typename VectorBase::iterator begin() noexcept {
- return this->container().begin();
- }
-
- typename VectorBase::const_iterator begin() const noexcept {
- return this->container().begin();
- }
-
- typename VectorBase::iterator end() noexcept {
- return this->container().end();
- }
-
- typename VectorBase::const_iterator end() const noexcept {
- return this->container().end();
- }
-
- typename VectorBase::reference front() {
- return this->container().front();
- }
-
- typename VectorBase::const_reference front() const {
- return this->container().front();
- }
-
- typename VectorBase::reference back() {
- return this->container().back();
- }
-
- typename VectorBase::const_reference back() const {
- return this->container().back();
- }
-
- Index size() const {
- return Index(static_cast<I>(this->container().size()));
- }
- };
-
-} // namespace ityp
-
-#endif // COMMON_ITYP_STACK_VEC_H_
diff --git a/chromium/third_party/dawn/src/common/ityp_vector.h b/chromium/third_party/dawn/src/common/ityp_vector.h
deleted file mode 100644
index a747d5aeb53..00000000000
--- a/chromium/third_party/dawn/src/common/ityp_vector.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_ITYP_VECTOR_H_
-#define COMMON_ITYP_VECTOR_H_
-
-#include "common/TypedInteger.h"
-#include "common/UnderlyingType.h"
-
-#include <type_traits>
-#include <vector>
-
-namespace ityp {
-
- // ityp::vector is a helper class that wraps std::vector with the restriction that
- // indices must be a particular type |Index|.
- template <typename Index, typename Value>
- class vector : public std::vector<Value> {
- using I = UnderlyingType<Index>;
- using Base = std::vector<Value>;
-
- private:
- // Disallow access to base constructors and untyped index/size-related operators.
- using Base::Base;
- using Base::operator=;
- using Base::operator[];
- using Base::at;
- using Base::reserve;
- using Base::resize;
- using Base::size;
-
- public:
- vector() : Base() {
- }
-
- explicit vector(Index size) : Base(static_cast<I>(size)) {
- }
-
- vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
- }
-
- vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
- }
-
- vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
- }
-
- vector(std::initializer_list<Value> init) : Base(init) {
- }
-
- vector& operator=(const vector& rhs) {
- Base::operator=(static_cast<const Base&>(rhs));
- return *this;
- }
-
- vector& operator=(vector&& rhs) noexcept {
- Base::operator=(static_cast<Base&&>(rhs));
- return *this;
- }
-
- Value& operator[](Index i) {
- ASSERT(i >= Index(0) && i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- constexpr const Value& operator[](Index i) const {
- ASSERT(i >= Index(0) && i < size());
- return Base::operator[](static_cast<I>(i));
- }
-
- Value& at(Index i) {
- ASSERT(i >= Index(0) && i < size());
- return Base::at(static_cast<I>(i));
- }
-
- constexpr const Value& at(Index i) const {
- ASSERT(i >= Index(0) && i < size());
- return Base::at(static_cast<I>(i));
- }
-
- constexpr Index size() const {
- ASSERT(std::numeric_limits<I>::max() >= Base::size());
- return Index(static_cast<I>(Base::size()));
- }
-
- void resize(Index size) {
- Base::resize(static_cast<I>(size));
- }
-
- void reserve(Index size) {
- Base::reserve(static_cast<I>(size));
- }
- };
-
-} // namespace ityp
-
-#endif // COMMON_ITYP_VECTOR_H_
diff --git a/chromium/third_party/dawn/src/common/vulkan_platform.h b/chromium/third_party/dawn/src/common/vulkan_platform.h
deleted file mode 100644
index 9e4ea0ca91e..00000000000
--- a/chromium/third_party/dawn/src/common/vulkan_platform.h
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_VULKANPLATFORM_H_
-#define COMMON_VULKANPLATFORM_H_
-
-#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
-# error "vulkan_platform.h included without the Vulkan backend enabled"
-#endif
-#if defined(VULKAN_CORE_H_)
-# error "vulkan.h included before vulkan_platform.h"
-#endif
-
-#include "common/Platform.h"
-
-#include <cstddef>
-#include <cstdint>
-
-// vulkan.h defines non-dispatchable handles to opaque pointers on 64bit architectures and uint64_t
-// on 32bit architectures. This causes a problem in 32bit where the handles cannot be used to
-// distinguish between overloads of the same function.
-// Change the definition of non-dispatchable handles to be opaque structures containing a uint64_t
-// and overload the comparison operators between themselves and VK_NULL_HANDLE (which will be
-// redefined to be nullptr). This keeps the type-safety of having the handles be different types
-// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
-
-#if defined(DAWN_PLATFORM_64_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
-// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t from uint64_t
-// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
-template <typename T>
-T NativeNonDispatachableHandleFromU64(uint64_t u64) {
- return reinterpret_cast<T>(u64);
-}
-#elif defined(DAWN_PLATFORM_32_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
-template <typename T>
-T NativeNonDispatachableHandleFromU64(uint64_t u64) {
- return u64;
-}
-#else
-# error "Unsupported platform"
-#endif
-
-// Define a dummy Vulkan handle for use before we include vulkan.h
-DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
-
-// Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so
-// why bother with the wrapper struct? It turns out that on Linux Intel x86 alignof(uint64_t) is 8
-// but alignof(struct{uint64_t a;}) is 4. This is because this Intel ABI doesn't say anything about
-// double-word alignment so for historical reasons compilers violated the standard and use an
-// alignment of 4 for uint64_t (and double) inside structures.
-// See https://stackoverflow.com/questions/44877185
-// One way to get the alignment inside structures of a type is to look at the alignment of it
-// wrapped in a structure. Hence VkSameHandleNativeWrappe
-
-namespace dawn_native { namespace vulkan {
-
- namespace detail {
- template <typename T>
- struct WrapperStruct {
- T member;
- };
-
- template <typename T>
- static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
-
- static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
- static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
-
- // Simple handle types that supports "nullptr_t" as a 0 value.
- template <typename Tag, typename HandleType>
- class alignas(detail::kNativeVkHandleAlignment) VkHandle {
- public:
- // Default constructor and assigning of VK_NULL_HANDLE
- VkHandle() = default;
- VkHandle(std::nullptr_t) {
- }
-
- // Use default copy constructor/assignment
- VkHandle(const VkHandle<Tag, HandleType>& other) = default;
- VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
-
- // Comparisons between handles
- bool operator==(VkHandle<Tag, HandleType> other) const {
- return mHandle == other.mHandle;
- }
- bool operator!=(VkHandle<Tag, HandleType> other) const {
- return mHandle != other.mHandle;
- }
-
- // Comparisons between handles and VK_NULL_HANDLE
- bool operator==(std::nullptr_t) const {
- return mHandle == 0;
- }
- bool operator!=(std::nullptr_t) const {
- return mHandle != 0;
- }
-
- // Implicit conversion to real Vulkan types.
- operator HandleType() const {
- return GetHandle();
- }
-
- HandleType GetHandle() const {
- return mHandle;
- }
-
- HandleType& operator*() {
- return mHandle;
- }
-
- static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
- return VkHandle{handle};
- }
-
- private:
- explicit VkHandle(HandleType handle) : mHandle(handle) {
- }
-
- HandleType mHandle = 0;
- };
- } // namespace detail
-
- static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-
- template <typename Tag, typename HandleType>
- HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
- return reinterpret_cast<HandleType*>(handle);
- }
-
-}} // namespace dawn_native::vulkan
-
-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
- DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
- namespace dawn_native { namespace vulkan { \
- using object = detail::VkHandle<struct VkTag##object, ::object>; \
- static_assert(sizeof(object) == sizeof(uint64_t), ""); \
- static_assert(alignof(object) == detail::kUint64Alignment, ""); \
- static_assert(sizeof(object) == sizeof(::object), ""); \
- static_assert(alignof(object) == detail::kNativeVkHandleAlignment, ""); \
- } \
- } // namespace dawn_native::vulkan
-
-// Import additional parts of Vulkan that are supported on our architecture and preemptively include
-// headers that vulkan.h includes that we have "undefs" for.
-#if defined(DAWN_PLATFORM_WINDOWS)
-# define VK_USE_PLATFORM_WIN32_KHR
-# include "common/windows_with_undefs.h"
-#endif // DAWN_PLATFORM_WINDOWS
-
-#if defined(DAWN_USE_X11)
-# define VK_USE_PLATFORM_XLIB_KHR
-# define VK_USE_PLATFORM_XCB_KHR
-# include "common/xlib_with_undefs.h"
-#endif // defined(DAWN_USE_X11)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
-# define VK_USE_PLATFORM_METAL_EXT
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_PLATFORM_ANDROID)
-# define VK_USE_PLATFORM_ANDROID_KHR
-#endif // defined(DAWN_PLATFORM_ANDROID)
-
-#if defined(DAWN_PLATFORM_FUCHSIA)
-# define VK_USE_PLATFORM_FUCHSIA
-#endif // defined(DAWN_PLATFORM_FUCHSIA)
-
-// The actual inclusion of vulkan.h!
-#define VK_NO_PROTOTYPES
-#include <vulkan/vulkan.h>
-
-// Redefine VK_NULL_HANDLE for better type safety where possible.
-#undef VK_NULL_HANDLE
-#if defined(DAWN_PLATFORM_64_BIT)
-static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-#elif defined(DAWN_PLATFORM_32_BIT)
-static constexpr uint64_t VK_NULL_HANDLE = 0;
-#else
-# error "Unsupported platform"
-#endif
-
-#endif // COMMON_VULKANPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/common/windows_with_undefs.h b/chromium/third_party/dawn/src/common/windows_with_undefs.h
deleted file mode 100644
index 39c9cadb9ae..00000000000
--- a/chromium/third_party/dawn/src/common/windows_with_undefs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_WINDOWS_WITH_UNDEFS_H_
-#define COMMON_WINDOWS_WITH_UNDEFS_H_
-
-#include "common/Platform.h"
-
-#if !defined(DAWN_PLATFORM_WINDOWS)
-# error "windows_with_undefs.h included on non-Windows"
-#endif
-
-// This header includes <windows.h> but removes all the extra defines that conflict with identifiers
-// in internal code. It should never be included in something that is part of the public interface.
-#include <windows.h>
-
-// Macros defined for ANSI / Unicode support
-#undef CreateWindow
-#undef GetMessage
-
-// Macros defined to produce compiler intrinsics
-#undef MemoryBarrier
-
-// Macro defined as an alias of GetTickCount
-#undef GetCurrentTime
-
-#endif // COMMON_WINDOWS_WITH_UNDEFS_H_
diff --git a/chromium/third_party/dawn/src/common/xlib_with_undefs.h b/chromium/third_party/dawn/src/common/xlib_with_undefs.h
deleted file mode 100644
index 34889aab5d5..00000000000
--- a/chromium/third_party/dawn/src/common/xlib_with_undefs.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_XLIB_WITH_UNDEFS_H_
-#define COMMON_XLIB_WITH_UNDEFS_H_
-
-#include "common/Platform.h"
-
-#if !defined(DAWN_PLATFORM_LINUX)
-# error "xlib_with_undefs.h included on non-Linux"
-#endif
-
-// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
-// identifiers in internal code. It should never be included in something that is part of the public
-// interface.
-#include <X11/Xlib.h>
-
-// Xlib-xcb.h technically includes Xlib.h but we separate the includes to make it more clear what
-// the problem is if one of these two includes fail.
-#include <X11/Xlib-xcb.h>
-
-#undef Success
-#undef None
-#undef Always
-#undef Bool
-
-using XErrorHandler = int (*)(Display*, XErrorEvent*);
-
-#endif // COMMON_XLIB_WITH_UNDEFS_H_
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index feddfeecf14..67991adbac9 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -18,85 +18,43 @@ import("${dawn_root}/generator/dawn_generator.gni")
import("${dawn_root}/scripts/dawn_component.gni")
###############################################################################
-# Dawn headers
-###############################################################################
-
-dawn_json_generator("dawn_headers_gen") {
- target = "dawn_headers"
- outputs = [
- "src/include/dawn/dawn_proc_table.h",
- "src/include/dawn/webgpu.h",
- ]
-}
-
-source_set("dawn_headers") {
- all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
- public_deps = [ ":dawn_headers_gen" ]
-
- sources = get_target_outputs(":dawn_headers_gen")
- sources += [ "${dawn_root}/src/include/dawn/dawn_wsi.h" ]
-}
-
-###############################################################################
-# Dawn C++ headers
-###############################################################################
-
-dawn_json_generator("dawncpp_headers_gen") {
- target = "dawncpp_headers"
- outputs = [
- "src/include/dawn/webgpu_cpp.h",
- "src/include/dawn/webgpu_cpp_print.h",
- ]
-}
-
-source_set("dawncpp_headers") {
- public_deps = [
- ":dawn_headers",
- ":dawncpp_headers_gen",
- ]
-
- sources = get_target_outputs(":dawncpp_headers_gen")
- sources += [ "${dawn_root}/src/include/dawn/EnumClassBitmasks.h" ]
-}
-
-###############################################################################
# Dawn C++ wrapper
###############################################################################
-dawn_json_generator("dawncpp_gen") {
- target = "dawncpp"
+dawn_json_generator("cpp_gen") {
+ target = "cpp"
outputs = [ "src/dawn/webgpu_cpp.cpp" ]
}
-source_set("dawncpp") {
+source_set("cpp") {
deps = [
- ":dawncpp_gen",
- ":dawncpp_headers",
+ ":cpp_gen",
+ "${dawn_root}/include/dawn:cpp_headers",
]
- sources = get_target_outputs(":dawncpp_gen")
+ sources = get_target_outputs(":cpp_gen")
}
###############################################################################
-# dawn_proc
+# Dawn proc
###############################################################################
-dawn_json_generator("dawn_proc_gen") {
- target = "dawn_proc"
+dawn_json_generator("proc_gen") {
+ target = "proc"
outputs = [
"src/dawn/dawn_proc.c",
"src/dawn/dawn_thread_dispatch_proc.cpp",
]
}
-dawn_component("dawn_proc") {
+dawn_component("proc") {
DEFINE_PREFIX = "WGPU"
- public_deps = [ ":dawn_headers" ]
- deps = [ ":dawn_proc_gen" ]
- sources = get_target_outputs(":dawn_proc_gen")
+ public_deps = [ "${dawn_root}/include/dawn:headers" ]
+ deps = [ ":proc_gen" ]
+ sources = get_target_outputs(":proc_gen")
sources += [
- "${dawn_root}/src/include/dawn/dawn_proc.h",
- "${dawn_root}/src/include/dawn/dawn_thread_dispatch_proc.h",
+ "${dawn_root}/include/dawn/dawn_proc.h",
+ "${dawn_root}/include/dawn/dawn_thread_dispatch_proc.h",
]
}
@@ -119,3 +77,23 @@ dawn_json_generator("emscripten_bits_gen") {
"emscripten-bits/library_webgpu_enum_tables.js",
]
}
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawncpp") {
+ public_deps = [ ":cpp" ]
+}
+group("dawncpp_headers") {
+ public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+}
+group("dawn_proc") {
+ public_deps = [ ":proc" ]
+}
+group("dawn_headers") {
+ public_deps = [ "${dawn_root}/include/dawn:headers" ]
+}
+group("dawn_cpp") {
+ public_deps = [ ":cpp" ]
+}
diff --git a/chromium/third_party/dawn/src/dawn/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
index 7d6a4f67645..578e61cbda8 100644
--- a/chromium/third_party/dawn/src/dawn/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
@@ -13,11 +13,44 @@
# limitations under the License.
###############################################################################
+# Dawn projects
+###############################################################################
+
+add_subdirectory(common)
+add_subdirectory(platform)
+add_subdirectory(native)
+add_subdirectory(wire)
+# TODO(dawn:269): Remove once the implementation-based swapchains are removed.
+add_subdirectory(utils)
+
+if (DAWN_BUILD_NODE_BINDINGS)
+ set(NODE_BINDING_DEPS
+ ${NODE_ADDON_API_DIR}
+ ${NODE_API_HEADERS_DIR}
+ ${WEBGPU_IDL_PATH}
+ )
+ foreach(DEP ${NODE_BINDING_DEPS})
+ if (NOT EXISTS ${DEP})
+ message(FATAL_ERROR
+ "DAWN_BUILD_NODE_BINDINGS requires missing dependency '${DEP}'\n"
+ "Please follow the 'Fetch dependencies' instructions at:\n"
+ "./src/dawn/node/README.md"
+ )
+ endif()
+ endforeach()
+ if (NOT CMAKE_POSITION_INDEPENDENT_CODE)
+ message(FATAL_ERROR "DAWN_BUILD_NODE_BINDINGS requires building with DAWN_ENABLE_PIC")
+ endif()
+
+ add_subdirectory(node)
+endif()
+
+###############################################################################
# Dawn headers
###############################################################################
DawnJSONGenerator(
- TARGET "dawn_headers"
+ TARGET "headers"
PRINT_NAME "Dawn headers"
RESULT_VARIABLE "DAWN_HEADERS_GEN_SOURCES"
)
@@ -42,7 +75,7 @@ target_link_libraries(dawn_headers INTERFACE dawn_public_config)
###############################################################################
DawnJSONGenerator(
- TARGET "dawncpp_headers"
+ TARGET "cpp_headers"
PRINT_NAME "Dawn C++ headers"
RESULT_VARIABLE "DAWNCPP_HEADERS_GEN_SOURCES"
)
@@ -61,7 +94,7 @@ target_link_libraries(dawncpp_headers INTERFACE dawn_headers)
###############################################################################
DawnJSONGenerator(
- TARGET "dawncpp"
+ TARGET "cpp"
PRINT_NAME "Dawn C++ wrapper"
RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
)
@@ -75,7 +108,7 @@ target_link_libraries(dawncpp PUBLIC dawncpp_headers)
###############################################################################
DawnJSONGenerator(
- TARGET "dawn_proc"
+ TARGET "proc"
PRINT_NAME "Dawn C++ wrapper"
RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
)
diff --git a/chromium/third_party/dawn/src/common/Alloc.h b/chromium/third_party/dawn/src/dawn/common/Alloc.h
index 940d5ff451f..940d5ff451f 100644
--- a/chromium/third_party/dawn/src/common/Alloc.h
+++ b/chromium/third_party/dawn/src/dawn/common/Alloc.h
diff --git a/chromium/third_party/dawn/src/dawn/common/Assert.cpp b/chromium/third_party/dawn/src/dawn/common/Assert.cpp
new file mode 100644
index 00000000000..95d2efd30b7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Assert.cpp
@@ -0,0 +1,31 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+
+#include <cstdlib>
+
+void HandleAssertionFailure(const char* file,
+ const char* function,
+ int line,
+ const char* condition) {
+ dawn::ErrorLog() << "Assertion failure at " << file << ":" << line << " (" << function
+ << "): " << condition;
+#if defined(DAWN_ABORT_ON_ASSERT)
+ abort();
+#else
+ DAWN_BREAKPOINT();
+#endif
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/Assert.h b/chromium/third_party/dawn/src/dawn/common/Assert.h
new file mode 100644
index 00000000000..e7961d7f4ed
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Assert.h
@@ -0,0 +1,80 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ASSERT_H_
+#define COMMON_ASSERT_H_
+
+#include "dawn/common/Compiler.h"
+
+// Dawn asserts to be used instead of the regular C stdlib assert function (if you don't use assert
+// yet, you should start now!). In debug ASSERT(condition) will trigger an error, otherwise in
+// release it does nothing at runtime.
+//
+// In case of name clashes (with for example a testing library), you can define the
+// DAWN_SKIP_ASSERT_SHORTHANDS to only define the DAWN_ prefixed macros.
+//
+// These asserts feature:
+// - Logging of the error with file, line and function information.
+// - Breaking in the debugger when an assert is triggered and a debugger is attached.
+// - Use the assert information to help the compiler optimizer in release builds.
+
+// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
+// points out that it looks like an owl face.
+#if defined(DAWN_COMPILER_MSVC)
+# define DAWN_ASSERT_LOOP_CONDITION (0, 0)
+#else
+# define DAWN_ASSERT_LOOP_CONDITION (0)
+#endif
+
+// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
+// expect of an assert and in release it tries to give hints to make the compiler generate better
+// code.
+#if defined(DAWN_ENABLE_ASSERTS)
+# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+ do { \
+ if (!(condition)) { \
+ HandleAssertionFailure(file, func, line, #condition); \
+ } \
+ } while (DAWN_ASSERT_LOOP_CONDITION)
+#else
+# if defined(DAWN_COMPILER_MSVC)
+# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
+# elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
+# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
+# else
+# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+ do { \
+ DAWN_UNUSED(sizeof(condition)); \
+ } while (DAWN_ASSERT_LOOP_CONDITION)
+# endif
+#endif
+
+#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
+#define DAWN_UNREACHABLE() \
+ do { \
+ DAWN_ASSERT(DAWN_ASSERT_LOOP_CONDITION && "Unreachable code hit"); \
+ DAWN_BUILTIN_UNREACHABLE(); \
+ } while (DAWN_ASSERT_LOOP_CONDITION)
+
+#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
+# define ASSERT DAWN_ASSERT
+# define UNREACHABLE DAWN_UNREACHABLE
+#endif
+
+void HandleAssertionFailure(const char* file,
+ const char* function,
+ int line,
+ const char* condition);
+
+#endif // COMMON_ASSERT_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/BUILD.gn b/chromium/third_party/dawn/src/dawn/common/BUILD.gn
new file mode 100644
index 00000000000..8a15f024a16
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/BUILD.gn
@@ -0,0 +1,250 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//build_overrides/build.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+# Use Chromium's dcheck_always_on when available so that we respect it when
+# running tests on the GPU builders
+if (build_with_chromium) {
+ import("//build/config/dcheck_always_on.gni")
+} else {
+ dcheck_always_on = false
+}
+
+if (build_with_chromium) {
+ import("//build/config/sanitizers/sanitizers.gni")
+} else {
+ use_fuzzing_engine = false
+}
+
+###############################################################################
+# Common dawn configs
+###############################################################################
+
+config("internal_config") {
+ include_dirs = [
+ "${target_gen_dir}/../../../src",
+ "${dawn_root}/src",
+ ]
+
+ defines = []
+ if (dawn_always_assert || dcheck_always_on || is_debug ||
+ use_fuzzing_engine) {
+ defines += [ "DAWN_ENABLE_ASSERTS" ]
+ }
+
+ if (use_fuzzing_engine) {
+ # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
+ defines += [ "DAWN_ABORT_ON_ASSERT" ]
+ }
+
+ if (dawn_enable_d3d12) {
+ defines += [ "DAWN_ENABLE_BACKEND_D3D12" ]
+ }
+ if (dawn_enable_metal) {
+ defines += [ "DAWN_ENABLE_BACKEND_METAL" ]
+ }
+ if (dawn_enable_null) {
+ defines += [ "DAWN_ENABLE_BACKEND_NULL" ]
+ }
+ if (dawn_enable_opengl) {
+ defines += [ "DAWN_ENABLE_BACKEND_OPENGL" ]
+ }
+ if (dawn_enable_desktop_gl) {
+ defines += [ "DAWN_ENABLE_BACKEND_DESKTOP_GL" ]
+ }
+ if (dawn_enable_opengles) {
+ defines += [ "DAWN_ENABLE_BACKEND_OPENGLES" ]
+ }
+ if (dawn_enable_vulkan) {
+ defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
+ }
+
+ if (dawn_use_x11) {
+ defines += [ "DAWN_USE_X11" ]
+ }
+
+ if (dawn_enable_error_injection) {
+ defines += [ "DAWN_ENABLE_ERROR_INJECTION" ]
+ }
+
+ # Only internal Dawn targets can use this config, this means only targets in
+ # this BUILD.gn file and related subdirs.
+ visibility = [
+ "${dawn_root}/samples/dawn/*",
+ "${dawn_root}/src/dawn/*",
+ ]
+
+ cflags = []
+ if (is_clang) {
+ cflags += [ "-Wno-shadow" ]
+ }
+
+ # Enable more warnings that were found when using Dawn in other projects.
+ # Add them only when building in standalone because we control which clang
+ # version we use. Otherwise we risk breaking projects depending on Dawn when
+  # they use a different clang version.
+ if (dawn_standalone && is_clang) {
+ cflags += [
+ "-Wconditional-uninitialized",
+ "-Wcstring-format-directive",
+ "-Wc++11-narrowing",
+ "-Wdeprecated-copy",
+ "-Wdeprecated-copy-dtor",
+ "-Wduplicate-enum",
+ "-Wextra-semi-stmt",
+ "-Wimplicit-fallthrough",
+ "-Winconsistent-missing-destructor-override",
+ "-Winvalid-offsetof",
+ "-Wmissing-field-initializers",
+ "-Wnon-c-typedef-for-linkage",
+ "-Wpessimizing-move",
+ "-Wrange-loop-analysis",
+ "-Wredundant-move",
+ "-Wshadow-field",
+ "-Wstrict-prototypes",
+ "-Wtautological-unsigned-zero-compare",
+ "-Wunreachable-code-aggressive",
+ "-Wunused-but-set-variable",
+ ]
+
+ if (is_win) {
+ cflags += [
+ # clang-cl doesn't know -pedantic, pass it explicitly to the clang driver
+ "/clang:-pedantic",
+
+ # Allow the use of __uuidof()
+ "-Wno-language-extension-token",
+ ]
+ } else {
+ cflags += [ "-pedantic" ]
+ }
+ }
+
+ if (!is_clang && is_win) {
+ # Dawn extends wgpu enums with internal enums.
+ # MSVC considers these invalid switch values. crbug.com/dawn/397.
+ cflags += [ "/wd4063" ]
+
+    # MSVC thinks that a switch over all the enum values of an enum class is
+ # not sufficient to cover all control paths. Turn off this warning so that
+ # the respective clang warning tells us where to add switch cases
+ # (otherwise we have to add default: UNREACHABLE() that silences clang too)
+ cflags += [ "/wd4715" ]
+
+ # MSVC emits warnings when using constructs deprecated in C++17. Silence
+ # them until they are fixed.
+ # TODO(dawn:824): Fix all uses of C++ features deprecated in C++17.
+ defines += [ "_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS" ]
+ if (dawn_is_winuwp) {
+      # /ZW makes sure we don't add calls that are forbidden in UWP,
+      # and /EHsc is required to be used in combination with it.
+      # Even though /EHsc is already added by the Windows GN defaults,
+      # we still add it here so that every /ZW is paired with an /EHsc.
+ cflags_cc = [
+ "/ZW:nostdlib",
+ "/EHsc",
+ ]
+ }
+ }
+}
+
+###############################################################################
+# Common dawn library
+###############################################################################
+
+# This GN file is discovered by all Chromium builds, but common doesn't support
+# all of Chromium's OSes so we explicitly make the target visible only on
+# systems we know Dawn is able to compile on.
+if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
+ static_library("common") {
+ sources = [
+ "Alloc.h",
+ "Assert.cpp",
+ "Assert.h",
+ "BitSetIterator.h",
+ "Compiler.h",
+ "ConcurrentCache.h",
+ "Constants.h",
+ "CoreFoundationRef.h",
+ "DynamicLib.cpp",
+ "DynamicLib.h",
+ "GPUInfo.cpp",
+ "GPUInfo.h",
+ "HashUtils.h",
+ "IOKitRef.h",
+ "LinkedList.h",
+ "Log.cpp",
+ "Log.h",
+ "Math.cpp",
+ "Math.h",
+ "NSRef.h",
+ "NonCopyable.h",
+ "PlacementAllocated.h",
+ "Platform.h",
+ "Preprocessor.h",
+ "RefBase.h",
+ "RefCounted.cpp",
+ "RefCounted.h",
+ "Result.cpp",
+ "Result.h",
+ "SerialMap.h",
+ "SerialQueue.h",
+ "SerialStorage.h",
+ "SlabAllocator.cpp",
+ "SlabAllocator.h",
+ "StackContainer.h",
+ "SwapChainUtils.h",
+ "SystemUtils.cpp",
+ "SystemUtils.h",
+ "TypeTraits.h",
+ "TypedInteger.h",
+ "UnderlyingType.h",
+ "ityp_array.h",
+ "ityp_bitset.h",
+ "ityp_span.h",
+ "ityp_stack_vec.h",
+ "ityp_vector.h",
+ "vulkan_platform.h",
+ "xlib_with_undefs.h",
+ ]
+
+ if (is_mac) {
+ sources += [ "SystemUtils_mac.mm" ]
+ }
+
+ public_configs = [ ":internal_config" ]
+ deps = [
+ "${dawn_root}/include/dawn:cpp_headers",
+ "${dawn_root}/include/dawn:headers",
+ ]
+
+ if (is_win) {
+ sources += [
+ "WindowsUtils.cpp",
+ "WindowsUtils.h",
+ "windows_with_undefs.h",
+ ]
+ }
+ if (dawn_enable_vulkan) {
+ public_deps = [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
+ }
+ if (is_android) {
+ libs = [ "log" ]
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h b/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h
new file mode 100644
index 00000000000..f14a76c977e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/BitSetIterator.h
@@ -0,0 +1,139 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_BITSETITERATOR_H_
+#define COMMON_BITSETITERATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <bitset>
+#include <limits>
+
+// This is ANGLE's BitSetIterator class with a customizable return type
+// TODO(crbug.com/dawn/306): it could be optimized, in particular when N <= 64
+
+template <typename T>
+T roundUp(const T value, const T alignment) {
+ auto temp = value + alignment - static_cast<T>(1);
+ return temp - temp % alignment;
+}
+
+template <size_t N, typename T>
+class BitSetIterator final {
+ public:
+ BitSetIterator(const std::bitset<N>& bitset);
+ BitSetIterator(const BitSetIterator& other);
+ BitSetIterator& operator=(const BitSetIterator& other);
+
+ class Iterator final {
+ public:
+ Iterator(const std::bitset<N>& bits);
+ Iterator& operator++();
+
+ bool operator==(const Iterator& other) const;
+ bool operator!=(const Iterator& other) const;
+
+ T operator*() const {
+ using U = UnderlyingType<T>;
+ ASSERT(static_cast<U>(mCurrentBit) <= std::numeric_limits<U>::max());
+ return static_cast<T>(static_cast<U>(mCurrentBit));
+ }
+
+ private:
+ unsigned long getNextBit();
+
+ static constexpr size_t kBitsPerWord = sizeof(uint32_t) * 8;
+ std::bitset<N> mBits;
+ unsigned long mCurrentBit;
+ unsigned long mOffset;
+ };
+
+ Iterator begin() const {
+ return Iterator(mBits);
+ }
+ Iterator end() const {
+ return Iterator(std::bitset<N>(0));
+ }
+
+ private:
+ const std::bitset<N> mBits;
+};
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
+ mBits = other.mBits;
+ return *this;
+}
+
+template <size_t N, typename T>
+BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
+ : mBits(bits), mCurrentBit(0), mOffset(0) {
+ if (bits.any()) {
+ mCurrentBit = getNextBit();
+ } else {
+ mOffset = static_cast<unsigned long>(roundUp(N, kBitsPerWord));
+ }
+}
+
+template <size_t N, typename T>
+typename BitSetIterator<N, T>::Iterator& BitSetIterator<N, T>::Iterator::operator++() {
+ DAWN_ASSERT(mBits.any());
+ mBits.set(mCurrentBit - mOffset, 0);
+ mCurrentBit = getNextBit();
+ return *this;
+}
+
+template <size_t N, typename T>
+bool BitSetIterator<N, T>::Iterator::operator==(const Iterator& other) const {
+ return mOffset == other.mOffset && mBits == other.mBits;
+}
+
+template <size_t N, typename T>
+bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
+ return !(*this == other);
+}
+
+template <size_t N, typename T>
+unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
+ static std::bitset<N> wordMask(std::numeric_limits<uint32_t>::max());
+
+ while (mOffset < N) {
+ uint32_t wordBits = static_cast<uint32_t>((mBits & wordMask).to_ulong());
+ if (wordBits != 0ul) {
+ return ScanForward(wordBits) + mOffset;
+ }
+
+ mBits >>= kBitsPerWord;
+ mOffset += kBitsPerWord;
+ }
+ return 0;
+}
+
+// Helper to avoid needing to specify the template parameter size
+template <size_t N>
+BitSetIterator<N, uint32_t> IterateBitSet(const std::bitset<N>& bitset) {
+ return BitSetIterator<N, uint32_t>(bitset);
+}
+
+#endif // COMMON_BITSETITERATOR_H_
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt
index d839d84106b..d839d84106b 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/common/CMakeLists.txt
diff --git a/chromium/third_party/dawn/src/dawn/common/Compiler.h b/chromium/third_party/dawn/src/dawn/common/Compiler.h
new file mode 100644
index 00000000000..ae4f5c0d761
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Compiler.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_COMPILER_H_
+#define COMMON_COMPILER_H_
+
+// Defines macros for compiler-specific functionality
+// - DAWN_COMPILER_[CLANG|GCC|MSVC]: Compiler detection
+// - DAWN_BREAKPOINT(): Raises an exception and breaks in the debugger
+// - DAWN_BUILTIN_UNREACHABLE(): Hints the compiler that a code path is unreachable
+// - DAWN_(UN)?LIKELY(EXPR): Where available, hints the compiler that the expression will be true
+// (resp. false) to help it generate code that leads to better branch prediction.
+// - DAWN_UNUSED(EXPR): Prevents unused variable/expression warnings on EXPR.
+// - DAWN_UNUSED_FUNC(FUNC): Prevents unused function warnings on FUNC.
+// - DAWN_DECLARE_UNUSED: Prevents unused function warnings on a subsequent declaration.
+// Both DAWN_UNUSED_FUNC and DAWN_DECLARE_UNUSED may be necessary, e.g. to suppress clang's
+// unneeded-internal-declaration warning.
+
+// Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC
+#if defined(__GNUC__) || defined(__clang__)
+# if defined(__clang__)
+# define DAWN_COMPILER_CLANG
+# else
+# define DAWN_COMPILER_GCC
+# endif
+
+# if defined(__i386__) || defined(__x86_64__)
+# define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
+# else
+// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
+# define DAWN_BREAKPOINT()
+# endif
+
+# define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
+# define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
+# define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
+
+# if !defined(__has_cpp_attribute)
+# define __has_cpp_attribute(name) 0
+# endif
+
+# define DAWN_DECLARE_UNUSED __attribute__((unused))
+# if defined(NDEBUG)
+# define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+# endif
+# define DAWN_NOINLINE __attribute__((noinline))
+
+// MSVC
+#elif defined(_MSC_VER)
+# define DAWN_COMPILER_MSVC
+
+extern void __cdecl __debugbreak(void);
+# define DAWN_BREAKPOINT() __debugbreak()
+
+# define DAWN_BUILTIN_UNREACHABLE() __assume(false)
+
+# define DAWN_DECLARE_UNUSED
+# if defined(NDEBUG)
+# define DAWN_FORCE_INLINE __forceinline
+# endif
+# define DAWN_NOINLINE __declspec(noinline)
+
+#else
+# error "Unsupported compiler"
+#endif
+
+// It seems that (void) EXPR works on all compilers to silence the unused variable warning.
+#define DAWN_UNUSED(EXPR) (void)EXPR
+// Likewise using static asserting on sizeof(&FUNC) seems to make it tagged as used
+#define DAWN_UNUSED_FUNC(FUNC) static_assert(sizeof(&FUNC) == sizeof(void (*)()))
+
+// Add noop replacements for macros for features that aren't supported by the compiler.
+#if !defined(DAWN_LIKELY)
+# define DAWN_LIKELY(X) X
+#endif
+#if !defined(DAWN_UNLIKELY)
+# define DAWN_UNLIKELY(X) X
+#endif
+#if !defined(DAWN_FORCE_INLINE)
+# define DAWN_FORCE_INLINE inline
+#endif
+#if !defined(DAWN_NOINLINE)
+# define DAWN_NOINLINE
+#endif
+
+#endif // COMMON_COMPILER_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h b/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h
new file mode 100644
index 00000000000..e11b6469884
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ConcurrentCache.h
@@ -0,0 +1,54 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_CONCURRENT_CACHE_H_
+#define COMMON_CONCURRENT_CACHE_H_
+
+#include "dawn/common/NonCopyable.h"
+
+#include <mutex>
+#include <unordered_set>
+#include <utility>
+
+template <typename T>
+class ConcurrentCache : public NonMovable {
+ public:
+ ConcurrentCache() = default;
+
+ T* Find(T* object) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto iter = mCache.find(object);
+ if (iter == mCache.end()) {
+ return nullptr;
+ }
+ return *iter;
+ }
+
+ std::pair<T*, bool> Insert(T* object) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto [value, inserted] = mCache.insert(object);
+ return {*value, inserted};
+ }
+
+ size_t Erase(T* object) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mCache.erase(object);
+ }
+
+ private:
+ std::mutex mMutex;
+ std::unordered_set<T*, typename T::HashFunc, typename T::EqualityFunc> mCache;
+};
+
+#endif
diff --git a/chromium/third_party/dawn/src/dawn/common/Constants.h b/chromium/third_party/dawn/src/dawn/common/Constants.h
new file mode 100644
index 00000000000..13b5995d22b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Constants.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_CONSTANTS_H_
+#define COMMON_CONSTANTS_H_
+
+#include <cstdint>
+
+static constexpr uint32_t kMaxBindGroups = 4u;
+static constexpr uint8_t kMaxVertexAttributes = 16u;
+static constexpr uint8_t kMaxVertexBuffers = 8u;
+static constexpr uint32_t kMaxVertexBufferArrayStride = 2048u;
+static constexpr uint32_t kNumStages = 3;
+static constexpr uint8_t kMaxColorAttachments = 8u;
+static constexpr uint32_t kTextureBytesPerRowAlignment = 256u;
+static constexpr uint32_t kMaxInterStageShaderComponents = 60u;
+static constexpr uint32_t kMaxInterStageShaderVariables = kMaxInterStageShaderComponents / 4;
+
+// Per stage limits
+static constexpr uint32_t kMaxSampledTexturesPerShaderStage = 16;
+static constexpr uint32_t kMaxSamplersPerShaderStage = 16;
+static constexpr uint32_t kMaxStorageBuffersPerShaderStage = 8;
+static constexpr uint32_t kMaxStorageTexturesPerShaderStage = 4;
+static constexpr uint32_t kMaxUniformBuffersPerShaderStage = 12;
+
+// Per pipeline layout limits
+static constexpr uint32_t kMaxDynamicUniformBuffersPerPipelineLayout = 8u;
+static constexpr uint32_t kMaxDynamicStorageBuffersPerPipelineLayout = 4u;
+
+// Indirect command sizes
+static constexpr uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);
+static constexpr uint64_t kDrawIndirectSize = 4 * sizeof(uint32_t);
+static constexpr uint64_t kDrawIndexedIndirectSize = 5 * sizeof(uint32_t);
+
+// Non spec defined constants.
+static constexpr float kLodMin = 0.0;
+static constexpr float kLodMax = 1000.0;
+
+// Offset alignment for CopyB2B. Strictly speaking this alignment is required only
+// on macOS, but we decide to do it on all platforms.
+static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
+
+// The maximum size of visibilityResultBuffer is 256KB on Metal. To fit the restriction, limit the
+// maximum size of query set to 64KB. The size of a query is 8-bytes, the maximum query count is 64
+// * 1024 / 8.
+static constexpr uint32_t kMaxQueryCount = 8192u;
+
+// An external texture occupies multiple binding slots. These are the per-external-texture bindings
+// needed.
+static constexpr uint8_t kSampledTexturesPerExternalTexture = 4u;
+static constexpr uint8_t kSamplersPerExternalTexture = 1u;
+static constexpr uint8_t kUniformsPerExternalTexture = 1u;
+
+// A spec defined constant but that doesn't have a name.
+static constexpr uint32_t kMaxBindingNumber = 65535;
+
+#endif // COMMON_CONSTANTS_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h b/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h
new file mode 100644
index 00000000000..e6cafbefd8d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/CoreFoundationRef.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_COREFOUNDATIONREF_H_
+#define COMMON_COREFOUNDATIONREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+template <typename T>
+struct CoreFoundationRefTraits {
+ static constexpr T kNullValue = nullptr;
+ static void Reference(T value) {
+ CFRetain(value);
+ }
+ static void Release(T value) {
+ CFRelease(value);
+ }
+};
+
+template <typename T>
+class CFRef : public RefBase<T, CoreFoundationRefTraits<T>> {
+ public:
+ using RefBase<T, CoreFoundationRefTraits<T>>::RefBase;
+};
+
+template <typename T>
+CFRef<T> AcquireCFRef(T pointee) {
+ CFRef<T> ref;
+ ref.Acquire(pointee);
+ return ref;
+}
+
+#endif // COMMON_COREFOUNDATIONREF_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp b/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp
new file mode 100644
index 00000000000..ab4f2d795dd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/DynamicLib.cpp
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/DynamicLib.h"
+
+#include "dawn/common/Platform.h"
+
+#if DAWN_PLATFORM_WINDOWS
+# include "dawn/common/windows_with_undefs.h"
+# if DAWN_PLATFORM_WINUWP
+# include "dawn/common/WindowsUtils.h"
+# endif
+#elif DAWN_PLATFORM_POSIX
+# include <dlfcn.h>
+#else
+# error "Unsupported platform for DynamicLib"
+#endif
+
+DynamicLib::~DynamicLib() {
+ Close();
+}
+
+DynamicLib::DynamicLib(DynamicLib&& other) {
+ std::swap(mHandle, other.mHandle);
+}
+
+DynamicLib& DynamicLib::operator=(DynamicLib&& other) {
+ std::swap(mHandle, other.mHandle);
+ return *this;
+}
+
+bool DynamicLib::Valid() const {
+ return mHandle != nullptr;
+}
+
+bool DynamicLib::Open(const std::string& filename, std::string* error) {
+#if DAWN_PLATFORM_WINDOWS
+# if DAWN_PLATFORM_WINUWP
+ mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
+# else
+ mHandle = LoadLibraryA(filename.c_str());
+# endif
+ if (mHandle == nullptr && error != nullptr) {
+ *error = "Windows Error: " + std::to_string(GetLastError());
+ }
+#elif DAWN_PLATFORM_POSIX
+ mHandle = dlopen(filename.c_str(), RTLD_NOW);
+
+ if (mHandle == nullptr && error != nullptr) {
+ *error = dlerror();
+ }
+#else
+# error "Unsupported platform for DynamicLib"
+#endif
+
+ return mHandle != nullptr;
+}
+
+void DynamicLib::Close() {
+ if (mHandle == nullptr) {
+ return;
+ }
+
+#if DAWN_PLATFORM_WINDOWS
+ FreeLibrary(static_cast<HMODULE>(mHandle));
+#elif DAWN_PLATFORM_POSIX
+ dlclose(mHandle);
+#else
+# error "Unsupported platform for DynamicLib"
+#endif
+
+ mHandle = nullptr;
+}
+
+void* DynamicLib::GetProc(const std::string& procName, std::string* error) const {
+ void* proc = nullptr;
+
+#if DAWN_PLATFORM_WINDOWS
+ proc = reinterpret_cast<void*>(GetProcAddress(static_cast<HMODULE>(mHandle), procName.c_str()));
+
+ if (proc == nullptr && error != nullptr) {
+ *error = "Windows Error: " + std::to_string(GetLastError());
+ }
+#elif DAWN_PLATFORM_POSIX
+ proc = reinterpret_cast<void*>(dlsym(mHandle, procName.c_str()));
+
+ if (proc == nullptr && error != nullptr) {
+ *error = dlerror();
+ }
+#else
+# error "Unsupported platform for DynamicLib"
+#endif
+
+ return proc;
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/DynamicLib.h b/chromium/third_party/dawn/src/dawn/common/DynamicLib.h
new file mode 100644
index 00000000000..66d846ee894
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/DynamicLib.h
@@ -0,0 +1,54 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_DYNAMICLIB_H_
+#define COMMON_DYNAMICLIB_H_
+
+#include "dawn/common/Assert.h"
+
+#include <string>
+#include <type_traits>
+
+class DynamicLib {
+ public:
+ DynamicLib() = default;
+ ~DynamicLib();
+
+ DynamicLib(const DynamicLib&) = delete;
+ DynamicLib& operator=(const DynamicLib&) = delete;
+
+ DynamicLib(DynamicLib&& other);
+ DynamicLib& operator=(DynamicLib&& other);
+
+ bool Valid() const;
+
+ bool Open(const std::string& filename, std::string* error = nullptr);
+ void Close();
+
+ void* GetProc(const std::string& procName, std::string* error = nullptr) const;
+
+ template <typename T>
+ bool GetProc(T** proc, const std::string& procName, std::string* error = nullptr) const {
+ ASSERT(proc != nullptr);
+ static_assert(std::is_function<T>::value);
+
+ *proc = reinterpret_cast<T*>(GetProc(procName, error));
+ return *proc != nullptr;
+ }
+
+ private:
+ void* mHandle = nullptr;
+};
+
+#endif // COMMON_DYNAMICLIB_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp b/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp
new file mode 100644
index 00000000000..97068ff6ca9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/GPUInfo.cpp
@@ -0,0 +1,105 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/GPUInfo.h"
+
+#include "dawn/common/Assert.h"
+
+#include <algorithm>
+#include <array>
+
+namespace gpu_info {
+ namespace {
+ // Intel
+ // Referenced from the following Mesa source code:
+ // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+ // gen9
+ const std::array<uint32_t, 25> Skylake = {
+ {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
+ 0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
+ 0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
+ // gen9p5
+ const std::array<uint32_t, 20> Kabylake = {
+ {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
+ 0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+ const std::array<uint32_t, 17> Coffeelake = {
+ {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
+ 0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+ const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+ const std::array<uint32_t, 21> Cometlake = {
+ {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
+ 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+
+ // According to Intel graphics driver version schema, build number is generated from the
+ // last two fields.
+ // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+ // more details.
+ uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+ return driverVersion[2] * 10000 + driverVersion[3];
+ }
+
+ } // anonymous namespace
+
+ bool IsAMD(PCIVendorID vendorId) {
+ return vendorId == kVendorID_AMD;
+ }
+ bool IsARM(PCIVendorID vendorId) {
+ return vendorId == kVendorID_ARM;
+ }
+ bool IsImgTec(PCIVendorID vendorId) {
+ return vendorId == kVendorID_ImgTec;
+ }
+ bool IsIntel(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Intel;
+ }
+ bool IsNvidia(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Nvidia;
+ }
+ bool IsQualcomm(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Qualcomm;
+ }
+ bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
+ }
+ bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+ }
+
+ int CompareD3DDriverVersion(PCIVendorID vendorId,
+ const D3DDriverVersion& version1,
+ const D3DDriverVersion& version2) {
+ if (IsIntel(vendorId)) {
+ uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
+ uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
+ return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
+ }
+
+ // TODO(crbug.com/dawn/823): support other GPU vendors
+ UNREACHABLE();
+ return 0;
+ }
+
+ // Intel GPUs
+ bool IsSkylake(PCIDeviceID deviceId) {
+ return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+ }
+ bool IsKabylake(PCIDeviceID deviceId) {
+ return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
+ }
+ bool IsCoffeelake(PCIDeviceID deviceId) {
+ return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
+ (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
+ (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
+ }
+} // namespace gpu_info
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.h b/chromium/third_party/dawn/src/dawn/common/GPUInfo.h
index 2b30d96662e..2b30d96662e 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.h
+++ b/chromium/third_party/dawn/src/dawn/common/GPUInfo.h
diff --git a/chromium/third_party/dawn/src/dawn/common/HashUtils.h b/chromium/third_party/dawn/src/dawn/common/HashUtils.h
new file mode 100644
index 00000000000..e59e8c5434d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/HashUtils.h
@@ -0,0 +1,101 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_HASHUTILS_H_
+#define COMMON_HASHUTILS_H_
+
+#include "dawn/common/Platform.h"
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/ityp_bitset.h"
+
+#include <bitset>
+#include <functional>
+
+// Wrapper around std::hash to make it a templated function instead of a functor. It is marginally
+// nicer, and avoids adding to the std namespace to add hashing of other types.
+template <typename T>
+size_t Hash(const T& value) {
+ return std::hash<T>()(value);
+}
+
+// Add hashing of TypedIntegers
+template <typename Tag, typename T>
+size_t Hash(const TypedInteger<Tag, T>& value) {
+ return Hash(static_cast<T>(value));
+}
+
+// When hashing sparse structures we want to iteratively build a hash value with only parts of the
+// data. HashCombine "hashes" together an existing hash and hashable values.
+//
+// Example usage to compute the hash of a mask and values corresponding to the mask:
+//
+// size_t hash = Hash(mask);
+// for (uint32_t i : IterateBitSet(mask)) { HashCombine(&hash, hashables[i]); }
+// return hash;
+template <typename T>
+void HashCombine(size_t* hash, const T& value) {
+#if defined(DAWN_PLATFORM_64_BIT)
+ const size_t offset = 0x9e3779b97f4a7c16;
+#elif defined(DAWN_PLATFORM_32_BIT)
+ const size_t offset = 0x9e3779b9;
+#else
+# error "Unsupported platform"
+#endif
+ *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
+}
+
+template <typename T, typename... Args>
+void HashCombine(size_t* hash, const T& value, const Args&... args) {
+ HashCombine(hash, value);
+ HashCombine(hash, args...);
+}
+
+// Workaround a bug between clang++ and libstdc++ by defining our own hashing for bitsets.
+// When _GLIBCXX_DEBUG is enabled libstdc++ wraps containers into debug containers. For bitset this
+// means what is normally std::bitset is defined as std::__cxx1998::bitset and is replaced by the
+// debug version of bitset.
+// When hashing, std::hash<std::bitset> proxies the call to std::hash<std::__cxx1998::bitset> and
+// fails on clang because the latter tries to access the private _M_getdata member of the bitset.
+// It looks like it should work because the non-debug bitset declares
+//
+// friend struct std::hash<bitset> // bitset is the name of the class itself
+//
+// which should friend std::hash<std::__cxx1998::bitset> but somehow doesn't work on clang.
+#if defined(_GLIBCXX_DEBUG)
+template <size_t N>
+size_t Hash(const std::bitset<N>& value) {
+ constexpr size_t kWindowSize = sizeof(unsigned long long);
+
+ std::bitset<N> bits = value;
+ size_t hash = 0;
+ for (size_t processedBits = 0; processedBits < N; processedBits += kWindowSize) {
+ HashCombine(&hash, bits.to_ullong());
+ bits >>= kWindowSize;
+ }
+
+ return hash;
+}
+#endif
+
+namespace std {
+ template <typename Index, size_t N>
+ struct hash<ityp::bitset<Index, N>> {
+ public:
+ size_t operator()(const ityp::bitset<Index, N>& value) const {
+ return Hash(static_cast<const std::bitset<N>&>(value));
+ }
+ };
+} // namespace std
+
+#endif // COMMON_HASHUTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/IOKitRef.h b/chromium/third_party/dawn/src/dawn/common/IOKitRef.h
new file mode 100644
index 00000000000..4ff441372ad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/IOKitRef.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_IOKITREF_H_
+#define COMMON_IOKITREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <IOKit/IOKitLib.h>
+
+template <typename T>
+struct IOKitRefTraits {
+ static constexpr T kNullValue = IO_OBJECT_NULL;
+ static void Reference(T value) {
+ IOObjectRetain(value);
+ }
+ static void Release(T value) {
+ IOObjectRelease(value);
+ }
+};
+
+template <typename T>
+class IORef : public RefBase<T, IOKitRefTraits<T>> {
+ public:
+ using RefBase<T, IOKitRefTraits<T>>::RefBase;
+};
+
+template <typename T>
+IORef<T> AcquireIORef(T pointee) {
+ IORef<T> ref;
+ ref.Acquire(pointee);
+ return ref;
+}
+
+#endif // COMMON_IOKITREF_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/LinkedList.h b/chromium/third_party/dawn/src/dawn/common/LinkedList.h
new file mode 100644
index 00000000000..673f596e623
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/LinkedList.h
@@ -0,0 +1,274 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a copy of Chromium's /src/base/containers/linked_list.h with the following
+// modifications:
+// - Added iterators for ranged based iterations
+// - Added in list check before removing node to prevent segfault, now returns true iff removed
+// - Added MoveInto functionality for moving list elements to another list
+
+#ifndef COMMON_LINKED_LIST_H
+#define COMMON_LINKED_LIST_H
+
+#include "dawn/common/Assert.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+// class MyNodeType : public LinkNode<MyNodeType> {
+// ...
+// };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+// LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+// LinkNode<MyNodeType>* n1 = ...;
+// LinkNode<MyNodeType>* n2 = ...;
+// LinkNode<MyNodeType>* n3 = ...;
+//
+// list.Append(n1);
+// list.Append(n3);
+// n2->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+// for (LinkNode<MyNodeType>* node = list.head();
+// node != list.end();
+// node = node->next()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// for (LinkNode<MyNodeType>* node : list) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Or to iterate the linked list backwards:
+//
+// for (LinkNode<MyNodeType>* node = list.tail();
+// node != list.end();
+// node = node->previous()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+// performance. If you don't care about the performance differences
+// then use an STL container, as it makes for better code readability.
+//
+// Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+// * Erasing an element of type T* from base::LinkedList<T> is
+// an O(1) operation. Whereas for std::list<T*> it is O(n).
+// That is because with std::list<T*> you must obtain an
+// iterator to the T* element before you can call erase(iterator).
+//
+// * Insertion operations with base::LinkedList<T> never require
+// heap allocations.
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+// "previous" pointers that reference other nodes in the list.
+//
+// With base::LinkedList<T>, the type being inserted already reserves
+// space for the "next" and "previous" pointers (base::LinkNode<T>*).
+// Whereas with std::list<T> the type can be anything, so the implementation
+// needs to glue on the "next" and "previous" pointers using
+// some internal node type.
+
+// Forward declarations of the types in order for recursive referencing and friending.
+template <typename T>
+class LinkNode;
+template <typename T>
+class LinkedList;
+
+template <typename T>
+class LinkNode {
+ public:
+ LinkNode() : previous_(nullptr), next_(nullptr) {
+ }
+ LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
+ }
+
+ LinkNode(LinkNode<T>&& rhs) {
+ next_ = rhs.next_;
+ rhs.next_ = nullptr;
+ previous_ = rhs.previous_;
+ rhs.previous_ = nullptr;
+
+ // If the node belongs to a list, next_ and previous_ are both non-null.
+ // Otherwise, they are both null.
+ if (next_) {
+ next_->previous_ = this;
+ previous_->next_ = this;
+ }
+ }
+
+ // Insert |this| into the linked list, before |e|.
+ void InsertBefore(LinkNode<T>* e) {
+ this->next_ = e;
+ this->previous_ = e->previous_;
+ e->previous_->next_ = this;
+ e->previous_ = this;
+ }
+
+ // Insert |this| into the linked list, after |e|.
+ void InsertAfter(LinkNode<T>* e) {
+ this->next_ = e->next_;
+ this->previous_ = e;
+ e->next_->previous_ = this;
+ e->next_ = this;
+ }
+
+ // Check if |this| is in a list.
+ bool IsInList() const {
+ ASSERT((this->previous_ == nullptr) == (this->next_ == nullptr));
+ return this->next_ != nullptr;
+ }
+
+ // Remove |this| from the linked list. Returns true iff removed from a list.
+ bool RemoveFromList() {
+ if (!IsInList()) {
+ return false;
+ }
+
+ this->previous_->next_ = this->next_;
+ this->next_->previous_ = this->previous_;
+ // next() and previous() return nullptr if and only if this node is not in any list.
+ this->next_ = nullptr;
+ this->previous_ = nullptr;
+ return true;
+ }
+
+ LinkNode<T>* previous() const {
+ return previous_;
+ }
+
+ LinkNode<T>* next() const {
+ return next_;
+ }
+
+ // Cast from the node-type to the value type.
+ const T* value() const {
+ return static_cast<const T*>(this);
+ }
+
+ T* value() {
+ return static_cast<T*>(this);
+ }
+
+ private:
+ friend class LinkedList<T>;
+ LinkNode<T>* previous_;
+ LinkNode<T>* next_;
+};
+
+template <typename T>
+class LinkedList {
+ public:
+ // The "root" node is self-referential, and forms the basis of a circular
+ // list (root_.next() will point back to the start of the list,
+// and root_.previous() wraps around to the end of the list).
+ LinkedList() : root_(&root_, &root_) {
+ }
+
+ ~LinkedList() {
+ // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
+ // root_ even after it has been freed. We should remove root_ from the list to prevent any
+ // future access.
+ root_.RemoveFromList();
+ }
+
+ // Appends |e| to the end of the linked list.
+ void Append(LinkNode<T>* e) {
+ e->InsertBefore(&root_);
+ }
+
+ // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
+ void MoveInto(LinkedList<T>* l) {
+ if (empty()) {
+ return;
+ }
+ l->root_.previous_->next_ = root_.next_;
+ root_.next_->previous_ = l->root_.previous_;
+ l->root_.previous_ = root_.previous_;
+ root_.previous_->next_ = &l->root_;
+
+ root_.next_ = &root_;
+ root_.previous_ = &root_;
+ }
+
+ LinkNode<T>* head() const {
+ return root_.next();
+ }
+
+ LinkNode<T>* tail() const {
+ return root_.previous();
+ }
+
+ const LinkNode<T>* end() const {
+ return &root_;
+ }
+
+ bool empty() const {
+ return head() == end();
+ }
+
+ private:
+ LinkNode<T> root_;
+};
+
+template <typename T>
+class LinkedListIterator {
+ public:
+ LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
+ }
+
+ // We keep an early reference to the next node in the list so that even if the current element
+ // is modified or removed from the list, we have a valid next node.
+ LinkedListIterator<T> const& operator++() {
+ current_ = next_;
+ next_ = current_->next();
+ return *this;
+ }
+
+ bool operator!=(const LinkedListIterator<T>& other) const {
+ return current_ != other.current_;
+ }
+
+ LinkNode<T>* operator*() const {
+ return current_;
+ }
+
+ private:
+ LinkNode<T>* current_;
+ LinkNode<T>* next_;
+};
+
+template <typename T>
+LinkedListIterator<T> begin(LinkedList<T>& l) {
+ return LinkedListIterator<T>(l.head());
+}
+
+// The free end function doesn't use LinkedList<T>::end because of its const nature. Instead we wrap
+// around from tail.
+template <typename T>
+LinkedListIterator<T> end(LinkedList<T>& l) {
+ return LinkedListIterator<T>(l.tail()->next());
+}
+
+#endif // COMMON_LINKED_LIST_H
diff --git a/chromium/third_party/dawn/src/dawn/common/Log.cpp b/chromium/third_party/dawn/src/dawn/common/Log.cpp
new file mode 100644
index 00000000000..b85094b76fc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Log.cpp
@@ -0,0 +1,116 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Log.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+
+#include <cstdio>
+
+#if defined(DAWN_PLATFORM_ANDROID)
+# include <android/log.h>
+#endif
+
+namespace dawn {
+
+ namespace {
+
+ const char* SeverityName(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return "Debug";
+ case LogSeverity::Info:
+ return "Info";
+ case LogSeverity::Warning:
+ return "Warning";
+ case LogSeverity::Error:
+ return "Error";
+ default:
+ UNREACHABLE();
+ return "";
+ }
+ }
+
+#if defined(DAWN_PLATFORM_ANDROID)
+ android_LogPriority AndroidLogPriority(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Info:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Warning:
+ return ANDROID_LOG_WARN;
+ case LogSeverity::Error:
+ return ANDROID_LOG_ERROR;
+ default:
+ UNREACHABLE();
+ return ANDROID_LOG_ERROR;
+ }
+ }
+#endif // defined(DAWN_PLATFORM_ANDROID)
+
+ } // anonymous namespace
+
+ LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+ }
+
+ LogMessage::~LogMessage() {
+ std::string fullMessage = mStream.str();
+
+ // If this message has been moved, its stream is empty.
+ if (fullMessage.empty()) {
+ return;
+ }
+
+ const char* severityName = SeverityName(mSeverity);
+
+#if defined(DAWN_PLATFORM_ANDROID)
+ android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+ __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+#else // defined(DAWN_PLATFORM_ANDROID)
+ FILE* outputStream = stdout;
+ if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+ outputStream = stderr;
+ }
+
+ // Note: we use fprintf because <iostream> includes static initializers.
+ fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+ fflush(outputStream);
+#endif // defined(DAWN_PLATFORM_ANDROID)
+ }
+
+ LogMessage DebugLog() {
+ return {LogSeverity::Debug};
+ }
+
+ LogMessage InfoLog() {
+ return {LogSeverity::Info};
+ }
+
+ LogMessage WarningLog() {
+ return {LogSeverity::Warning};
+ }
+
+ LogMessage ErrorLog() {
+ return {LogSeverity::Error};
+ }
+
+ LogMessage DebugLog(const char* file, const char* function, int line) {
+ LogMessage message = DebugLog();
+ message << file << ":" << line << "(" << function << ")";
+ return message;
+ }
+
+} // namespace dawn
diff --git a/chromium/third_party/dawn/src/common/Log.h b/chromium/third_party/dawn/src/dawn/common/Log.h
index 0504af61ed3..0504af61ed3 100644
--- a/chromium/third_party/dawn/src/common/Log.h
+++ b/chromium/third_party/dawn/src/dawn/common/Log.h
diff --git a/chromium/third_party/dawn/src/dawn/common/Math.cpp b/chromium/third_party/dawn/src/dawn/common/Math.cpp
new file mode 100644
index 00000000000..bd936a8f71c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Math.cpp
@@ -0,0 +1,160 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Math.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#if defined(DAWN_COMPILER_MSVC)
+# include <intrin.h>
+#endif
+
+uint32_t ScanForward(uint32_t bits) {
+ ASSERT(bits != 0);
+#if defined(DAWN_COMPILER_MSVC)
+ unsigned long firstBitIndex = 0ul;
+ unsigned char ret = _BitScanForward(&firstBitIndex, bits);
+ ASSERT(ret != 0);
+ return firstBitIndex;
+#else
+ return static_cast<uint32_t>(__builtin_ctz(bits));
+#endif
+}
+
+uint32_t Log2(uint32_t value) {
+ ASSERT(value != 0);
+#if defined(DAWN_COMPILER_MSVC)
+ unsigned long firstBitIndex = 0ul;
+ unsigned char ret = _BitScanReverse(&firstBitIndex, value);
+ ASSERT(ret != 0);
+ return firstBitIndex;
+#else
+ return 31 - static_cast<uint32_t>(__builtin_clz(value));
+#endif
+}
+
+uint32_t Log2(uint64_t value) {
+ ASSERT(value != 0);
+#if defined(DAWN_COMPILER_MSVC)
+# if defined(DAWN_PLATFORM_64_BIT)
+ unsigned long firstBitIndex = 0ul;
+ unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
+ ASSERT(ret != 0);
+ return firstBitIndex;
+# else // defined(DAWN_PLATFORM_64_BIT)
+ unsigned long firstBitIndex = 0ul;
+ if (_BitScanReverse(&firstBitIndex, value >> 32)) {
+ return firstBitIndex + 32;
+ }
+ unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
+ ASSERT(ret != 0);
+ return firstBitIndex;
+# endif // defined(DAWN_PLATFORM_64_BIT)
+#else // defined(DAWN_COMPILER_MSVC)
+ return 63 - static_cast<uint32_t>(__builtin_clzll(value));
+#endif // defined(DAWN_COMPILER_MSVC)
+}
+
+uint64_t NextPowerOfTwo(uint64_t n) {
+ if (n <= 1) {
+ return 1;
+ }
+
+ return 1ull << (Log2(n - 1) + 1);
+}
+
+bool IsPowerOfTwo(uint64_t n) {
+ ASSERT(n != 0);
+ return (n & (n - 1)) == 0;
+}
+
+bool IsPtrAligned(const void* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
+}
+
+bool IsAligned(uint32_t value, size_t alignment) {
+ ASSERT(alignment <= UINT32_MAX);
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ uint32_t alignment32 = static_cast<uint32_t>(alignment);
+ return (value & (alignment32 - 1)) == 0;
+}
+
+uint16_t Float32ToFloat16(float fp32) {
+ uint32_t fp32i = BitCast<uint32_t>(fp32);
+ uint32_t sign16 = (fp32i & 0x80000000) >> 16;
+ uint32_t mantissaAndExponent = fp32i & 0x7FFFFFFF;
+
+ if (mantissaAndExponent > 0x7F800000) { // NaN
+ return 0x7FFF;
+ } else if (mantissaAndExponent > 0x47FFEFFF) { // Infinity
+ return static_cast<uint16_t>(sign16 | 0x7C00);
+ } else if (mantissaAndExponent < 0x38800000) { // Denormal
+ uint32_t mantissa = (mantissaAndExponent & 0x007FFFFF) | 0x00800000;
+ int32_t exponent = 113 - (mantissaAndExponent >> 23);
+
+ if (exponent < 24) {
+ mantissaAndExponent = mantissa >> exponent;
+ } else {
+ mantissaAndExponent = 0;
+ }
+
+ return static_cast<uint16_t>(
+ sign16 | (mantissaAndExponent + 0x00000FFF + ((mantissaAndExponent >> 13) & 1)) >> 13);
+ } else {
+ return static_cast<uint16_t>(sign16 | (mantissaAndExponent + 0xC8000000 + 0x00000FFF +
+ ((mantissaAndExponent >> 13) & 1)) >>
+ 13);
+ }
+}
+
+float Float16ToFloat32(uint16_t fp16) {
+ uint32_t tmp = (fp16 & 0x7fff) << 13 | (fp16 & 0x8000) << 16;
+ float tmp2 = *reinterpret_cast<float*>(&tmp);
+ return pow(2, 127 - 15) * tmp2;
+}
+
+bool IsFloat16NaN(uint16_t fp16) {
+ return (fp16 & 0x7FFF) > 0x7C00;
+}
+
+// Based on the Khronos Data Format Specification 1.2 Section 13.3 sRGB transfer functions
+float SRGBToLinear(float srgb) {
+ // sRGB is always used in unsigned normalized formats so clamp to [0.0, 1.0]
+ if (srgb <= 0.0f) {
+ return 0.0f;
+ } else if (srgb > 1.0f) {
+ return 1.0f;
+ }
+
+ if (srgb < 0.04045f) {
+ return srgb / 12.92f;
+ } else {
+ return std::pow((srgb + 0.055f) / 1.055f, 2.4f);
+ }
+}
+
+uint64_t RoundUp(uint64_t n, uint64_t m) {
+ ASSERT(m > 0);
+ ASSERT(n > 0);
+ ASSERT(m <= std::numeric_limits<uint64_t>::max() - n);
+ return ((n + m - 1) / m) * m;
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/Math.h b/chromium/third_party/dawn/src/dawn/common/Math.h
new file mode 100644
index 00000000000..9ef02d0c40b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Math.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_MATH_H_
+#define COMMON_MATH_H_
+
+#include "dawn/common/Assert.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include <limits>
+#include <type_traits>
+
+// The following are not valid for 0
+uint32_t ScanForward(uint32_t bits);
+uint32_t Log2(uint32_t value);
+uint32_t Log2(uint64_t value);
+bool IsPowerOfTwo(uint64_t n);
+uint64_t RoundUp(uint64_t n, uint64_t m);
+
+constexpr uint32_t ConstexprLog2(uint64_t v) {
+ return v <= 1 ? 0 : 1 + ConstexprLog2(v / 2);
+}
+
+constexpr uint32_t ConstexprLog2Ceil(uint64_t v) {
+ return v <= 1 ? 0 : ConstexprLog2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint32_t v) {
+ return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint64_t v) {
+ return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
+uint64_t NextPowerOfTwo(uint64_t n);
+bool IsPtrAligned(const void* ptr, size_t alignment);
+void* AlignVoidPtr(void* ptr, size_t alignment);
+bool IsAligned(uint32_t value, size_t alignment);
+
+template <typename T>
+T Align(T value, size_t alignment) {
+ ASSERT(value <= std::numeric_limits<T>::max() - (alignment - 1));
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ T alignmentT = static_cast<T>(alignment);
+ return (value + (alignmentT - 1)) & ~(alignmentT - 1);
+}
+
+template <typename T>
+DAWN_FORCE_INLINE T* AlignPtr(T* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return reinterpret_cast<T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+ ~(alignment - 1));
+}
+
+template <typename T>
+DAWN_FORCE_INLINE const T* AlignPtr(const T* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return reinterpret_cast<const T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+ ~(alignment - 1));
+}
+
+template <typename destType, typename sourceType>
+destType BitCast(const sourceType& source) {
+ static_assert(sizeof(destType) == sizeof(sourceType), "BitCast: cannot lose precision.");
+ destType output;
+ std::memcpy(&output, &source, sizeof(destType));
+ return output;
+}
+
+uint16_t Float32ToFloat16(float fp32);
+float Float16ToFloat32(uint16_t fp16);
+bool IsFloat16NaN(uint16_t fp16);
+
+template <typename T>
+T FloatToUnorm(float value) {
+ return static_cast<T>(value * static_cast<float>(std::numeric_limits<T>::max()));
+}
+
+float SRGBToLinear(float srgb);
+
+template <typename T1,
+ typename T2,
+ typename Enable = typename std::enable_if<sizeof(T1) == sizeof(T2)>::type>
+constexpr bool IsSubset(T1 subset, T2 set) {
+ T2 bitsAlsoInSet = subset & set;
+ return bitsAlsoInSet == subset;
+}
+
+#endif // COMMON_MATH_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/NSRef.h b/chromium/third_party/dawn/src/dawn/common/NSRef.h
new file mode 100644
index 00000000000..5bf49145c44
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/NSRef.h
@@ -0,0 +1,123 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_NSREF_H_
+#define COMMON_NSREF_H_
+
+#include "dawn/common/RefBase.h"
+
+#import <Foundation/NSObject.h>
+
+#if !defined(__OBJC__)
+# error "NSRef can only be used in Objective C/C++ code."
+#endif
+
+// This file contains smart pointers that automatically reference and release Objective C objects
+// and protocols in a manner very similar to Ref<>. Note that NSRef<> and NSPRef's constructors add
+// a reference to the object by default, so the pattern to get a reference for a newly created
+// NSObject is the following:
+//
+// NSRef<NSFoo> foo = AcquireNSRef([NSFoo alloc]);
+//
+// NSRef overloads -> and * but these operators don't work extremely well with Objective C's
+// features. For example automatic dereferencing when doing the following doesn't work:
+//
+// NSFoo* foo;
+// foo.member = 1;
+// someVar = foo.member;
+//
+// Instead use the message passing syntax:
+//
+// NSRef<NSFoo> foo;
+// [*foo setMember: 1];
+// someVar = [*foo member];
+//
+// Also did you notice the extra '*' in the example above? That's because Objective C's message
+// passing doesn't automatically call a C++ operator to dereference smart pointers (like -> does) so
+// we have to dereference manually using '*'. In some cases the extra * or message passing syntax
+// can get a bit annoying so instead a local "naked" pointer can be borrowed from the NSRef. This
+// would change the syntax overload in the following:
+//
+// NSRef<NSFoo> foo;
+// [*foo setA:1];
+// [*foo setB:2];
+// [*foo setC:3];
+//
+// Into (note access to members of ObjC classes referenced via pointer is done with . and not ->):
+//
+// NSRef<NSFoo> fooRef;
+// NSFoo* foo = fooRef.Get();
+// foo.a = 1;
+// foo.b = 2;
+// foo.c = 3;
+//
+// Which can be subjectively easier to read.
+
+template <typename T>
+struct NSRefTraits {
+ static constexpr T kNullValue = nullptr;
+ static void Reference(T value) {
+ [value retain];
+ }
+ static void Release(T value) {
+ [value release];
+ }
+};
+
+template <typename T>
+class NSRef : public RefBase<T*, NSRefTraits<T*>> {
+ public:
+ using RefBase<T*, NSRefTraits<T*>>::RefBase;
+
+ const T* operator*() const {
+ return this->Get();
+ }
+
+ T* operator*() {
+ return this->Get();
+ }
+};
+
+template <typename T>
+NSRef<T> AcquireNSRef(T* pointee) {
+ NSRef<T> ref;
+ ref.Acquire(pointee);
+ return ref;
+}
+
+// This is a RefBase<> for an Objective C protocol (hence the P). Objective C protocols must always
+// be referenced with id<ProtocolName> and not just ProtocolName* so they cannot use NSRef<>
+// itself. That's what the P in NSPRef stands for: Protocol.
+template <typename T>
+class NSPRef : public RefBase<T, NSRefTraits<T>> {
+ public:
+ using RefBase<T, NSRefTraits<T>>::RefBase;
+
+ const T operator*() const {
+ return this->Get();
+ }
+
+ T operator*() {
+ return this->Get();
+ }
+};
+
+template <typename T>
+NSPRef<T> AcquireNSPRef(T pointee) {
+ NSPRef<T> ref;
+ ref.Acquire(pointee);
+ return ref;
+}
+
+#endif // COMMON_NSREF_H_
diff --git a/chromium/third_party/dawn/src/common/NonCopyable.h b/chromium/third_party/dawn/src/dawn/common/NonCopyable.h
index 2d217dfbad3..2d217dfbad3 100644
--- a/chromium/third_party/dawn/src/common/NonCopyable.h
+++ b/chromium/third_party/dawn/src/dawn/common/NonCopyable.h
diff --git a/chromium/third_party/dawn/src/common/PlacementAllocated.h b/chromium/third_party/dawn/src/dawn/common/PlacementAllocated.h
index 6c715ca66a6..6c715ca66a6 100644
--- a/chromium/third_party/dawn/src/common/PlacementAllocated.h
+++ b/chromium/third_party/dawn/src/dawn/common/PlacementAllocated.h
diff --git a/chromium/third_party/dawn/src/common/Platform.h b/chromium/third_party/dawn/src/dawn/common/Platform.h
index f9471021fd1..f9471021fd1 100644
--- a/chromium/third_party/dawn/src/common/Platform.h
+++ b/chromium/third_party/dawn/src/dawn/common/Platform.h
diff --git a/chromium/third_party/dawn/src/common/Preprocessor.h b/chromium/third_party/dawn/src/dawn/common/Preprocessor.h
index 4eef736aa72..4eef736aa72 100644
--- a/chromium/third_party/dawn/src/common/Preprocessor.h
+++ b/chromium/third_party/dawn/src/dawn/common/Preprocessor.h
diff --git a/chromium/third_party/dawn/src/dawn/common/RefBase.h b/chromium/third_party/dawn/src/dawn/common/RefBase.h
new file mode 100644
index 00000000000..5d1078916bb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/RefBase.h
@@ -0,0 +1,183 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_REFBASE_H_
+#define COMMON_REFBASE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
// A common class for various smart-pointers acting on referenceable/releasable pointer-like
// objects. Logic for each specialization can be customized using a Traits type that looks
// like the following:
//
//   struct {
//      static constexpr T kNullValue = ...;
//      static void Reference(T value) { ... }
//      static void Release(T value) { ... }
//   };
//
// RefBase supports construction and assignment from T, from nullptr, and from other RefBase
// instances whose value type U is convertible to T (for example pointers to derived classes).
template <typename T, typename Traits>
class RefBase {
  public:
    // Default constructor and destructor.
    RefBase() : mValue(Traits::kNullValue) {
    }

    ~RefBase() {
        Release(mValue);
    }

    // Constructors from nullptr.
    constexpr RefBase(std::nullptr_t) : RefBase() {
    }

    RefBase<T, Traits>& operator=(std::nullptr_t) {
        Set(Traits::kNullValue);
        return *this;
    }

    // Constructors from a value T.
    RefBase(T value) : mValue(value) {
        Reference(value);
    }

    RefBase<T, Traits>& operator=(const T& value) {
        Set(value);
        return *this;
    }

    // Constructors from a RefBase<T>
    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
        Reference(other.mValue);
    }

    RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
        Set(other.mValue);
        return *this;
    }

    RefBase(RefBase<T, Traits>&& other) {
        mValue = other.Detach();
    }

    RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
        if (&other != this) {
            Release(mValue);
            mValue = other.Detach();
        }
        return *this;
    }

    // Constructors from a RefBase<U>. Note that in the *-assignment operators this cannot be the
    // same as `other` because overload resolution rules would have chosen the *-assignment
    // operators defined with `other` == RefBase<T, Traits>.
    //
    // These overloads must only participate in overload resolution when U is convertible to T,
    // which requires std::enable_if. (The previous form `typename std::is_convertible<U,T>::type`
    // names std::bool_constant, which is well-formed for ANY pair of types, so it constrained
    // nothing.)
    template <typename U,
              typename UTraits,
              typename = std::enable_if_t<std::is_convertible<U, T>::value>>
    RefBase(const RefBase<U, UTraits>& other) : mValue(other.mValue) {
        Reference(other.mValue);
    }

    template <typename U,
              typename UTraits,
              typename = std::enable_if_t<std::is_convertible<U, T>::value>>
    RefBase<T, Traits>& operator=(const RefBase<U, UTraits>& other) {
        Set(other.mValue);
        return *this;
    }

    template <typename U,
              typename UTraits,
              typename = std::enable_if_t<std::is_convertible<U, T>::value>>
    RefBase(RefBase<U, UTraits>&& other) {
        mValue = other.Detach();
    }

    template <typename U,
              typename UTraits,
              typename = std::enable_if_t<std::is_convertible<U, T>::value>>
    RefBase<T, Traits>& operator=(RefBase<U, UTraits>&& other) {
        Release(mValue);
        mValue = other.Detach();
        return *this;
    }

    // Comparison operators.
    bool operator==(const T& other) const {
        return mValue == other;
    }

    bool operator!=(const T& other) const {
        return mValue != other;
    }

    const T operator->() const {
        return mValue;
    }
    T operator->() {
        return mValue;
    }

    // Smart pointer methods.
    const T& Get() const {
        return mValue;
    }
    T& Get() {
        return mValue;
    }

    // Returns the value and resets `this` to null WITHOUT releasing: the caller takes over
    // ownership of the reference.
    [[nodiscard]] T Detach() {
        T value{std::move(mValue)};
        mValue = Traits::kNullValue;
        return value;
    }

    // Takes ownership of `value` without referencing it (the previous value is released).
    void Acquire(T value) {
        Release(mValue);
        mValue = value;
    }

    // Exposes the storage so out-parameter style APIs can fill it with an already-referenced
    // value. Must only be called on a null RefBase.
    [[nodiscard]] T* InitializeInto() {
        ASSERT(mValue == Traits::kNullValue);
        return &mValue;
    }

  private:
    // Friend is needed so that instances of RefBase<U> can call Reference and Release on
    // RefBase<T>.
    template <typename U, typename UTraits>
    friend class RefBase;

    // Null-safe wrappers around the Traits operations.
    static void Reference(T value) {
        if (value != Traits::kNullValue) {
            Traits::Reference(value);
        }
    }
    static void Release(T value) {
        if (value != Traits::kNullValue) {
            Traits::Release(value);
        }
    }

    void Set(T value) {
        if (mValue != value) {
            // Ensure that the new value is referenced before the old is released to prevent any
            // transitive frees that may affect the new value.
            Reference(value);
            Release(mValue);
            mValue = value;
        }
    }

    T mValue;
};
+
+#endif // COMMON_REFBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp b/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp
new file mode 100644
index 00000000000..6950d134503
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/RefCounted.cpp
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/RefCounted.h"
+
+#include "dawn/common/Assert.h"
+
+#include <cstddef>
+
// mRefCount packs two things into one 64-bit atomic: the low kPayloadBits bits hold an
// immutable payload set at construction, the remaining high bits hold the reference count.
static constexpr size_t kPayloadBits = 1;
// Mask selecting the payload bits.
static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
// Amount mRefCount changes by for one reference (leaves the payload bits untouched).
static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);

// Starts the object with a refcount of 1 and the given payload stored in the low bits.
RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
    // The payload must fit entirely within the reserved payload bits.
    ASSERT((payload & kPayloadMask) == payload);
}
+
// Returns the current reference count (payload bits shifted out). Test-only: the value may
// be stale as soon as it is read.
uint64_t RefCounted::GetRefCountForTesting() const {
    return mRefCount >> kPayloadBits;
}

// Returns the payload bits that were stored at construction time.
uint64_t RefCounted::GetRefCountPayload() const {
    // We only care about the payload bits of the refcount. These never change after
    // initialization so we can use the relaxed memory order. The order doesn't guarantee
    // anything except the atomicity of the load, which is enough since any past values of the
    // atomic will have the correct payload bits.
    return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
}
+
// Adds a reference. The caller must already hold at least one reference to `this`.
void RefCounted::Reference() {
    // The count bits (everything above the payload) must be non-zero: referencing a dead
    // object would be a use-after-free.
    ASSERT((mRefCount & ~kPayloadMask) != 0);

    // The relaxed ordering guarantees only the atomicity of the update, which is enough here
    // because the reference we are copying from still exists and makes sure other threads
    // don't delete `this`.
    // See the explanation in the Boost documentation:
    // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
    mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
}

// Removes a reference, deleting `this` (via the virtual DeleteThis) when the last reference
// disappears.
void RefCounted::Release() {
    ASSERT((mRefCount & ~kPayloadMask) != 0);

    // The release fence here is to make sure all accesses to the object on a thread A
    // happen-before the object is deleted on a thread B. The release memory order ensures that
    // all accesses on thread A happen-before the refcount is decreased and the atomic variable
    // makes sure the refcount decrease in A happens-before the refcount decrease in B. Finally
    // the acquire fence in the destruction case makes sure the refcount decrease in B
    // happens-before the `delete this`.
    //
    // See the explanation in the Boost documentation:
    // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
    uint64_t previousRefCount = mRefCount.fetch_sub(kRefCountIncrement, std::memory_order_release);

    // Check that the previous reference count was strictly less than 2, ignoring payload bits.
    if (previousRefCount < 2 * kRefCountIncrement) {
        // Note that on ARM64 this will generate a `dmb ish` instruction which is a global
        // memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
        // should be enough and could end up being faster.
        std::atomic_thread_fence(std::memory_order_acquire);
        DeleteThis();
    }
}
+
// API-facing wrappers that simply forward to Reference()/Release().
void RefCounted::APIReference() {
    Reference();
}

void RefCounted::APIRelease() {
    Release();
}

// Called when the refcount reaches 0. Virtual so subclasses can customize destruction
// (for example returning the object to a cache instead of freeing it).
void RefCounted::DeleteThis() {
    delete this;
}
diff --git a/chromium/third_party/dawn/src/dawn/common/RefCounted.h b/chromium/third_party/dawn/src/dawn/common/RefCounted.h
new file mode 100644
index 00000000000..65f37b935f4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/RefCounted.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_REFCOUNTED_H_
+#define COMMON_REFCOUNTED_H_
+
+#include "dawn/common/RefBase.h"
+
+#include <atomic>
+#include <cstdint>
+
// Base class for intrusively refcounted objects. The count starts at 1 (see RefCounted.cpp)
// and the object deletes itself through the virtual DeleteThis when the count reaches 0. A
// small immutable payload can be packed into the refcount word at construction.
class RefCounted {
  public:
    // NOTE(review): implicitly convertible from uint64_t; consider marking explicit.
    RefCounted(uint64_t payload = 0);

    uint64_t GetRefCountForTesting() const;
    // Returns the payload passed at construction.
    uint64_t GetRefCountPayload() const;

    // Manual refcount manipulation.
    void Reference();
    void Release();

    // Wrappers exposed to the API surface; forward to Reference()/Release().
    void APIReference();
    void APIRelease();

  protected:
    // Protected: RefCounted objects must be destroyed through Release(), not deleted directly.
    virtual ~RefCounted() = default;
    // A Derived class may override this if they require a custom deleter.
    virtual void DeleteThis();

  private:
    // Low bits hold the payload, high bits hold the actual count (layout in RefCounted.cpp).
    std::atomic<uint64_t> mRefCount;
};
+
// Traits adapting RefCounted pointers to RefBase: null is the null value and
// Reference/Release forward to the object's intrusive refcount.
template <typename T>
struct RefCountedTraits {
    static constexpr T* kNullValue = nullptr;
    static void Reference(T* value) {
        value->Reference();
    }
    static void Release(T* value) {
        value->Release();
    }
};

// Ref<T> is the smart pointer to use for holding references to RefCounted objects.
template <typename T>
class Ref : public RefBase<T*, RefCountedTraits<T>> {
  public:
    using RefBase<T*, RefCountedTraits<T>>::RefBase;
};
+
+template <typename T>
+Ref<T> AcquireRef(T* pointee) {
+ Ref<T> ref;
+ ref.Acquire(pointee);
+ return ref;
+}
+
+#endif // COMMON_REFCOUNTED_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/Result.cpp b/chromium/third_party/dawn/src/dawn/common/Result.cpp
new file mode 100644
index 00000000000..2101e47d098
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Result.cpp
@@ -0,0 +1,30 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Result.h"
+
// Implementation details of the tagged pointer Results
namespace detail {

    // Packs `pointer` and a 2-bit `type` tag into one intptr_t. The pointer must be at
    // least 4-byte aligned so its low two bits are free for the tag.
    intptr_t MakePayload(const void* pointer, PayloadType type) {
        intptr_t payload = reinterpret_cast<intptr_t>(pointer);
        ASSERT((payload & 3) == 0);
        return payload | type;
    }

    // Extracts the 2-bit tag written by MakePayload.
    PayloadType GetPayloadType(intptr_t payload) {
        return static_cast<PayloadType>(payload & 3);
    }

}  // namespace detail
diff --git a/chromium/third_party/dawn/src/dawn/common/Result.h b/chromium/third_party/dawn/src/dawn/common/Result.h
new file mode 100644
index 00000000000..5566829a9ae
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/Result.h
@@ -0,0 +1,526 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_RESULT_H_
+#define COMMON_RESULT_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+// Result<T, E> is the following sum type (Haskell notation):
+//
+// data Result T E = Success T | Error E | Empty
+//
+// It is meant to be used as the return type of functions that might fail. The reason for the Empty
+// case is that a Result should never be discarded, only destructured (its error or success moved
+// out) or moved into a different Result. The Empty case tags Results that have been moved out and
+// Result's destructor should ASSERT on it being Empty.
+//
+// Since C++ doesn't have efficient sum types for the special cases we care about, we provide
+// template specializations for them.
+
+template <typename T, typename E>
+class Result;
+
+// The interface of Result<T, E> should look like the following.
+// public:
+// Result(T&& success);
+// Result(std::unique_ptr<E> error);
+//
+// Result(Result<T, E>&& other);
+// Result<T, E>& operator=(Result<T, E>&& other);
+//
+// ~Result();
+//
+// bool IsError() const;
+// bool IsSuccess() const;
+//
+// T&& AcquireSuccess();
+// std::unique_ptr<E> AcquireError();
+
// Specialization of Result for returning errors only via pointers. It is basically a pointer
// where nullptr is both Success and Empty.
template <typename E>
class [[nodiscard]] Result<void, E> {
  public:
    // Success constructor.
    Result();
    // Error constructor; takes ownership of `error`.
    Result(std::unique_ptr<E> error);

    Result(Result<void, E> && other);
    Result<void, E>& operator=(Result<void, E>&& other);

    // ASSERTs that any error was acquired before destruction.
    ~Result();

    bool IsError() const;
    bool IsSuccess() const;

    // No-op in this specialization; present to keep the Result interface uniform.
    void AcquireSuccess();
    std::unique_ptr<E> AcquireError();

  private:
    std::unique_ptr<E> mError;
};

// Uses SFINAE to try to get alignof(T) but fallback to Default if T isn't defined.
template <typename T, size_t Default, typename = size_t>
constexpr size_t alignof_if_defined_else_default = Default;

template <typename T, size_t Default>
constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T))> = alignof(T);
+
// Specialization of Result when both the error and success are pointers. It is implemented as a
// tagged pointer. The tag for Success is 0 so that returning the value is fastest.

namespace detail {
    // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
    // but we really want them inlined so we keep them in the headers

    // Tag values stored in the low two bits of the payload.
    enum PayloadType {
        Success = 0,
        Error = 1,
        Empty = 2,
    };

    intptr_t MakePayload(const void* pointer, PayloadType type);
    PayloadType GetPayloadType(intptr_t payload);

    template <typename T>
    static T* GetSuccessFromPayload(intptr_t payload);
    template <typename E>
    static E* GetErrorFromPayload(intptr_t payload);

    // Empty tag with a null pointer: the state of moved-from / acquired Results.
    constexpr static intptr_t kEmptyPayload = Empty;
}  // namespace detail
+
// Tagged-pointer specialization: the success or error pointer is stored directly in
// mPayload with its type tag in the low two bits.
template <typename T, typename E>
class [[nodiscard]] Result<T*, E> {
  public:
    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
                  "Result<T*, E*> reserves two bits for tagging pointers");
    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                  "Result<T*, E*> reserves two bits for tagging pointers");

    // Success constructor; `success` must be at least 4-byte aligned.
    Result(T * success);
    // Error constructor; takes ownership of `error`.
    Result(std::unique_ptr<E> error);

    // Support returning a Result<T*, E*> from a Result<TChild*, E*>
    template <typename TChild>
    Result(Result<TChild*, E> && other);
    template <typename TChild>
    Result<T*, E>& operator=(Result<TChild*, E>&& other);

    // ASSERTs that the success or error was acquired before destruction.
    ~Result();

    bool IsError() const;
    bool IsSuccess() const;

    T* AcquireSuccess();
    std::unique_ptr<E> AcquireError();

  private:
    template <typename T2, typename E2>
    friend class Result;

    intptr_t mPayload = detail::kEmptyPayload;
};

// Same as Result<T*, E> but for const success pointers.
template <typename T, typename E>
class [[nodiscard]] Result<const T*, E> {
  public:
    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
                  "Result<T*, E*> reserves two bits for tagging pointers");
    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                  "Result<T*, E*> reserves two bits for tagging pointers");

    Result(const T* success);
    Result(std::unique_ptr<E> error);

    Result(Result<const T*, E> && other);
    Result<const T*, E>& operator=(Result<const T*, E>&& other);

    ~Result();

    bool IsError() const;
    bool IsSuccess() const;

    const T* AcquireSuccess();
    std::unique_ptr<E> AcquireError();

  private:
    intptr_t mPayload = detail::kEmptyPayload;
};
+
template <typename T>
class Ref;

// Tagged-pointer specialization for Ref<T> success values: the Ref's pointer is detached
// into the payload on construction and re-wrapped on acquisition.
template <typename T, typename E>
class [[nodiscard]] Result<Ref<T>, E> {
  public:
    static_assert(alignof_if_defined_else_default<T, 4> >= 4,
                  "Result<Ref<T>, E> reserves two bits for tagging pointers");
    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                  "Result<Ref<T>, E> reserves two bits for tagging pointers");

    // Success constructors; U* must be convertible to T*.
    template <typename U>
    Result(Ref<U> && success);
    template <typename U>
    Result(const Ref<U>& success);
    // Error constructor; takes ownership of `error`.
    Result(std::unique_ptr<E> error);

    template <typename U>
    Result(Result<Ref<U>, E> && other);
    // NOTE(review): the return type is likely meant to be Result<Ref<T>, E>&; as written the
    // operator only instantiates when U == T (returning `*this` as a different type fails).
    template <typename U>
    Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);

    // ASSERTs that the success or error was acquired before destruction.
    ~Result();

    bool IsError() const;
    bool IsSuccess() const;

    Ref<T> AcquireSuccess();
    std::unique_ptr<E> AcquireError();

  private:
    template <typename T2, typename E2>
    friend class Result;

    intptr_t mPayload = detail::kEmptyPayload;
};
+
// Catchall definition of Result<T, E> implemented as a tagged struct. It could be improved to use
// a tagged union instead if it turns out to be a hotspot. T and E must be movable and default
// constructible.
template <typename T, typename E>
class [[nodiscard]] Result {
  public:
    // Success constructor; moves the value in.
    Result(T && success);
    // Error constructor; takes ownership of `error`.
    Result(std::unique_ptr<E> error);

    Result(Result<T, E> && other);
    Result<T, E>& operator=(Result<T, E>&& other);

    // ASSERTs that the success or error was acquired before destruction.
    ~Result();

    bool IsError() const;
    bool IsSuccess() const;

    T&& AcquireSuccess();
    std::unique_ptr<E> AcquireError();

  private:
    // Which member holds the live value. `Acquired` tags Results whose value has been moved
    // out (including moved-from Results).
    enum PayloadType {
        Success = 0,
        Error = 1,
        Acquired = 2,
    };
    PayloadType mType;

    std::unique_ptr<E> mError;
    T mSuccess;
};
+
// Implementation of Result<void, E>
template <typename E>
Result<void, E>::Result() {
}

template <typename E>
Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
}

template <typename E>
Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
}

template <typename E>
Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
    // Overwriting an unhandled error would silently drop it.
    ASSERT(mError == nullptr);
    mError = std::move(other.mError);
    return *this;
}

template <typename E>
Result<void, E>::~Result() {
    // Any error must have been acquired (handled) before destruction.
    ASSERT(mError == nullptr);
}

template <typename E>
bool Result<void, E>::IsError() const {
    return mError != nullptr;
}

template <typename E>
bool Result<void, E>::IsSuccess() const {
    return mError == nullptr;
}

template <typename E>
void Result<void, E>::AcquireSuccess() {
}

template <typename E>
std::unique_ptr<E> Result<void, E>::AcquireError() {
    return std::move(mError);
}
+
// Implementation details of the tagged pointer Results
namespace detail {

    // Recovers the success pointer; the Success tag is 0 so no bits need clearing.
    template <typename T>
    T* GetSuccessFromPayload(intptr_t payload) {
        ASSERT(GetPayloadType(payload) == Success);
        return reinterpret_cast<T*>(payload);
    }

    // Recovers the error pointer; XOR with the Error tag (1) clears the tag bit.
    template <typename E>
    E* GetErrorFromPayload(intptr_t payload) {
        ASSERT(GetPayloadType(payload) == Error);
        return reinterpret_cast<E*>(payload ^ 1);
    }

}  // namespace detail
+
+// Implementation of Result<T*, E>
+template <typename T, typename E>
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
+}
+
+template <typename T, typename E>
+Result<T*, E>::Result(std::unique_ptr<E> error)
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+template <typename TChild>
+Result<T*, E>::Result(Result<TChild*, E>&& other) : mPayload(other.mPayload) {
+ other.mPayload = detail::kEmptyPayload;
+ static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
+}
+
+template <typename T, typename E>
+template <typename TChild>
+Result<T*, E>& Result<T*, E>::operator=(Result<TChild*, E>&& other) {
+ ASSERT(mPayload == detail::kEmptyPayload);
+ static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
+ mPayload = other.mPayload;
+ other.mPayload = detail::kEmptyPayload;
+ return *this;
+}
+
+template <typename T, typename E>
+Result<T*, E>::~Result() {
+ ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<T*, E>::IsError() const {
+ return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<T*, E>::IsSuccess() const {
+ return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+T* Result<T*, E>::AcquireSuccess() {
+ T* success = detail::GetSuccessFromPayload<T>(mPayload);
+ mPayload = detail::kEmptyPayload;
+ return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<T*, E>::AcquireError() {
+ std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+ mPayload = detail::kEmptyPayload;
+ return std::move(error);
+}
+
+// Implementation of Result<const T*, E*>
+template <typename T, typename E>
+Result<const T*, E>::Result(const T* success)
+ : mPayload(detail::MakePayload(success, detail::Success)) {
+}
+
+template <typename T, typename E>
+Result<const T*, E>::Result(std::unique_ptr<E> error)
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
+ other.mPayload = detail::kEmptyPayload;
+}
+
+template <typename T, typename E>
+Result<const T*, E>& Result<const T*, E>::operator=(Result<const T*, E>&& other) {
+ ASSERT(mPayload == detail::kEmptyPayload);
+ mPayload = other.mPayload;
+ other.mPayload = detail::kEmptyPayload;
+ return *this;
+}
+
+template <typename T, typename E>
+Result<const T*, E>::~Result() {
+ ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<const T*, E>::IsError() const {
+ return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<const T*, E>::IsSuccess() const {
+ return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+const T* Result<const T*, E>::AcquireSuccess() {
+ T* success = detail::GetSuccessFromPayload<T>(mPayload);
+ mPayload = detail::kEmptyPayload;
+ return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<const T*, E>::AcquireError() {
+ std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+ mPayload = detail::kEmptyPayload;
+ return std::move(error);
+}
+
+// Implementation of Result<Ref<T>, E>
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(Ref<U>&& success)
+ : mPayload(detail::MakePayload(success.Detach(), detail::Success)) {
+ static_assert(std::is_convertible<U*, T*>::value);
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
+}
+
+template <typename T, typename E>
+Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<T>, E>::Result(Result<Ref<U>, E>&& other) : mPayload(other.mPayload) {
+ static_assert(std::is_convertible<U*, T*>::value);
+ other.mPayload = detail::kEmptyPayload;
+}
+
+template <typename T, typename E>
+template <typename U>
+Result<Ref<U>, E>& Result<Ref<T>, E>::operator=(Result<Ref<U>, E>&& other) {
+ static_assert(std::is_convertible<U*, T*>::value);
+ ASSERT(mPayload == detail::kEmptyPayload);
+ mPayload = other.mPayload;
+ other.mPayload = detail::kEmptyPayload;
+ return *this;
+}
+
+template <typename T, typename E>
+Result<Ref<T>, E>::~Result() {
+ ASSERT(mPayload == detail::kEmptyPayload);
+}
+
+template <typename T, typename E>
+bool Result<Ref<T>, E>::IsError() const {
+ return detail::GetPayloadType(mPayload) == detail::Error;
+}
+
+template <typename T, typename E>
+bool Result<Ref<T>, E>::IsSuccess() const {
+ return detail::GetPayloadType(mPayload) == detail::Success;
+}
+
+template <typename T, typename E>
+Ref<T> Result<Ref<T>, E>::AcquireSuccess() {
+ ASSERT(IsSuccess());
+ Ref<T> success = AcquireRef(detail::GetSuccessFromPayload<T>(mPayload));
+ mPayload = detail::kEmptyPayload;
+ return success;
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
+ ASSERT(IsError());
+ std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
+ mPayload = detail::kEmptyPayload;
+ return std::move(error);
+}
+
+// Implementation of Result<T, E>
+template <typename T, typename E>
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
+}
+
+template <typename T, typename E>
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
+}
+
+template <typename T, typename E>
+Result<T, E>::~Result() {
+ ASSERT(mType == Acquired);
+}
+
+template <typename T, typename E>
+Result<T, E>::Result(Result<T, E>&& other)
+ : mType(other.mType), mError(std::move(other.mError)), mSuccess(std::move(other.mSuccess)) {
+ other.mType = Acquired;
+}
+template <typename T, typename E>
+Result<T, E>& Result<T, E>::operator=(Result<T, E>&& other) {
+ mType = other.mType;
+ mError = std::move(other.mError);
+ mSuccess = std::move(other.mSuccess);
+ other.mType = Acquired;
+ return *this;
+}
+
+template <typename T, typename E>
+bool Result<T, E>::IsError() const {
+ return mType == Error;
+}
+
+template <typename T, typename E>
+bool Result<T, E>::IsSuccess() const {
+ return mType == Success;
+}
+
+template <typename T, typename E>
+T&& Result<T, E>::AcquireSuccess() {
+ ASSERT(mType == Success);
+ mType = Acquired;
+ return std::move(mSuccess);
+}
+
+template <typename T, typename E>
+std::unique_ptr<E> Result<T, E>::AcquireError() {
+ ASSERT(mType == Error);
+ mType = Acquired;
+ return std::move(mError);
+}
+
+#endif // COMMON_RESULT_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialMap.h b/chromium/third_party/dawn/src/dawn/common/SerialMap.h
new file mode 100644
index 00000000000..750f16e0937
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SerialMap.h
@@ -0,0 +1,76 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALMAP_H_
+#define COMMON_SERIALMAP_H_
+
+#include "dawn/common/SerialStorage.h"
+
+#include <map>
+#include <vector>
+
template <typename Serial, typename Value>
class SerialMap;

// Storage policy for SerialMap: an ordered std::map from Serial to a bucket of Values.
template <typename SerialT, typename ValueT>
struct SerialStorageTraits<SerialMap<SerialT, ValueT>> {
    using Serial = SerialT;
    using Value = ValueT;
    using Storage = std::map<Serial, std::vector<Value>>;
    using StorageIterator = typename Storage::iterator;
    using ConstStorageIterator = typename Storage::const_iterator;
};

// SerialMap stores a map from Serial to Value.
// Unlike SerialQueue, items may be enqueued with Serials in any
// arbitrary order. SerialMap provides useful iterators for iterating
// through Value items in order of increasing Serial.
template <typename Serial, typename Value>
class SerialMap : public SerialStorage<SerialMap<Serial, Value>> {
  public:
    // Adds a single value (copied or moved) or a whole batch of values under `serial`.
    void Enqueue(const Value& value, Serial serial);
    void Enqueue(Value&& value, Serial serial);
    void Enqueue(const std::vector<Value>& values, Serial serial);
    void Enqueue(std::vector<Value>&& values, Serial serial);
};
+
+// SerialMap
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(const Value& value, Serial serial) {
+ this->mStorage[serial].emplace_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(Value&& value, Serial serial) {
+ this->mStorage[serial].emplace_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
+ DAWN_ASSERT(values.size() > 0);
+ for (const Value& value : values) {
+ Enqueue(value, serial);
+ }
+}
+
+template <typename Serial, typename Value>
+void SerialMap<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
+ DAWN_ASSERT(values.size() > 0);
+ for (const Value& value : values) {
+ Enqueue(value, serial);
+ }
+}
+
+#endif // COMMON_SERIALMAP_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialQueue.h b/chromium/third_party/dawn/src/dawn/common/SerialQueue.h
new file mode 100644
index 00000000000..3e33f1e61ff
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SerialQueue.h
@@ -0,0 +1,85 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALQUEUE_H_
+#define COMMON_SERIALQUEUE_H_
+
+#include "dawn/common/SerialStorage.h"
+
+#include <vector>
+
template <typename Serial, typename Value>
class SerialQueue;

// Storage policy for SerialQueue: a flat vector of (Serial, bucket) pairs, kept sorted by
// construction because serials are enqueued in non-decreasing order.
template <typename SerialT, typename ValueT>
struct SerialStorageTraits<SerialQueue<SerialT, ValueT>> {
    using Serial = SerialT;
    using Value = ValueT;
    using SerialPair = std::pair<Serial, std::vector<Value>>;
    using Storage = std::vector<SerialPair>;
    using StorageIterator = typename Storage::iterator;
    using ConstStorageIterator = typename Storage::const_iterator;
};

// SerialQueue stores an associative list mapping a Serial to Value.
// It enforces that the Serials enqueued are not strictly decreasing.
// This makes it very efficient to iterate or clear all items added up
// to some Serial value because they are stored contiguously in memory.
template <typename Serial, typename Value>
class SerialQueue : public SerialStorage<SerialQueue<Serial, Value>> {
  public:
    // The serial must be given in (not strictly) increasing order.
    void Enqueue(const Value& value, Serial serial);
    void Enqueue(Value&& value, Serial serial);
    void Enqueue(const std::vector<Value>& values, Serial serial);
    void Enqueue(std::vector<Value>&& values, Serial serial);
};
+
+// SerialQueue
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(const Value& value, Serial serial) {
+ DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+
+ if (this->Empty() || this->mStorage.back().first < serial) {
+ this->mStorage.emplace_back(serial, std::vector<Value>{});
+ }
+ this->mStorage.back().second.push_back(value);
+}
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(Value&& value, Serial serial) {
+ DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+
+ if (this->Empty() || this->mStorage.back().first < serial) {
+ this->mStorage.emplace_back(serial, std::vector<Value>{});
+ }
+ this->mStorage.back().second.push_back(std::move(value));
+}
+
// Copies a whole batch of values as a new back entry under `serial`.
// NOTE(review): unlike the single-value overloads, this always appends a new entry even when
// the back entry already has the same serial, producing duplicate serial keys — presumably
// fine for iteration; verify against SerialStorage's consumers.
template <typename Serial, typename Value>
void SerialQueue<Serial, Value>::Enqueue(const std::vector<Value>& values, Serial serial) {
    DAWN_ASSERT(values.size() > 0);
    DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
    this->mStorage.emplace_back(serial, values);
}
+
+template <typename Serial, typename Value>
+void SerialQueue<Serial, Value>::Enqueue(std::vector<Value>&& values, Serial serial) {
+ DAWN_ASSERT(values.size() > 0);
+ DAWN_ASSERT(this->Empty() || this->mStorage.back().first <= serial);
+ this->mStorage.emplace_back(serial, values);
+}
+
+#endif // COMMON_SERIALQUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/SerialStorage.h b/chromium/third_party/dawn/src/dawn/common/SerialStorage.h
new file mode 100644
index 00000000000..8a103f584d6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SerialStorage.h
@@ -0,0 +1,322 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SERIALSTORAGE_H_
+#define COMMON_SERIALSTORAGE_H_
+
+#include "dawn/common/Assert.h"
+
+#include <cstdint>
+#include <utility>
+
+template <typename T>
+struct SerialStorageTraits {};
+
+template <typename Derived>
+class SerialStorage {
+ protected:
+ using Serial = typename SerialStorageTraits<Derived>::Serial;
+ using Value = typename SerialStorageTraits<Derived>::Value;
+ using Storage = typename SerialStorageTraits<Derived>::Storage;
+ using StorageIterator = typename SerialStorageTraits<Derived>::StorageIterator;
+ using ConstStorageIterator = typename SerialStorageTraits<Derived>::ConstStorageIterator;
+
+ public:
+ class Iterator {
+ public:
+ Iterator(StorageIterator start);
+ Iterator& operator++();
+
+ bool operator==(const Iterator& other) const;
+ bool operator!=(const Iterator& other) const;
+ Value& operator*() const;
+
+ private:
+ StorageIterator mStorageIterator;
+        // mSerialIterator == nullptr is a special tag meaning "the first element of
+        // mStorageIterator->second". It avoids dereferencing mStorageIterator when it
+        // equals mStorage.end(), which would be invalid.
+ Value* mSerialIterator;
+ };
+
+ class ConstIterator {
+ public:
+ ConstIterator(ConstStorageIterator start);
+ ConstIterator& operator++();
+
+ bool operator==(const ConstIterator& other) const;
+ bool operator!=(const ConstIterator& other) const;
+ const Value& operator*() const;
+
+ private:
+ ConstStorageIterator mStorageIterator;
+ const Value* mSerialIterator;
+ };
+
+ class BeginEnd {
+ public:
+ BeginEnd(StorageIterator start, StorageIterator end);
+
+ Iterator begin() const;
+ Iterator end() const;
+
+ private:
+ StorageIterator mStartIt;
+ StorageIterator mEndIt;
+ };
+
+ class ConstBeginEnd {
+ public:
+ ConstBeginEnd(ConstStorageIterator start, ConstStorageIterator end);
+
+ ConstIterator begin() const;
+ ConstIterator end() const;
+
+ private:
+ ConstStorageIterator mStartIt;
+ ConstStorageIterator mEndIt;
+ };
+
+    // Derived classes may specialize constraints for elements stored
+ // Ex.) SerialQueue enforces that the serial must be given in (not strictly)
+ // increasing order
+ template <typename... Params>
+ void Enqueue(Params&&... args, Serial serial) {
+ Derived::Enqueue(std::forward<Params>(args)..., serial);
+ }
+
+ bool Empty() const;
+
+ // The UpTo variants of Iterate and Clear affect all values associated to a serial
+ // that is smaller OR EQUAL to the given serial. Iterating is done like so:
+    //     for (const T& value : queue.IterateAll()) { stuff(value); }
+ ConstBeginEnd IterateAll() const;
+ ConstBeginEnd IterateUpTo(Serial serial) const;
+ BeginEnd IterateAll();
+ BeginEnd IterateUpTo(Serial serial);
+
+ void Clear();
+ void ClearUpTo(Serial serial);
+
+ Serial FirstSerial() const;
+ Serial LastSerial() const;
+
+ protected:
+    // Returns the first StorageIterator with a serial bigger than the given serial.
+ ConstStorageIterator FindUpTo(Serial serial) const;
+ StorageIterator FindUpTo(Serial serial);
+ Storage mStorage;
+};
+
+// SerialStorage
+
+template <typename Derived>
+bool SerialStorage<Derived>::Empty() const {
+ return mStorage.empty();
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateAll() const {
+ return {mStorage.begin(), mStorage.end()};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstBeginEnd SerialStorage<Derived>::IterateUpTo(
+ Serial serial) const {
+ return {mStorage.begin(), FindUpTo(serial)};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateAll() {
+ return {mStorage.begin(), mStorage.end()};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::BeginEnd SerialStorage<Derived>::IterateUpTo(Serial serial) {
+ return {mStorage.begin(), FindUpTo(serial)};
+}
+
+template <typename Derived>
+void SerialStorage<Derived>::Clear() {
+ mStorage.clear();
+}
+
+template <typename Derived>
+void SerialStorage<Derived>::ClearUpTo(Serial serial) {
+ mStorage.erase(mStorage.begin(), FindUpTo(serial));
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Serial SerialStorage<Derived>::FirstSerial() const {
+ DAWN_ASSERT(!Empty());
+ return mStorage.begin()->first;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Serial SerialStorage<Derived>::LastSerial() const {
+ DAWN_ASSERT(!Empty());
+ return mStorage.back().first;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstStorageIterator SerialStorage<Derived>::FindUpTo(
+ Serial serial) const {
+ auto it = mStorage.begin();
+ while (it != mStorage.end() && it->first <= serial) {
+ it++;
+ }
+ return it;
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpTo(Serial serial) {
+ auto it = mStorage.begin();
+ while (it != mStorage.end() && it->first <= serial) {
+ it++;
+ }
+ return it;
+}
+
+// SerialStorage::BeginEnd
+
+template <typename Derived>
+SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
+ typename SerialStorage<Derived>::StorageIterator end)
+ : mStartIt(start), mEndIt(end) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
+ return {mStartIt};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end() const {
+ return {mEndIt};
+}
+
+// SerialStorage::Iterator
+
+template <typename Derived>
+SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
+ : mStorageIterator(start), mSerialIterator(nullptr) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
+ Value* vectorData = mStorageIterator->second.data();
+
+ if (mSerialIterator == nullptr) {
+ mSerialIterator = vectorData + 1;
+ } else {
+ mSerialIterator++;
+ }
+
+ if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
+ mSerialIterator = nullptr;
+ mStorageIterator++;
+ }
+
+ return *this;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::Iterator::operator==(
+ const typename SerialStorage<Derived>::Iterator& other) const {
+ return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::Iterator::operator!=(
+ const typename SerialStorage<Derived>::Iterator& other) const {
+ return !(*this == other);
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::Value& SerialStorage<Derived>::Iterator::operator*() const {
+ if (mSerialIterator == nullptr) {
+ return *mStorageIterator->second.begin();
+ }
+ return *mSerialIterator;
+}
+
+// SerialStorage::ConstBeginEnd
+
+template <typename Derived>
+SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
+ typename SerialStorage<Derived>::ConstStorageIterator start,
+ typename SerialStorage<Derived>::ConstStorageIterator end)
+ : mStartIt(start), mEndIt(end) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
+ const {
+ return {mStartIt};
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::end() const {
+ return {mEndIt};
+}
+
+// SerialStorage::ConstIterator
+
+template <typename Derived>
+SerialStorage<Derived>::ConstIterator::ConstIterator(
+ typename SerialStorage<Derived>::ConstStorageIterator start)
+ : mStorageIterator(start), mSerialIterator(nullptr) {
+}
+
+template <typename Derived>
+typename SerialStorage<Derived>::ConstIterator&
+SerialStorage<Derived>::ConstIterator::operator++() {
+ const Value* vectorData = mStorageIterator->second.data();
+
+ if (mSerialIterator == nullptr) {
+ mSerialIterator = vectorData + 1;
+ } else {
+ mSerialIterator++;
+ }
+
+ if (mSerialIterator >= vectorData + mStorageIterator->second.size()) {
+ mSerialIterator = nullptr;
+ mStorageIterator++;
+ }
+
+ return *this;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::ConstIterator::operator==(
+ const typename SerialStorage<Derived>::ConstIterator& other) const {
+ return other.mStorageIterator == mStorageIterator && other.mSerialIterator == mSerialIterator;
+}
+
+template <typename Derived>
+bool SerialStorage<Derived>::ConstIterator::operator!=(
+ const typename SerialStorage<Derived>::ConstIterator& other) const {
+ return !(*this == other);
+}
+
+template <typename Derived>
+const typename SerialStorage<Derived>::Value& SerialStorage<Derived>::ConstIterator::operator*()
+ const {
+ if (mSerialIterator == nullptr) {
+ return *mStorageIterator->second.begin();
+ }
+ return *mSerialIterator;
+}
+
+#endif // COMMON_SERIALSTORAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp
new file mode 100644
index 00000000000..d680ee36c12
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.cpp
@@ -0,0 +1,247 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SlabAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <limits>
+#include <new>
+
+// IndexLinkNode
+
+SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
+ : index(index), nextIndex(nextIndex) {
+}
+
+// Slab
+
+SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
+ : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
+}
+
+SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
+}
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
+ Slab* slab = this->next;
+ while (slab != nullptr) {
+ Slab* next = slab->next;
+ ASSERT(slab->blocksInUse == 0);
+ // Delete the slab's allocation. The slab is allocated inside slab->allocation.
+ delete[] slab->allocation;
+ slab = next;
+ }
+}
+
+// SlabAllocatorImpl
+
+SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
+ std::numeric_limits<SlabAllocatorImpl::Index>::max();
+
+SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
+ uint32_t objectSize,
+ uint32_t objectAlignment)
+ : mAllocationAlignment(std::max(static_cast<uint32_t>(alignof(Slab)), objectAlignment)),
+ mSlabBlocksOffset(Align(sizeof(Slab), objectAlignment)),
+ mIndexLinkNodeOffset(Align(objectSize, alignof(IndexLinkNode))),
+ mBlockStride(Align(mIndexLinkNodeOffset + sizeof(IndexLinkNode), objectAlignment)),
+ mBlocksPerSlab(blocksPerSlab),
+ mTotalAllocationSize(
+ // required allocation size
+ static_cast<size_t>(mSlabBlocksOffset) + mBlocksPerSlab * mBlockStride +
+ // Pad the allocation size by mAllocationAlignment so that the aligned allocation still
+ // fulfills the required size.
+ mAllocationAlignment) {
+ ASSERT(IsPowerOfTwo(mAllocationAlignment));
+}
+
+SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
+ : mAllocationAlignment(rhs.mAllocationAlignment),
+ mSlabBlocksOffset(rhs.mSlabBlocksOffset),
+ mIndexLinkNodeOffset(rhs.mIndexLinkNodeOffset),
+ mBlockStride(rhs.mBlockStride),
+ mBlocksPerSlab(rhs.mBlocksPerSlab),
+ mTotalAllocationSize(rhs.mTotalAllocationSize),
+ mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
+ mFullSlabs(std::move(rhs.mFullSlabs)),
+ mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
+}
+
+SlabAllocatorImpl::~SlabAllocatorImpl() = default;
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::OffsetFrom(
+ IndexLinkNode* node,
+ std::make_signed_t<Index> offset) const {
+ return reinterpret_cast<IndexLinkNode*>(reinterpret_cast<char*>(node) +
+ static_cast<intptr_t>(mBlockStride) * offset);
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::NodeFromObject(void* object) const {
+ return reinterpret_cast<SlabAllocatorImpl::IndexLinkNode*>(static_cast<char*>(object) +
+ mIndexLinkNodeOffset);
+}
+
+void* SlabAllocatorImpl::ObjectFromNode(IndexLinkNode* node) const {
+ return static_cast<void*>(reinterpret_cast<char*>(node) - mIndexLinkNodeOffset);
+}
+
+bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const {
+ char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset;
+ IndexLinkNode* firstNode = NodeFromObject(firstObjectPtr);
+ IndexLinkNode* lastNode = OffsetFrom(firstNode, mBlocksPerSlab - 1);
+ return node >= firstNode && node <= lastNode && node->index < mBlocksPerSlab;
+}
+
+void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const {
+ ASSERT(IsNodeInSlab(slab, node));
+
+ IndexLinkNode* head = slab->freeList;
+ if (head == nullptr) {
+ node->nextIndex = kInvalidIndex;
+ } else {
+ ASSERT(IsNodeInSlab(slab, head));
+ node->nextIndex = head->index;
+ }
+ slab->freeList = node;
+
+ ASSERT(slab->blocksInUse != 0);
+ slab->blocksInUse--;
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::PopFront(Slab* slab) const {
+ ASSERT(slab->freeList != nullptr);
+
+ IndexLinkNode* head = slab->freeList;
+ if (head->nextIndex == kInvalidIndex) {
+ slab->freeList = nullptr;
+ } else {
+ ASSERT(IsNodeInSlab(slab, head));
+ slab->freeList = OffsetFrom(head, head->nextIndex - head->index);
+ ASSERT(IsNodeInSlab(slab, slab->freeList));
+ }
+
+ ASSERT(slab->blocksInUse < mBlocksPerSlab);
+ slab->blocksInUse++;
+ return head;
+}
+
+void SlabAllocatorImpl::SentinelSlab::Prepend(SlabAllocatorImpl::Slab* slab) {
+ if (this->next != nullptr) {
+ this->next->prev = slab;
+ }
+ slab->prev = this;
+ slab->next = this->next;
+ this->next = slab;
+}
+
+void SlabAllocatorImpl::Slab::Splice() {
+ SlabAllocatorImpl::Slab* originalPrev = this->prev;
+ SlabAllocatorImpl::Slab* originalNext = this->next;
+
+ this->prev = nullptr;
+ this->next = nullptr;
+
+ ASSERT(originalPrev != nullptr);
+
+ // Set the originalNext's prev pointer.
+ if (originalNext != nullptr) {
+ originalNext->prev = originalPrev;
+ }
+
+ // Now, set the originalNext as the originalPrev's new next.
+ originalPrev->next = originalNext;
+}
+
+void* SlabAllocatorImpl::Allocate() {
+ if (mAvailableSlabs.next == nullptr) {
+ GetNewSlab();
+ }
+
+ Slab* slab = mAvailableSlabs.next;
+ IndexLinkNode* node = PopFront(slab);
+ ASSERT(node != nullptr);
+
+ // Move full slabs to a separate list, so allocate can always return quickly.
+ if (slab->blocksInUse == mBlocksPerSlab) {
+ slab->Splice();
+ mFullSlabs.Prepend(slab);
+ }
+
+ return ObjectFromNode(node);
+}
+
+void SlabAllocatorImpl::Deallocate(void* ptr) {
+ IndexLinkNode* node = NodeFromObject(ptr);
+
+ ASSERT(node->index < mBlocksPerSlab);
+ void* firstAllocation = ObjectFromNode(OffsetFrom(node, -node->index));
+ Slab* slab = reinterpret_cast<Slab*>(static_cast<char*>(firstAllocation) - mSlabBlocksOffset);
+ ASSERT(slab != nullptr);
+
+ bool slabWasFull = slab->blocksInUse == mBlocksPerSlab;
+
+ ASSERT(slab->blocksInUse != 0);
+ PushFront(slab, node);
+
+ if (slabWasFull) {
+ // Slab is in the full list. Move it to the recycled list.
+ ASSERT(slab->freeList != nullptr);
+ slab->Splice();
+ mRecycledSlabs.Prepend(slab);
+ }
+
+ // TODO(crbug.com/dawn/825): Occasionally prune slabs if |blocksInUse == 0|.
+ // Doing so eagerly hurts performance.
+}
+
+void SlabAllocatorImpl::GetNewSlab() {
+ // Should only be called when there are no available slabs.
+ ASSERT(mAvailableSlabs.next == nullptr);
+
+ if (mRecycledSlabs.next != nullptr) {
+ // If the recycled list is non-empty, swap their contents.
+ std::swap(mAvailableSlabs.next, mRecycledSlabs.next);
+
+ // We swapped the next pointers, so the prev pointer is wrong.
+ // Update it here.
+ mAvailableSlabs.next->prev = &mAvailableSlabs;
+ ASSERT(mRecycledSlabs.next == nullptr);
+ return;
+ }
+
+ // TODO(crbug.com/dawn/824): Use aligned_alloc when possible. It should be available with
+ // C++17 but on macOS it also requires macOS 10.15 to work.
+ char* allocation = new char[mTotalAllocationSize];
+ char* alignedPtr = AlignPtr(allocation, mAllocationAlignment);
+
+ char* dataStart = alignedPtr + mSlabBlocksOffset;
+
+ IndexLinkNode* node = NodeFromObject(dataStart);
+ for (uint32_t i = 0; i < mBlocksPerSlab; ++i) {
+ new (OffsetFrom(node, i)) IndexLinkNode(i, i + 1);
+ }
+
+ IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
+ lastNode->nextIndex = kInvalidIndex;
+
+ mAvailableSlabs.Prepend(new (alignedPtr) Slab(allocation, node));
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h
new file mode 100644
index 00000000000..58d2d94d0cf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SlabAllocator.h
@@ -0,0 +1,184 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SLABALLOCATOR_H_
+#define COMMON_SLABALLOCATOR_H_
+
+#include "dawn/common/PlacementAllocated.h"
+
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
+// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
+// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
+// recycles memory from previous allocations, except multiple allocations are hosted contiguously in
+// one large slab.
+//
+// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
+// into an std::vector. To service an allocation request, the allocator only needs to know the first
+// currently available slab. There are three backing linked lists: AVAILABLE, FULL, and RECYCLED.
+// A slab that is AVAILABLE can be used to immediately service allocation requests. Once it has no
+// remaining space, it is moved to the FULL state. When a FULL slab sees any deallocations, it is
+// moved to the RECYCLED state. The RECYCLED state is separate from the AVAILABLE state so that
+// deallocations don't immediately prepend slabs to the AVAILABLE list, and change the current slab
+// servicing allocations. When the AVAILABLE list becomes empty it is swapped with the RECYCLED
+// list.
+//
+// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
+// plus the extra bytes a "block") used to specify the constant index of the block in its parent
+// slab, as well as the index of the next available block. So, following the block next-indices
+// forms a linked list of free blocks.
+//
+// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
+// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
+// next-indices are initialized to form the free-list of blocks.
+//
+// Allocation: When an object is allocated, if there is no space available in an existing slab, a
+// new slab is created (or an old slab is recycled). The first block of the slab is removed and
+// returned.
+//
+// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
+// because it stores the index of its own allocation. That block is then prepended to the slab's
+// free list.
+class SlabAllocatorImpl {
+ public:
+ // Allocations host their current index and the index of the next free block.
+ // Because this is an index, and not a byte offset, it can be much smaller than a size_t.
+ // TODO(crbug.com/dawn/825): Is uint8_t sufficient?
+ using Index = uint16_t;
+
+ SlabAllocatorImpl(SlabAllocatorImpl&& rhs);
+
+ protected:
+ // This is essentially a singly linked list using indices instead of pointers,
+ // so we store the index of "this" in |this->index|.
+ struct IndexLinkNode : PlacementAllocated {
+ IndexLinkNode(Index index, Index nextIndex);
+
+ const Index index; // The index of this block in the slab.
+ Index nextIndex; // The index of the next available block. kInvalidIndex, if none.
+ };
+
+ struct Slab : PlacementAllocated {
+ // A slab is placement-allocated into an aligned pointer from a separate allocation.
+ // Ownership of the allocation is transferred to the slab on creation.
+ // | ---------- allocation --------- |
+ // | pad | Slab | data ------------> |
+ Slab(char allocation[], IndexLinkNode* head);
+ Slab(Slab&& rhs);
+
+ void Splice();
+
+ char* allocation;
+ IndexLinkNode* freeList;
+ Slab* prev;
+ Slab* next;
+ Index blocksInUse;
+ };
+
+ SlabAllocatorImpl(Index blocksPerSlab, uint32_t objectSize, uint32_t objectAlignment);
+ ~SlabAllocatorImpl();
+
+ // Allocate a new block of memory.
+ void* Allocate();
+
+ // Deallocate a block of memory.
+ void Deallocate(void* ptr);
+
+ private:
+ // The maximum value is reserved to indicate the end of the list.
+ static Index kInvalidIndex;
+
+ // Get the IndexLinkNode |offset| slots away.
+ IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;
+
+ // Compute the pointer to the IndexLinkNode from an allocated object.
+ IndexLinkNode* NodeFromObject(void* object) const;
+
+ // Compute the pointer to the object from an IndexLinkNode.
+ void* ObjectFromNode(IndexLinkNode* node) const;
+
+ bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
+
+ // The Slab stores a linked-list of free allocations.
+ // PushFront/PopFront adds/removes an allocation from the free list.
+ void PushFront(Slab* slab, IndexLinkNode* node) const;
+ IndexLinkNode* PopFront(Slab* slab) const;
+
+ // Replace the current slab with a new one, and chain the old one off of it.
+    // Both slabs may still be used for allocation/deallocation, but older slabs
+ // will be a little slower to get allocations from.
+ void GetNewSlab();
+
+ const uint32_t mAllocationAlignment;
+
+ // | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
+ // | -----------| mSlabBlocksOffset
+ // | | ---------------------- | mBlockStride
+ // | | ----------| mIndexLinkNodeOffset
+ // | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)
+
+ // A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset| is
+ // the offset to the start of the aligned memory region.
+ const uint32_t mSlabBlocksOffset;
+
+ // The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
+ const uint32_t mIndexLinkNodeOffset;
+
+ // Because alignment of allocations may introduce padding, |mBlockStride| is the
+ // distance between aligned blocks of (Allocation + IndexLinkNode)
+ const uint32_t mBlockStride;
+
+ const Index mBlocksPerSlab; // The total number of blocks in a slab.
+
+ const size_t mTotalAllocationSize;
+
+ struct SentinelSlab : Slab {
+ SentinelSlab();
+ ~SentinelSlab();
+
+ SentinelSlab(SentinelSlab&& rhs);
+
+ void Prepend(Slab* slab);
+ };
+
+ SentinelSlab mAvailableSlabs; // Available slabs to service allocations.
+ SentinelSlab mFullSlabs; // Full slabs. Stored here so we can skip checking them.
+ SentinelSlab mRecycledSlabs; // Recycled slabs. Not immediately added to |mAvailableSlabs| so
+ // we don't thrash the current "active" slab.
+};
+
+template <typename T>
+class SlabAllocator : public SlabAllocatorImpl {
+ public:
+ SlabAllocator(size_t totalObjectBytes,
+ uint32_t objectSize = sizeof(T),
+ uint32_t objectAlignment = alignof(T))
+ : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
+ }
+
+ template <typename... Args>
+ T* Allocate(Args&&... args) {
+ void* ptr = SlabAllocatorImpl::Allocate();
+ return new (ptr) T(std::forward<Args>(args)...);
+ }
+
+ void Deallocate(T* object) {
+ SlabAllocatorImpl::Deallocate(object);
+ }
+};
+
+#endif // COMMON_SLABALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/StackContainer.h b/chromium/third_party/dawn/src/dawn/common/StackContainer.h
new file mode 100644
index 00000000000..4de688ffb5f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/StackContainer.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a modified copy of Chromium's /src/base/containers/stack_container.h
+
+#ifndef COMMON_STACKCONTAINER_H_
+#define COMMON_STACKCONTAINER_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+#include <vector>
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory and overflows onto the heap. This stack buffer
+// would be allocated on the stack and allows us to avoid heap operations in
+// some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
+template <typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+ typedef typename std::allocator<T>::pointer pointer;
+ typedef typename std::allocator<T>::size_type size_type;
+
+ // Backing store for the allocator. The container owner is responsible for
+ // maintaining this for as long as any containers using this allocator are
+ // live.
+ struct Source {
+ Source() : used_stack_buffer_(false) {
+ }
+
+ // Casts the buffer in its right type.
+ T* stack_buffer() {
+ return reinterpret_cast<T*>(stack_buffer_);
+ }
+ const T* stack_buffer() const {
+ return reinterpret_cast<const T*>(&stack_buffer_);
+ }
+
+ // The buffer itself. It is not of type T because we don't want the
+ // constructors and destructors to be automatically called. Define a POD
+ // buffer of the right size instead.
+ alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
+#if defined(DAWN_COMPILER_GCC) && !defined(__x86_64__) && !defined(__i386__)
+ static_assert(alignof(T) <= 16, "http://crbug.com/115612");
+#endif
+
+ // Set when the stack buffer is used for an allocation. We do not track
+ // how much of the buffer is used, only that somebody is using it.
+ bool used_stack_buffer_;
+ };
+
+ // Used by containers when they want to refer to an allocator of type U.
+ template <typename U>
+ struct rebind {
+ typedef StackAllocator<U, stack_capacity> other;
+ };
+
+ // For the straight up copy c-tor, we can share storage.
+ StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+ : std::allocator<T>(), source_(rhs.source_) {
+ }
+
+ // ISO C++ requires the following constructor to be defined,
+ // and std::vector in VC++2008SP1 Release fails with an error
+ // in the class _Container_base_aux_alloc_real (from <xutility>)
+ // if the constructor does not exist.
+ // For this constructor, we cannot share storage; there's
+ // no guarantee that the Source buffer of Ts is large enough
+ // for Us.
+ // TODO: If we were fancy pants, perhaps we could share storage
+ // iff sizeof(T) == sizeof(U).
+ template <typename U, size_t other_capacity>
+ StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
+ }
+
+ // This constructor must exist. It creates a default allocator that doesn't
+ // actually have a stack buffer. glibc's std::string() will compare the
+ // current allocator against the default-constructed allocator, so this
+ // should be fast.
+ StackAllocator() : source_(nullptr) {
+ }
+
+ explicit StackAllocator(Source* source) : source_(source) {
+ }
+
+ // Actually do the allocation. Use the stack buffer if nobody has used it yet
+ // and the size requested fits. Otherwise, fall through to the standard
+ // allocator.
+ pointer allocate(size_type n) {
+ if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
+ source_->used_stack_buffer_ = true;
+ return source_->stack_buffer();
+ } else {
+ return std::allocator<T>::allocate(n);
+ }
+ }
+
+ // Free: when trying to free the stack buffer, just mark it as free. For
+ // non-stack-buffer pointers, just fall though to the standard allocator.
+ void deallocate(pointer p, size_type n) {
+ if (source_ && p == source_->stack_buffer())
+ source_->used_stack_buffer_ = false;
+ else
+ std::allocator<T>::deallocate(p, n);
+ }
+
+ private:
+ Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the vector is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// This will not work with std::string since some implementations allocate
+// more bytes than requested in calls to reserve(), forcing the allocation onto
+// the heap. http://crbug.com/709273
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template <typename TContainerType, size_t stack_capacity>
+class StackContainer {
+  public:
+    typedef TContainerType ContainerType;
+    typedef typename ContainerType::value_type ContainedType;
+    typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+    // Allocator must be constructed before the container!
+    StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+        // Make the container use the stack allocation by reserving our buffer size
+        // before doing anything else.
+        container_.reserve(stack_capacity);
+    }
+
+    // Getters for the actual container.
+    //
+    // Danger: any copies of this made using the copy constructor must have
+    // shorter lifetimes than the source. The copy will share the same allocator
+    // and therefore the same stack buffer as the original. Use std::copy to
+    // copy into a "real" container for longer-lived objects.
+    ContainerType& container() {
+        return container_;
+    }
+    const ContainerType& container() const {
+        return container_;
+    }
+
+    // Support operator-> to get to the container. This allows nicer syntax like:
+    //   StackContainer<...> foo;
+    //   std::sort(foo->begin(), foo->end());
+    ContainerType* operator->() {
+        return &container_;
+    }
+    const ContainerType* operator->() const {
+        return &container_;
+    }
+
+    // Retrieves the stack source so that unit tests can verify that the
+    // buffer is being used properly.
+    const typename Allocator::Source& stack_data() const {
+        return stack_data_;
+    }
+
+  protected:
+    // Declaration order is load-bearing: |stack_data_| and |allocator_| must
+    // be initialized before |container_|, which is constructed from them.
+    typename Allocator::Source stack_data_;
+    Allocator allocator_;
+    ContainerType container_;
+
+  private:
+    // Neither copyable nor movable: the allocator points at this object's
+    // stack buffer, which cannot be transferred.
+    StackContainer(const StackContainer& rhs) = delete;
+    StackContainer& operator=(const StackContainer& rhs) = delete;
+    StackContainer(StackContainer&& rhs) = delete;
+    StackContainer& operator=(StackContainer&& rhs) = delete;
+};
+
+// Range-based iteration support for StackContainer: forward begin()/end() to
+// the wrapped container so a StackContainer can appear directly in a
+// range-for loop. The trailing return types let the overloads SFINAE away for
+// container types without ADL-visible begin()/end().
+template <typename TContainerType, size_t stack_capacity>
+auto begin(const StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(begin(stack_container.container())) {
+    return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(begin(stack_container.container())) {
+    return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(end(stack_container.container())) {
+    return end(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
+    -> decltype(end(stack_container.container())) {
+    return end(stack_container.container());
+}
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+// StackVector<int, 16> foo;
+// foo->push_back(22); // we have overloaded operator->
+// foo[0] = 10; // as well as operator[]
+template <typename T, size_t stack_capacity>
+class StackVector
+    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
+  public:
+    StackVector()
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+    }
+
+    // We need to put this in STL containers sometimes, which requires a copy
+    // constructor. We can't call the regular copy constructor because that will
+    // take the stack buffer from the original. Here, we create an empty object
+    // and make a stack buffer of its own.
+    StackVector(const StackVector<T, stack_capacity>& other)
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+        this->container().assign(other->begin(), other->end());
+    }
+
+    StackVector<T, stack_capacity>& operator=(const StackVector<T, stack_capacity>& other) {
+        // Guard against self-assignment: assign() would first clear this
+        // container and then read from the now-invalidated source iterators.
+        if (this != &other) {
+            this->container().assign(other->begin(), other->end());
+        }
+        return *this;
+    }
+
+    // Vectors are commonly indexed, which isn't very convenient even with
+    // operator-> (using "->at()" does exception stuff we don't want).
+    T& operator[](size_t i) {
+        return this->container().operator[](i);
+    }
+    const T& operator[](size_t i) const {
+        return this->container().operator[](i);
+    }
+
+  private:
+    // Copying is supported above (with a fresh stack buffer); moving would
+    // steal the allocator's stack buffer, so it stays deleted.
+    StackVector(StackVector&& rhs) = delete;
+    StackVector& operator=(StackVector&& rhs) = delete;
+};
+
+#endif // COMMON_STACKCONTAINER_H_
diff --git a/chromium/third_party/dawn/src/common/SwapChainUtils.h b/chromium/third_party/dawn/src/dawn/common/SwapChainUtils.h
index c1ad5f2e62a..c1ad5f2e62a 100644
--- a/chromium/third_party/dawn/src/common/SwapChainUtils.h
+++ b/chromium/third_party/dawn/src/dawn/common/SwapChainUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp b/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp
new file mode 100644
index 00000000000..a5ce0f15402
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SystemUtils.cpp
@@ -0,0 +1,229 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SystemUtils.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include <Windows.h>
+# include <vector>
+#elif defined(DAWN_PLATFORM_LINUX)
+# include <dlfcn.h>
+# include <limits.h>
+# include <unistd.h>
+# include <cstdlib>
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+# include <dlfcn.h>
+# include <mach-o/dyld.h>
+# include <vector>
+#endif
+
+#include <array>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+// Canonical path separator on Windows.
+const char* GetPathSeparator() {
+    return "\\";
+}
+
+// Returns (value, wasPresent) for |variableName| using the Win32 two-call
+// size-probe pattern. Failures other than "variable not found" are logged.
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
+    // First pass a size of 0 to get the size of variable value.
+    DWORD sizeWithNullTerminator = GetEnvironmentVariableA(variableName, nullptr, 0);
+    if (sizeWithNullTerminator == 0) {
+        // Zero means either "not found" (the expected case) or an API failure.
+        DWORD err = GetLastError();
+        if (err != ERROR_ENVVAR_NOT_FOUND) {
+            dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
+        }
+        return std::make_pair(std::string(), false);
+    }
+
+    // Then get variable value with its actual size. The probe size above
+    // includes the null terminator while the success return value does not,
+    // hence the +1 comparison; a mismatch suggests the variable changed
+    // between the two calls.
+    std::vector<char> buffer(sizeWithNullTerminator);
+    DWORD sizeStored =
+        GetEnvironmentVariableA(variableName, buffer.data(), static_cast<DWORD>(buffer.size()));
+    if (sizeStored + 1 != sizeWithNullTerminator) {
+        DWORD err = GetLastError();
+        if (err) {
+            dawn::WarningLog() << "GetEnvironmentVariableA failed with code " << err;
+        }
+        return std::make_pair(std::string(), false);
+    }
+    return std::make_pair(std::string(buffer.data(), sizeStored), true);
+}
+
+// Sets (or, with a null |value|, deletes) the variable. Returns true on
+// success. Win32 BOOL success is "nonzero", not necessarily the constant
+// TRUE, so compare against FALSE rather than TRUE.
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+    return SetEnvironmentVariableA(variableName, value) != FALSE;
+}
+#elif defined(DAWN_PLATFORM_POSIX)
+// Canonical path separator on POSIX platforms.
+const char* GetPathSeparator() {
+    return "/";
+}
+
+// Returns (value, wasPresent); the boolean distinguishes "unset" from
+// "set but empty".
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName) {
+    const char* value = getenv(variableName);
+    if (value == nullptr) {
+        return {std::string(), false};
+    }
+    return {std::string(value), true};
+}
+
+// Sets the variable, or removes it entirely when |value| is null. Both
+// underlying calls report success with a zero return.
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+    const int rc =
+        (value == nullptr) ? unsetenv(variableName) : setenv(variableName, value, 1);
+    return rc == 0;
+}
+#else
+# error "Implement Get/SetEnvironmentVar for your platform."
+#endif
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+// Returns the path of |module| (a null HMODULE means the executable itself),
+// or nullopt on failure.
+// Bug fix: the handle must be forwarded to GetModuleFileNameA. The previous
+// code passed nullptr unconditionally, which made GetModulePath() below
+// return the executable's path instead of the containing module's path.
+std::optional<std::string> GetHModulePath(HMODULE module) {
+    std::array<char, MAX_PATH> executableFileBuf;
+    DWORD executablePathLen = GetModuleFileNameA(module, executableFileBuf.data(),
+                                                 static_cast<DWORD>(executableFileBuf.size()));
+    if (executablePathLen == 0) {
+        return {};
+    }
+    return executableFileBuf.data();
+}
+std::optional<std::string> GetExecutablePath() {
+    // A null HMODULE requests the path of the executable itself.
+    return GetHModulePath(nullptr);
+}
+#elif defined(DAWN_PLATFORM_LINUX)
+std::optional<std::string> GetExecutablePath() {
+    std::array<char, PATH_MAX> path;
+    // readlink() does not null-terminate, so read at most PATH_MAX - 1 bytes
+    // and reject results that fill the whole budget (possible truncation).
+    ssize_t result = readlink("/proc/self/exe", path.data(), PATH_MAX - 1);
+    if (result < 0 || static_cast<size_t>(result) >= PATH_MAX - 1) {
+        return {};
+    }
+
+    path[result] = '\0';
+    return path.data();
+}
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+std::optional<std::string> GetExecutablePath() {
+    // First call with a null buffer only to learn the required size...
+    uint32_t size = 0;
+    _NSGetExecutablePath(nullptr, &size);
+
+    // ...then fetch the path for real; +1 leaves room for the terminator.
+    std::vector<char> buffer(size + 1);
+    if (_NSGetExecutablePath(buffer.data(), &size) != 0) {
+        return {};
+    }
+
+    buffer[size] = '\0';
+    return buffer.data();
+}
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+std::optional<std::string> GetExecutablePath() {
+    // TODO: Implement on Fuchsia
+    // Not implemented yet; callers must handle the nullopt result.
+    return {};
+}
+#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+std::optional<std::string> GetExecutablePath() {
+    // Not available under Emscripten; callers get nullopt.
+    return {};
+}
+#else
+# error "Implement GetExecutablePath for your platform."
+#endif
+
+// Directory containing the executable, including the trailing path separator
+// so the result can be concatenated with a file name directly.
+std::optional<std::string> GetExecutableDirectory() {
+    std::optional<std::string> exePath = GetExecutablePath();
+    if (!exePath) {
+        return {};
+    }
+    const size_t sepLoc = exePath->find_last_of(GetPathSeparator());
+    if (sepLoc == std::string::npos) {
+        return {};
+    }
+    return exePath->substr(0, sepLoc + 1);
+}
+
+#if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
+std::optional<std::string> GetModulePath() {
+    // dladdr() on the address of a symbol in this module yields information
+    // about the shared object (or executable) containing it.
+    static int placeholderSymbol = 0;
+    Dl_info dlInfo;
+    if (dladdr(&placeholderSymbol, &dlInfo) == 0) {
+        return {};
+    }
+
+    // Resolve the reported file name to an absolute, symlink-free path.
+    std::array<char, PATH_MAX> absolutePath;
+    if (realpath(dlInfo.dli_fname, absolutePath.data()) == NULL) {
+        return {};
+    }
+    return absolutePath.data();
+}
+#elif defined(DAWN_PLATFORM_WINDOWS)
+std::optional<std::string> GetModulePath() {
+    static int placeholderSymbol = 0;
+    HMODULE module = nullptr;
+// GetModuleHandleEx is unavailable on UWP
+#    if defined(DAWN_IS_WINUWP)
+    return {};
+#    else
+    // Resolve the module containing |placeholderSymbol| without changing its
+    // reference count.
+    if (!GetModuleHandleExA(
+            GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+            reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
+        return {};
+    }
+#    endif
+    return GetHModulePath(module);
+}
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+std::optional<std::string> GetModulePath() {
+    // Not implemented on Fuchsia; callers get nullopt.
+    return {};
+}
+#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+std::optional<std::string> GetModulePath() {
+    // Not available under Emscripten; callers get nullopt.
+    return {};
+}
+#else
+# error "Implement GetModulePath for your platform."
+#endif
+
+// Directory containing the current module, with a trailing path separator,
+// or nullopt when the module path cannot be determined.
+std::optional<std::string> GetModuleDirectory() {
+    std::optional<std::string> modPath = GetModulePath();
+    if (!modPath) {
+        return {};
+    }
+    const size_t sepLoc = modPath->find_last_of(GetPathSeparator());
+    return sepLoc == std::string::npos
+               ? std::optional<std::string>{}
+               : std::optional<std::string>{modPath->substr(0, sepLoc + 1)};
+}
+
+// ScopedEnvironmentVar
+
+// Captures the variable's previous value before overwriting it so the
+// destructor can restore (or unset) it.
+ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
+    : mName(variableName),
+      mOriginalValue(GetEnvironmentVar(variableName)),
+      mIsSet(SetEnvironmentVar(variableName, value)) {
+}
+
+ScopedEnvironmentVar::~ScopedEnvironmentVar() {
+    if (mIsSet) {
+        // Restore the previous value, or unset the variable (null value) if
+        // it wasn't present before.
+        bool success = SetEnvironmentVar(
+            mName.c_str(), mOriginalValue.second ? mOriginalValue.first.c_str() : nullptr);
+        // If we set the environment variable in the constructor, we should never fail restoring it.
+        ASSERT(success);
+    }
+}
+
+// Deferred form of the two-argument constructor. Only valid once per
+// instance: a second call would clobber the saved original value.
+bool ScopedEnvironmentVar::Set(const char* variableName, const char* value) {
+    ASSERT(!mIsSet);
+    mName = variableName;
+    mOriginalValue = GetEnvironmentVar(variableName);
+    mIsSet = SetEnvironmentVar(variableName, value);
+    return mIsSet;
+}
diff --git a/chromium/third_party/dawn/src/dawn/common/SystemUtils.h b/chromium/third_party/dawn/src/dawn/common/SystemUtils.h
new file mode 100644
index 00000000000..bb59966727c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SystemUtils.h
@@ -0,0 +1,57 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SYSTEMUTILS_H_
+#define COMMON_SYSTEMUTILS_H_
+
+#include "dawn/common/Platform.h"
+
+#include <optional>
+#include <string>
+
+const char* GetPathSeparator();
+// Returns a pair of the environment variable's value, and a boolean indicating whether the variable
+// was present.
+std::pair<std::string, bool> GetEnvironmentVar(const char* variableName);
+bool SetEnvironmentVar(const char* variableName, const char* value);
+// Directories are always returned with a trailing path separator.
+// May return std::nullopt if the path is too long, there is no current
+// module (statically linked into executable), or the function is not
+// implemented on the platform.
+std::optional<std::string> GetExecutableDirectory();
+std::optional<std::string> GetModuleDirectory();
+
+#ifdef DAWN_PLATFORM_MACOS
+void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion = nullptr);
+bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion = 0);
+#endif
+
+// RAII helper that sets an environment variable and restores its previous
+// value (or unsets it) on destruction.
+class ScopedEnvironmentVar {
+  public:
+    ScopedEnvironmentVar() = default;
+    ScopedEnvironmentVar(const char* variableName, const char* value);
+    ~ScopedEnvironmentVar();
+
+    // Not copyable: two copies would both attempt to restore the variable.
+    ScopedEnvironmentVar(const ScopedEnvironmentVar& rhs) = delete;
+    ScopedEnvironmentVar& operator=(const ScopedEnvironmentVar& rhs) = delete;
+
+    // Deferred set; asserts that this instance hasn't set a variable yet.
+    bool Set(const char* variableName, const char* value);
+
+  private:
+    std::string mName;
+    // (value, wasPresent) of the variable before this object changed it.
+    std::pair<std::string, bool> mOriginalValue;
+    bool mIsSet = false;
+};
+
+#endif // COMMON_SYSTEMUTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/SystemUtils_mac.mm b/chromium/third_party/dawn/src/dawn/common/SystemUtils_mac.mm
new file mode 100644
index 00000000000..b706c20f6dd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/SystemUtils_mac.mm
@@ -0,0 +1,33 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/SystemUtils.h"
+
+#include "dawn/common/Assert.h"
+
+#import <Foundation/NSProcessInfo.h>
+
+// Writes the running macOS version. |majorVersion| is required;
+// |minorVersion| may be null when the caller doesn't need it.
+void GetMacOSVersion(int32_t* majorVersion, int32_t* minorVersion) {
+    NSOperatingSystemVersion version = [[NSProcessInfo processInfo] operatingSystemVersion];
+    ASSERT(majorVersion != nullptr);
+    *majorVersion = version.majorVersion;
+    if (minorVersion != nullptr) {
+        *minorVersion = version.minorVersion;
+    }
+}
+
+// True when the OS is at least major.minor; the patch component is always
+// compared as 0.
+bool IsMacOSVersionAtLeast(uint32_t majorVersion, uint32_t minorVersion) {
+    return
+        [NSProcessInfo.processInfo isOperatingSystemAtLeastVersion:{majorVersion, minorVersion, 0}];
+}
diff --git a/chromium/third_party/dawn/src/common/TypeTraits.h b/chromium/third_party/dawn/src/dawn/common/TypeTraits.h
index 3348b892f48..3348b892f48 100644
--- a/chromium/third_party/dawn/src/common/TypeTraits.h
+++ b/chromium/third_party/dawn/src/dawn/common/TypeTraits.h
diff --git a/chromium/third_party/dawn/src/dawn/common/TypedInteger.h b/chromium/third_party/dawn/src/dawn/common/TypedInteger.h
new file mode 100644
index 00000000000..6669d148e67
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/TypedInteger.h
@@ -0,0 +1,262 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_TYPEDINTEGER_H_
+#define COMMON_TYPEDINTEGER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <limits>
+#include <type_traits>
+
+// TypedInteger is helper class that provides additional type safety in Debug.
+// - Integers of different (Tag, BaseIntegerType) may not be used interoperably
+// - Allows casts only to the underlying type.
+// - Integers of the same (Tag, BaseIntegerType) may be compared or assigned.
+// This class helps ensure that the many types of indices in Dawn aren't mixed up and used
+// interchangeably.
+// In Release builds, when DAWN_ENABLE_ASSERTS is not defined, TypedInteger is a passthrough
+// typedef of the underlying type.
+//
+// Example:
+// using UintA = TypedInteger<struct TypeA, uint32_t>;
+// using UintB = TypedInteger<struct TypeB, uint32_t>;
+//
+// in Release:
+// using UintA = uint32_t;
+// using UintB = uint32_t;
+//
+// in Debug:
+// using UintA = detail::TypedIntegerImpl<struct TypeA, uint32_t>;
+// using UintB = detail::TypedIntegerImpl<struct TypeB, uint32_t>;
+//
+// Assignment, construction, comparison, and arithmetic with TypedIntegerImpl are allowed
+// only for typed integers of exactly the same type. Further, they must be
+// created / cast explicitly; there is no implicit conversion.
+//
+// UintA a(2);
+// uint32_t aValue = static_cast<uint32_t>(a);
+//
+namespace detail {
+ template <typename Tag, typename T>
+ class TypedIntegerImpl;
+} // namespace detail
+
+template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
+#if defined(DAWN_ENABLE_ASSERTS)
+using TypedInteger = detail::TypedIntegerImpl<Tag, T>;
+#else
+using TypedInteger = T;
+#endif
+
+namespace detail {
+    template <typename Tag, typename T>
+    class alignas(T) TypedIntegerImpl {
+        static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+        T mValue;
+
+      public:
+        constexpr TypedIntegerImpl() : mValue(0) {
+            // The wrapper must be layout-identical to the raw integer so the
+            // Release-mode passthrough typedef is a drop-in replacement.
+            static_assert(alignof(TypedIntegerImpl) == alignof(T));
+            static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
+        }
+
+        // Construction from non-narrowing integral types.
+        template <typename I,
+                  typename = std::enable_if_t<
+                      std::is_integral<I>::value &&
+                      std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+                      std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+        explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
+        }
+
+        // Allow explicit casts only to the underlying type. If you're casting out of a
+        // TypedInteger, you should know what you're doing, and exactly what type you
+        // expect.
+        explicit constexpr operator T() const {
+            return static_cast<T>(this->mValue);
+        }
+
+// Same-tag TypedInteger comparison operators
+#define TYPED_COMPARISON(op)                                        \
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
+        return mValue op rhs.mValue;                                \
+    }
+        TYPED_COMPARISON(<)
+        TYPED_COMPARISON(<=)
+        TYPED_COMPARISON(>)
+        TYPED_COMPARISON(>=)
+        TYPED_COMPARISON(==)
+        TYPED_COMPARISON(!=)
+#undef TYPED_COMPARISON
+
+        // Increment / decrement operators for for-loop iteration
+        constexpr TypedIntegerImpl& operator++() {
+            ASSERT(this->mValue < std::numeric_limits<T>::max());
+            ++this->mValue;
+            return *this;
+        }
+
+        constexpr TypedIntegerImpl operator++(int) {
+            TypedIntegerImpl ret = *this;
+
+            ASSERT(this->mValue < std::numeric_limits<T>::max());
+            ++this->mValue;
+            return ret;
+        }
+
+        constexpr TypedIntegerImpl& operator--() {
+            // Fixed: use Dawn's ASSERT macro (not the C assert, which this
+            // header doesn't even include) for consistency with every other
+            // range check in this class.
+            ASSERT(this->mValue > std::numeric_limits<T>::min());
+            --this->mValue;
+            return *this;
+        }
+
+        constexpr TypedIntegerImpl operator--(int) {
+            TypedIntegerImpl ret = *this;
+
+            ASSERT(this->mValue > std::numeric_limits<T>::min());
+            --this->mValue;
+            return ret;
+        }
+
+        // Unsigned addition, checked against wrap-around.
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
+        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            // Overflow would wrap around
+            ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
+            return lhs.mValue + rhs.mValue;
+        }
+
+        // Signed addition, checked against both limits before adding.
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
+        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            if (lhs.mValue > 0) {
+                // rhs is positive: |rhs| is at most the distance between max and |lhs|.
+                // rhs is negative: (positive + negative) won't overflow
+                ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
+            } else {
+                // rhs is positive: (negative + positive) won't underflow
+                // rhs is negative: |rhs| isn't less than the (negative) distance between min
+                // and |lhs|
+                ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
+            }
+            return lhs.mValue + rhs.mValue;
+        }
+
+        // Unsigned subtraction, checked against wrap-around.
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
+        SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            // Overflow would wrap around
+            ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
+            return lhs.mValue - rhs.mValue;
+        }
+
+        // Signed subtraction, checked against both limits before subtracting.
+        template <typename T2 = T>
+        static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
+            TypedIntegerImpl<Tag, T> lhs,
+            TypedIntegerImpl<Tag, T2> rhs) {
+            static_assert(std::is_same<T, T2>::value);
+
+            if (lhs.mValue > 0) {
+                // rhs is positive: positive minus positive won't overflow
+                // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
+                // and max.
+                ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
+            } else {
+                // rhs is positive: |rhs| is at most the distance between min and |lhs|
+                // rhs is negative: negative minus negative won't overflow
+                ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
+            }
+            return lhs.mValue - rhs.mValue;
+        }
+
+        template <typename T2 = T>
+        constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+            static_assert(std::is_same<T, T2>::value);
+            // The negation of the most negative value cannot be represented.
+            ASSERT(this->mValue != std::numeric_limits<T>::min());
+            return TypedIntegerImpl(-this->mValue);
+        }
+
+        constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
+            auto result = AddImpl(*this, rhs);
+            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
+            return TypedIntegerImpl(result);
+        }
+
+        constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
+            auto result = SubImpl(*this, rhs);
+            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
+            return TypedIntegerImpl(result);
+        }
+    };
+
+} // namespace detail
+
+namespace std {
+
+    // Mirror the underlying type's numeric_limits, wrapping the extreme
+    // values back into the typed integer.
+    template <typename Tag, typename T>
+    class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+      public:
+        static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+        }
+        static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+        }
+    };
+
+} // namespace std
+
+namespace ityp {
+
+ // These helpers below are provided since the default arithmetic operators for small integer
+ // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
+ // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
+ // ityp::Sub(a, b) instead.
+
+    // Typed-integer Add: routes through AddImpl so the overflow checks run,
+    // then casts the (possibly promoted) result back to T.
+    template <typename Tag, typename T>
+    constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
+        return ::detail::TypedIntegerImpl<Tag, T>(
+            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
+    }
+
+    // Typed-integer Sub: same pattern via SubImpl.
+    template <typename Tag, typename T>
+    constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
+        return ::detail::TypedIntegerImpl<Tag, T>(
+            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
+    }
+
+    // Plain-integer overloads so the same call sites compile in Release,
+    // where TypedInteger is a passthrough typedef; the cast undoes integer
+    // promotion for small types like uint8_t.
+    template <typename T>
+    constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+        return static_cast<T>(lhs + rhs);
+    }
+
+    template <typename T>
+    constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+        return static_cast<T>(lhs - rhs);
+    }
+
+} // namespace ityp
+
+#endif // COMMON_TYPEDINTEGER_H_
diff --git a/chromium/third_party/dawn/src/common/UnderlyingType.h b/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h
index 09c72c023f9..09c72c023f9 100644
--- a/chromium/third_party/dawn/src/common/UnderlyingType.h
+++ b/chromium/third_party/dawn/src/dawn/common/UnderlyingType.h
diff --git a/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp
new file mode 100644
index 00000000000..fd924f4ae21
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.cpp
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/WindowsUtils.h"
+
+#include "dawn/common/windows_with_undefs.h"
+
+#include <memory>
+
+// Converts a null-terminated wide string to UTF-8. Returns an empty string
+// if the conversion cannot even be measured.
+std::string WCharToUTF8(const wchar_t* input) {
+    // The -1 argument asks WideCharToMultiByte to use the null terminator to know the size of
+    // input. It will return a size that includes the null terminator.
+    int requiredSize = WideCharToMultiByte(CP_UTF8, 0, input, -1, nullptr, 0, nullptr, nullptr);
+    if (requiredSize <= 0) {
+        // Robustness fix: a failed probe returns 0, and resize(0 - 1) would
+        // request a size_t(-1)-byte string.
+        return std::string();
+    }
+
+    std::string result;
+    result.resize(requiredSize - 1);
+    WideCharToMultiByte(CP_UTF8, 0, input, -1, result.data(), requiredSize, nullptr, nullptr);
+
+    return result;
+}
+
+// Converts a null-terminated UTF-8 string to a wide string. Returns an empty
+// string if the conversion cannot even be measured.
+std::wstring UTF8ToWStr(const char* input) {
+    // The -1 argument asks MultiByteToWideChar to use the null terminator to know the size of
+    // input. It will return a size that includes the null terminator.
+    int requiredSize = MultiByteToWideChar(CP_UTF8, 0, input, -1, nullptr, 0);
+    if (requiredSize <= 0) {
+        // Robustness fix: a failed probe returns 0, and resize(0 - 1) would
+        // request a size_t(-1)-element string.
+        return std::wstring();
+    }
+
+    std::wstring result;
+    result.resize(requiredSize - 1);
+    MultiByteToWideChar(CP_UTF8, 0, input, -1, result.data(), requiredSize);
+
+    return result;
+}
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.h b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.h
index 3ab916bbcdc..3ab916bbcdc 100644
--- a/chromium/third_party/dawn/src/common/WindowsUtils.h
+++ b/chromium/third_party/dawn/src/dawn/common/WindowsUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_array.h b/chromium/third_party/dawn/src/dawn/common/ityp_array.h
new file mode 100644
index 00000000000..c7db71a0c01
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_array.h
@@ -0,0 +1,98 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_ARRAY_H_
+#define COMMON_ITYP_ARRAY_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <array>
+#include <cstddef>
+#include <type_traits>
+
+namespace ityp {
+
+ // ityp::array is a helper class that wraps std::array with the restriction that
+ // indices must be a particular type |Index|. Dawn uses multiple flat maps of
+    // index-->data, and this class helps ensure that indices cannot be passed
+    // interchangeably to a flat map of a different type.
+    template <typename Index, typename Value, size_t Size>
+    class array : private std::array<Value, Size> {
+        using I = UnderlyingType<Index>;
+        using Base = std::array<Value, Size>;
+
+        // Every valid index must be representable in the underlying type.
+        static_assert(Size <= std::numeric_limits<I>::max());
+
+      public:
+        constexpr array() = default;
+
+        // Aggregate-style construction, forwarding the values to std::array.
+        template <typename... Values>
+        constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
+        }
+
+        // Element access takes the typed Index; the range is checked with
+        // ASSERT before delegating to std::array.
+        Value& operator[](Index i) {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::operator[](index);
+        }
+
+        constexpr const Value& operator[](Index i) const {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::operator[](index);
+        }
+
+        Value& at(Index i) {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::at(index);
+        }
+
+        constexpr const Value& at(Index i) const {
+            I index = static_cast<I>(i);
+            ASSERT(index >= 0 && index < I(Size));
+            return Base::at(index);
+        }
+
+        // Iteration simply re-exposes the base std::array iterators.
+        typename Base::iterator begin() noexcept {
+            return Base::begin();
+        }
+
+        typename Base::const_iterator begin() const noexcept {
+            return Base::begin();
+        }
+
+        typename Base::iterator end() noexcept {
+            return Base::end();
+        }
+
+        typename Base::const_iterator end() const noexcept {
+            return Base::end();
+        }
+
+        // size() is reported as the typed Index, not size_t.
+        constexpr Index size() const {
+            return Index(I(Size));
+        }
+
+        using Base::back;
+        using Base::data;
+        using Base::empty;
+        using Base::fill;
+        using Base::front;
+    };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_ARRAY_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h b/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h
new file mode 100644
index 00000000000..057e54ed2dd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_bitset.h
@@ -0,0 +1,134 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_BITSET_H_
+#define COMMON_ITYP_BITSET_H_
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+namespace ityp {
+
+ // ityp::bitset is a helper class that wraps std::bitset with the restriction that
+ // indices must be a particular type |Index|.
+ template <typename Index, size_t N>
+ class bitset : private std::bitset<N> {
+ using I = UnderlyingType<Index>;
+ using Base = std::bitset<N>;
+
+ static_assert(sizeof(I) <= sizeof(size_t));
+
+ constexpr bitset(const Base& rhs) : Base(rhs) {
+ }
+
+ public:
+ constexpr bitset() noexcept : Base() {
+ }
+
+ constexpr bitset(unsigned long long value) noexcept : Base(value) {
+ }
+
+ constexpr bool operator[](Index i) const {
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ typename Base::reference operator[](Index i) {
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ bool test(Index i) const {
+ return Base::test(static_cast<I>(i));
+ }
+
+ using Base::all;
+ using Base::any;
+ using Base::count;
+ using Base::none;
+ using Base::size;
+
+ bool operator==(const bitset& other) const noexcept {
+ return Base::operator==(static_cast<const Base&>(other));
+ }
+
+ bool operator!=(const bitset& other) const noexcept {
+ return Base::operator!=(static_cast<const Base&>(other));
+ }
+
+ bitset& operator&=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+ }
+
+ bitset& operator|=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+ }
+
+ bitset& operator^=(const bitset& other) noexcept {
+ return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+ }
+
+ bitset operator~() const noexcept {
+ return bitset(*this).flip();
+ }
+
+ bitset& set() noexcept {
+ return static_cast<bitset&>(Base::set());
+ }
+
+ bitset& set(Index i, bool value = true) {
+ return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+ }
+
+ bitset& reset() noexcept {
+ return static_cast<bitset&>(Base::reset());
+ }
+
+ bitset& reset(Index i) {
+ return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
+ }
+
+ bitset& flip() noexcept {
+ return static_cast<bitset&>(Base::flip());
+ }
+
+ bitset& flip(Index i) {
+ return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
+ }
+
+ using Base::to_string;
+ using Base::to_ullong;
+ using Base::to_ulong;
+
+ friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+ }
+
+ friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+ }
+
+ friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+ return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+ }
+
+ friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+ return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+ }
+
+ friend struct std::hash<bitset>;
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_BITSET_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_span.h b/chromium/third_party/dawn/src/dawn/common/ityp_span.h
new file mode 100644
index 00000000000..c73f9831187
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_span.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_SPAN_H_
+#define COMMON_ITYP_SPAN_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <type_traits>
+
+namespace ityp {
+
+ // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+ // It stores the size and pointer to first element. It has the restriction that
+ // indices must be a particular type |Index|. This provides a type-safe way to index
+ // raw pointers.
+ template <typename Index, typename Value>
+ class span {
+ using I = UnderlyingType<Index>;
+
+ public:
+ constexpr span() : mData(nullptr), mSize(0) {
+ }
+ constexpr span(Value* data, Index size) : mData(data), mSize(size) {
+ }
+
+ constexpr Value& operator[](Index i) const {
+ ASSERT(i < mSize);
+ return mData[static_cast<I>(i)];
+ }
+
+ Value* data() noexcept {
+ return mData;
+ }
+
+ const Value* data() const noexcept {
+ return mData;
+ }
+
+ Value* begin() noexcept {
+ return mData;
+ }
+
+ const Value* begin() const noexcept {
+ return mData;
+ }
+
+ Value* end() noexcept {
+ return mData + static_cast<I>(mSize);
+ }
+
+ const Value* end() const noexcept {
+ return mData + static_cast<I>(mSize);
+ }
+
+ Value& front() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *mData;
+ }
+
+ const Value& front() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *mData;
+ }
+
+ Value& back() {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ const Value& back() const {
+ ASSERT(mData != nullptr);
+ ASSERT(static_cast<I>(mSize) >= 0);
+ return *(mData + static_cast<I>(mSize) - 1);
+ }
+
+ Index size() const {
+ return mSize;
+ }
+
+ private:
+ Value* mData;
+ Index mSize;
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_SPAN_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h b/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h
new file mode 100644
index 00000000000..47c437e3652
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_stack_vec.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_STACK_VEC_H_
+#define COMMON_ITYP_STACK_VEC_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/StackContainer.h"
+#include "dawn/common/UnderlyingType.h"
+
+namespace ityp {
+
+ template <typename Index, typename Value, size_t StaticCapacity>
+ class stack_vec : private StackVector<Value, StaticCapacity> {
+ using I = UnderlyingType<Index>;
+ using Base = StackVector<Value, StaticCapacity>;
+ using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+ static_assert(StaticCapacity <= std::numeric_limits<I>::max());
+
+ public:
+ stack_vec() : Base() {
+ }
+ stack_vec(Index size) : Base() {
+ this->container().resize(static_cast<I>(size));
+ }
+
+ Value& operator[](Index i) {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ void resize(Index size) {
+ this->container().resize(static_cast<I>(size));
+ }
+
+ void reserve(Index size) {
+ this->container().reserve(static_cast<I>(size));
+ }
+
+ Value* data() {
+ return this->container().data();
+ }
+
+ const Value* data() const {
+ return this->container().data();
+ }
+
+ typename VectorBase::iterator begin() noexcept {
+ return this->container().begin();
+ }
+
+ typename VectorBase::const_iterator begin() const noexcept {
+ return this->container().begin();
+ }
+
+ typename VectorBase::iterator end() noexcept {
+ return this->container().end();
+ }
+
+ typename VectorBase::const_iterator end() const noexcept {
+ return this->container().end();
+ }
+
+ typename VectorBase::reference front() {
+ return this->container().front();
+ }
+
+ typename VectorBase::const_reference front() const {
+ return this->container().front();
+ }
+
+ typename VectorBase::reference back() {
+ return this->container().back();
+ }
+
+ typename VectorBase::const_reference back() const {
+ return this->container().back();
+ }
+
+ Index size() const {
+ return Index(static_cast<I>(this->container().size()));
+ }
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_STACK_VEC_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/ityp_vector.h b/chromium/third_party/dawn/src/dawn/common/ityp_vector.h
new file mode 100644
index 00000000000..9d83adf8e5d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/ityp_vector.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_VECTOR_H_
+#define COMMON_ITYP_VECTOR_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/common/UnderlyingType.h"
+
+#include <type_traits>
+#include <vector>
+
+namespace ityp {
+
+ // ityp::vector is a helper class that wraps std::vector with the restriction that
+ // indices must be a particular type |Index|.
+ template <typename Index, typename Value>
+ class vector : public std::vector<Value> {
+ using I = UnderlyingType<Index>;
+ using Base = std::vector<Value>;
+
+ private:
+ // Disallow access to base constructors and untyped index/size-related operators.
+ using Base::Base;
+ using Base::operator=;
+ using Base::operator[];
+ using Base::at;
+ using Base::reserve;
+ using Base::resize;
+ using Base::size;
+
+ public:
+ vector() : Base() {
+ }
+
+ explicit vector(Index size) : Base(static_cast<I>(size)) {
+ }
+
+ vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
+ }
+
+ vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
+ }
+
+ vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
+ }
+
+ vector(std::initializer_list<Value> init) : Base(init) {
+ }
+
+ vector& operator=(const vector& rhs) {
+ Base::operator=(static_cast<const Base&>(rhs));
+ return *this;
+ }
+
+ vector& operator=(vector&& rhs) noexcept {
+ Base::operator=(static_cast<Base&&>(rhs));
+ return *this;
+ }
+
+ Value& operator[](Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ Value& at(Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr const Value& at(Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr Index size() const {
+ ASSERT(std::numeric_limits<I>::max() >= Base::size());
+ return Index(static_cast<I>(Base::size()));
+ }
+
+ void resize(Index size) {
+ Base::resize(static_cast<I>(size));
+ }
+
+ void reserve(Index size) {
+ Base::reserve(static_cast<I>(size));
+ }
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_VECTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h b/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h
new file mode 100644
index 00000000000..57d2b130b42
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/vulkan_platform.h
@@ -0,0 +1,194 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_VULKANPLATFORM_H_
+#define COMMON_VULKANPLATFORM_H_
+
+#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
+# error "vulkan_platform.h included without the Vulkan backend enabled"
+#endif
+#if defined(VULKAN_CORE_H_)
+# error "vulkan.h included before vulkan_platform.h"
+#endif
+
+#include "dawn/common/Platform.h"
+
+#include <cstddef>
+#include <cstdint>
+
+// vulkan.h defines non-dispatchable handles to opaque pointers on 64bit architectures and uint64_t
+// on 32bit architectures. This causes a problem in 32bit where the handles cannot be used to
+// distinguish between overloads of the same function.
+// Change the definition of non-dispatchable handles to be opaque structures containing a uint64_t
+// and overload the comparison operators between themselves and VK_NULL_HANDLE (which will be
+// redefined to be nullptr). This keeps the type-safety of having the handles be different types
+// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
+
+#if defined(DAWN_PLATFORM_64_BIT)
+# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t to the handle type
+// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+ return reinterpret_cast<T>(u64);
+}
+#elif defined(DAWN_PLATFORM_32_BIT)
+# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+ return u64;
+}
+#else
+# error "Unsupported platform"
+#endif
+
+// Define a dummy Vulkan handle for use before we include vulkan.h
+DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
+
+// Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so
+// why bother with the wrapper struct? It turns out that on Linux Intel x86 alignof(uint64_t) is 8
+// but alignof(struct{uint64_t a;}) is 4. This is because this Intel ABI doesn't say anything about
+// double-word alignment so for historical reasons compilers violated the standard and use an
+// alignment of 4 for uint64_t (and double) inside structures.
+// See https://stackoverflow.com/questions/44877185
+// One way to get the alignment inside structures of a type is to look at the alignment of it
+// wrapped in a structure. Hence the WrapperStruct helper defined below.
+
+namespace dawn::native::vulkan {
+
+ namespace detail {
+ template <typename T>
+ struct WrapperStruct {
+ T member;
+ };
+
+ template <typename T>
+ static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+
+ static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+ static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+
+        // A simple handle type that supports nullptr as a 0 value.
+ template <typename Tag, typename HandleType>
+ class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+ public:
+ // Default constructor and assigning of VK_NULL_HANDLE
+ VkHandle() = default;
+ VkHandle(std::nullptr_t) {
+ }
+
+ // Use default copy constructor/assignment
+ VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+ VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+
+ // Comparisons between handles
+ bool operator==(VkHandle<Tag, HandleType> other) const {
+ return mHandle == other.mHandle;
+ }
+ bool operator!=(VkHandle<Tag, HandleType> other) const {
+ return mHandle != other.mHandle;
+ }
+
+ // Comparisons between handles and VK_NULL_HANDLE
+ bool operator==(std::nullptr_t) const {
+ return mHandle == 0;
+ }
+ bool operator!=(std::nullptr_t) const {
+ return mHandle != 0;
+ }
+
+ // Implicit conversion to real Vulkan types.
+ operator HandleType() const {
+ return GetHandle();
+ }
+
+ HandleType GetHandle() const {
+ return mHandle;
+ }
+
+ HandleType& operator*() {
+ return mHandle;
+ }
+
+ static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+ return VkHandle{handle};
+ }
+
+ private:
+ explicit VkHandle(HandleType handle) : mHandle(handle) {
+ }
+
+ HandleType mHandle = 0;
+ };
+ } // namespace detail
+
+ static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+
+ template <typename Tag, typename HandleType>
+ HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+ return reinterpret_cast<HandleType*>(handle);
+ }
+
+} // namespace dawn::native::vulkan
+
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
+ namespace dawn::native::vulkan { \
+ using object = detail::VkHandle<struct VkTag##object, ::object>; \
+ static_assert(sizeof(object) == sizeof(uint64_t)); \
+ static_assert(alignof(object) == detail::kUint64Alignment); \
+ static_assert(sizeof(object) == sizeof(::object)); \
+ static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
+ } // namespace dawn::native::vulkan
+
+// Import additional parts of Vulkan that are supported on our architecture and preemptively include
+// headers that vulkan.h includes that we have "undefs" for.
+#if defined(DAWN_PLATFORM_WINDOWS)
+# define VK_USE_PLATFORM_WIN32_KHR
+# include "dawn/common/windows_with_undefs.h"
+#endif // DAWN_PLATFORM_WINDOWS
+
+#if defined(DAWN_USE_X11)
+# define VK_USE_PLATFORM_XLIB_KHR
+# define VK_USE_PLATFORM_XCB_KHR
+# include "dawn/common/xlib_with_undefs.h"
+#endif // defined(DAWN_USE_X11)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+# define VK_USE_PLATFORM_METAL_EXT
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_ANDROID)
+# define VK_USE_PLATFORM_ANDROID_KHR
+#endif // defined(DAWN_PLATFORM_ANDROID)
+
+#if defined(DAWN_PLATFORM_FUCHSIA)
+# define VK_USE_PLATFORM_FUCHSIA
+#endif // defined(DAWN_PLATFORM_FUCHSIA)
+
+// The actual inclusion of vulkan.h!
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+
+// Redefine VK_NULL_HANDLE for better type safety where possible.
+#undef VK_NULL_HANDLE
+#if defined(DAWN_PLATFORM_64_BIT)
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+#elif defined(DAWN_PLATFORM_32_BIT)
+static constexpr uint64_t VK_NULL_HANDLE = 0;
+#else
+# error "Unsupported platform"
+#endif
+
+#endif // COMMON_VULKANPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h b/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h
new file mode 100644
index 00000000000..686da9fa9f2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/windows_with_undefs.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_WINDOWS_WITH_UNDEFS_H_
+#define COMMON_WINDOWS_WITH_UNDEFS_H_
+
+#include "dawn/common/Platform.h"
+
+#if !defined(DAWN_PLATFORM_WINDOWS)
+# error "windows_with_undefs.h included on non-Windows"
+#endif
+
+// This header includes <windows.h> but removes all the extra defines that conflict with identifiers
+// in internal code. It should never be included in something that is part of the public interface.
+#include <windows.h>
+
+// Macros defined for ANSI / Unicode support
+#undef CreateWindow
+#undef GetMessage
+
+// Macros defined to produce compiler intrinsics
+#undef MemoryBarrier
+
+// Macro defined as an alias of GetTickCount
+#undef GetCurrentTime
+
+#endif // COMMON_WINDOWS_WITH_UNDEFS_H_
diff --git a/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h b/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h
new file mode 100644
index 00000000000..7ac5a62b7db
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/common/xlib_with_undefs.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_XLIB_WITH_UNDEFS_H_
+#define COMMON_XLIB_WITH_UNDEFS_H_
+
+#include "dawn/common/Platform.h"
+
+#if !defined(DAWN_PLATFORM_LINUX)
+# error "xlib_with_undefs.h included on non-Linux"
+#endif
+
+// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
+// identifiers in internal code. It should never be included in something that is part of the public
+// interface.
+#include <X11/Xlib.h>
+
+// Xlib-xcb.h technically includes Xlib.h but we separate the includes to make it more clear what
+// the problem is if one of these two includes fail.
+#include <X11/Xlib-xcb.h>
+
+#undef Success
+#undef None
+#undef Always
+#undef Bool
+
+using XErrorHandler = int (*)(Display*, XErrorEvent*);
+
+#endif // COMMON_XLIB_WITH_UNDEFS_H_
diff --git a/chromium/third_party/dawn/src/dawn/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/dawn/fuzzers/BUILD.gn
new file mode 100644
index 00000000000..f7ea2a044db
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/fuzzers/BUILD.gn
@@ -0,0 +1,124 @@
+# Copyright 2018 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("//build_overrides/build.gni")
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+# We only have libfuzzer in Chromium builds but if we build fuzzer targets only
+# there, we would risk breaking fuzzer targets all the time when making changes
+# to Dawn. To avoid that, we make fuzzer targets compile in standalone builds
+# as well with a dawn_fuzzer_test target that acts like Chromium's fuzzer_test.
+#
+# The standalone fuzzer targets are able to run a single fuzzer input which
+# could help reproduce fuzzer crashes more easily because you don't need a
+# whole Chromium checkout.
+
+if (build_with_chromium) {
+ import("//testing/libfuzzer/fuzzer_test.gni")
+
+ # In Chromium build we just proxy everything to the real fuzzer_test
+ template("dawn_fuzzer_test") {
+ fuzzer_test(target_name) {
+ forward_variables_from(invoker, "*")
+ }
+ }
+} else {
+ import("//testing/test.gni")
+
+ # In standalone build we do something similar to fuzzer_test.
+ template("dawn_fuzzer_test") {
+ test(target_name) {
+ forward_variables_from(invoker,
+ [
+ "asan_options",
+ "cflags",
+ "cflags_cc",
+ "check_includes",
+ "defines",
+ "deps",
+ "include_dirs",
+ "sources",
+ ])
+
+ if (defined(asan_options)) {
+ not_needed([ "asan_options" ])
+ }
+
+ if (!defined(configs)) {
+ configs = []
+ }
+
+ # Weirdly fuzzer_test uses a special variable for additional configs.
+ if (defined(invoker.additional_configs)) {
+ configs += invoker.additional_configs
+ }
+
+ sources += [ "StandaloneFuzzerMain.cpp" ]
+ }
+ }
+}
+
+static_library("dawn_wire_server_fuzzer_common") {
+ sources = [
+ "DawnWireServerFuzzer.cpp",
+ "DawnWireServerFuzzer.h",
+ ]
+ public_deps = [
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire:static",
+ ]
+}
+
+dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
+ sources = [ "DawnWireServerAndFrontendFuzzer.cpp" ]
+
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+ additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+}
+
+if (is_win) {
+ dawn_fuzzer_test("dawn_wire_server_and_d3d12_backend_fuzzer") {
+ sources = [ "DawnWireServerAndD3D12BackendFuzzer.cpp" ]
+
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+ additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+ }
+}
+
+dawn_fuzzer_test("dawn_wire_server_and_vulkan_backend_fuzzer") {
+ sources = [ "DawnWireServerAndVulkanBackendFuzzer.cpp" ]
+
+ deps = [ ":dawn_wire_server_fuzzer_common" ]
+
+ additional_configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+}
+
+# A group target to build all the fuzzers
+group("fuzzers") {
+ testonly = true
+ deps = [
+ ":dawn_wire_server_and_frontend_fuzzer",
+ ":dawn_wire_server_and_vulkan_backend_fuzzer",
+ ]
+
+ if (is_win) {
+ deps += [ ":dawn_wire_server_and_d3d12_backend_fuzzer" ]
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn/native/Adapter.cpp b/chromium/third_party/dawn/src/dawn/native/Adapter.cpp
new file mode 100644
index 00000000000..4c000ac1bfe
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Adapter.cpp
@@ -0,0 +1,227 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+ AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+ : mInstance(instance), mBackend(backend) {
+ mSupportedFeatures.EnableFeature(Feature::DawnNative);
+ mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+ }
+
+ MaybeError AdapterBase::Initialize() {
+ DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
+ DAWN_TRY_CONTEXT(
+ InitializeSupportedFeaturesImpl(),
+ "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+ "backend=%s type=%s)",
+ mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+ DAWN_TRY_CONTEXT(
+ InitializeSupportedLimitsImpl(&mLimits),
+ "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+ "backend=%s type=%s)",
+ mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+
+ // Enforce internal Dawn constants.
+ mLimits.v1.maxVertexBufferArrayStride =
+ std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
+ mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
+ mLimits.v1.maxVertexAttributes =
+ std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
+ mLimits.v1.maxVertexBuffers =
+ std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
+ mLimits.v1.maxInterStageShaderComponents =
+ std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
+ mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
+ mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+ mLimits.v1.maxSamplersPerShaderStage =
+ std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
+ mLimits.v1.maxStorageBuffersPerShaderStage =
+ std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
+ mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
+ mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+ mLimits.v1.maxUniformBuffersPerShaderStage =
+ std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
+ mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
+ std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
+ kMaxDynamicUniformBuffersPerPipelineLayout);
+ mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
+ std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
+ kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ return {};
+ }
+
+ bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+ return GetLimits(limits);
+ }
+
+ void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+ properties->vendorID = mVendorId;
+ properties->deviceID = mDeviceId;
+ properties->name = mName.c_str();
+ properties->driverDescription = mDriverDescription.c_str();
+ properties->adapterType = mAdapterType;
+ properties->backendType = mBackend;
+ }
+
+ bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+ return mSupportedFeatures.IsEnabled(feature);
+ }
+
+ size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+ return mSupportedFeatures.EnumerateFeatures(features);
+ }
+
+ DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+ DeviceDescriptor defaultDesc = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDesc;
+ }
+ auto result = CreateDeviceInternal(descriptor);
+ if (result.IsError()) {
+ mInstance->ConsumedError(result.AcquireError());
+ return nullptr;
+ }
+ return result.AcquireSuccess().Detach();
+ }
+
+ void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ static constexpr DeviceDescriptor kDefaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &kDefaultDescriptor;
+ }
+ auto result = CreateDeviceInternal(descriptor);
+
+ if (result.IsError()) {
+ std::unique_ptr<ErrorData> errorData = result.AcquireError();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPURequestDeviceStatus_Error, nullptr,
+ errorData->GetFormattedMessage().c_str(), userdata);
+ return;
+ }
+
+ Ref<DeviceBase> device = result.AcquireSuccess();
+
+ WGPURequestDeviceStatus status =
+ device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(status, ToAPI(device.Detach()), nullptr, userdata);
+ }
+
+ uint32_t AdapterBase::GetVendorId() const {
+ return mVendorId;
+ }
+
+ uint32_t AdapterBase::GetDeviceId() const {
+ return mDeviceId;
+ }
+
+ wgpu::BackendType AdapterBase::GetBackendType() const {
+ return mBackend;
+ }
+
+ InstanceBase* AdapterBase::GetInstance() const {
+ return mInstance;
+ }
+
+ FeaturesSet AdapterBase::GetSupportedFeatures() const {
+ return mSupportedFeatures;
+ }
+
+ bool AdapterBase::SupportsAllRequiredFeatures(
+ const ityp::span<size_t, const wgpu::FeatureName>& features) const {
+ for (wgpu::FeatureName f : features) {
+ if (!mSupportedFeatures.IsEnabled(f)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+ WGPUDeviceProperties adapterProperties = {};
+ adapterProperties.deviceID = mDeviceId;
+ adapterProperties.vendorID = mVendorId;
+ adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+
+ mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+ // This is OK for now because there are no limit feature structs.
+ // If we add additional structs, the caller will need to provide memory
+ // to store them (ex. by calling GetLimits directly instead). Currently,
+ // we keep this function as it's only used internally in Chromium to
+ // send the adapter properties across the wire.
+ GetLimits(FromAPI(&adapterProperties.limits));
+ return adapterProperties;
+ }
+
+ bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ if (mUseTieredLimits) {
+ limits->limits = ApplyLimitTiers(mLimits.v1);
+ } else {
+ limits->limits = mLimits.v1;
+ }
+ return true;
+ }
+
+ ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+ const DeviceDescriptor* descriptor) {
+ ASSERT(descriptor != nullptr);
+
+ for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
+ wgpu::FeatureName f = descriptor->requiredFeatures[i];
+ DAWN_TRY(ValidateFeatureName(f));
+ DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
+ "Requested feature %s is not supported.", f);
+ }
+
+ if (descriptor->requiredLimits != nullptr) {
+ DAWN_TRY_CONTEXT(
+ ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+ descriptor->requiredLimits->limits),
+ "validating required limits");
+
+ DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+ "nextInChain is not nullptr.");
+ }
+ return CreateDeviceImpl(descriptor);
+ }
+
+ void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+ mUseTieredLimits = useTieredLimits;
+ }
+
+ void AdapterBase::ResetInternalDeviceForTesting() {
+ mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
+ }
+
+ MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+ return DAWN_INTERNAL_ERROR(
+ "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Adapter.h b/chromium/third_party/dawn/src/dawn/native/Adapter.h
new file mode 100644
index 00000000000..bd66c8b87e9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Adapter.h
@@ -0,0 +1,99 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ADAPTER_H_
+#define DAWNNATIVE_ADAPTER_H_
+
+#include "dawn/native/DawnNative.h"
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ class AdapterBase : public RefCounted {
+ public:
+ AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
+ virtual ~AdapterBase() = default;
+
+ MaybeError Initialize();
+
+ // WebGPU API
+ bool APIGetLimits(SupportedLimits* limits) const;
+ void APIGetProperties(AdapterProperties* properties) const;
+ bool APIHasFeature(wgpu::FeatureName feature) const;
+ size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+ void APIRequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+ DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
+
+ uint32_t GetVendorId() const;
+ uint32_t GetDeviceId() const;
+ wgpu::BackendType GetBackendType() const;
+ InstanceBase* GetInstance() const;
+
+ void ResetInternalDeviceForTesting();
+
+ FeaturesSet GetSupportedFeatures() const;
+ bool SupportsAllRequiredFeatures(
+ const ityp::span<size_t, const wgpu::FeatureName>& features) const;
+ WGPUDeviceProperties GetAdapterProperties() const;
+
+ bool GetLimits(SupportedLimits* limits) const;
+
+ void SetUseTieredLimits(bool useTieredLimits);
+
+ virtual bool SupportsExternalImages() const = 0;
+
+ protected:
+ uint32_t mVendorId = 0xFFFFFFFF;
+ uint32_t mDeviceId = 0xFFFFFFFF;
+ std::string mName;
+ wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
+ std::string mDriverDescription;
+ FeaturesSet mSupportedFeatures;
+
+ private:
+ virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+ const DeviceDescriptor* descriptor) = 0;
+
+ virtual MaybeError InitializeImpl() = 0;
+
+        // Check base WebGPU features and discover supported features.
+ virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
+
+ // Check base WebGPU limits and populate supported limits.
+ virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
+
+ ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
+
+ virtual MaybeError ResetInternalDeviceForTestingImpl();
+ InstanceBase* mInstance = nullptr;
+ wgpu::BackendType mBackend;
+ CombinedLimits mLimits;
+ bool mUseTieredLimits = false;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp b/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp
new file mode 100644
index 00000000000..a1e29486d9b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/AsyncTask.cpp
@@ -0,0 +1,65 @@
+#include "dawn/native/AsyncTask.h"
+
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native {
+
+ AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+ : mWorkerTaskPool(workerTaskPool) {
+ }
+
+ void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+        // If these allocations become expensive, we can slab-allocate tasks.
+ Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
+ waitableTask->taskManager = this;
+ waitableTask->asyncTask = std::move(asyncTask);
+
+ {
+ // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
+ // and we may remove waitableTask objects from mPendingTasks in either main thread
+            // (WaitAllPendingTasks()) or sub-thread (HandleTaskCompletion()), so mPendingTasks should be
+ // protected by a mutex.
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ mPendingTasks.emplace(waitableTask.Get(), waitableTask);
+ }
+
+ // Ref the task since it is accessed inside the worker function.
+ // The worker function will acquire and release the task upon completion.
+ waitableTask->Reference();
+ waitableTask->waitableEvent =
+ mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
+ }
+
+ void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ auto iter = mPendingTasks.find(task);
+ if (iter != mPendingTasks.end()) {
+ mPendingTasks.erase(iter);
+ }
+ }
+
+ void AsyncTaskManager::WaitAllPendingTasks() {
+ std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+
+ {
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ allPendingTasks.swap(mPendingTasks);
+ }
+
+ for (auto& [_, task] : allPendingTasks) {
+ task->waitableEvent->Wait();
+ }
+ }
+
+ bool AsyncTaskManager::HasPendingTasks() {
+ std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+ return !mPendingTasks.empty();
+ }
+
+ void AsyncTaskManager::DoWaitableTask(void* task) {
+ Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
+ waitableTask->asyncTask();
+ waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/AsyncTask.h b/chromium/third_party/dawn/src/dawn/native/AsyncTask.h
new file mode 100644
index 00000000000..ca2edd0d66c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/AsyncTask.h
@@ -0,0 +1,65 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ASYC_TASK_H_
+#define DAWNNATIVE_ASYC_TASK_H_
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include "dawn/common/RefCounted.h"
+
+namespace dawn::platform {
+ class WaitableEvent;
+ class WorkerTaskPool;
+} // namespace dawn::platform
+
+namespace dawn::native {
+
+ // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+ // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+ // shutting down the device. RunNow() could be used for more advanced scenarios, for example
+ // always doing ShaderModule initial compilation asynchronously, but being able to steal the
+ // task if we need it for synchronous pipeline compilation.
+ using AsyncTask = std::function<void()>;
+
+ class AsyncTaskManager {
+ public:
+ explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
+
+ void PostTask(AsyncTask asyncTask);
+ void WaitAllPendingTasks();
+ bool HasPendingTasks();
+
+ private:
+ class WaitableTask : public RefCounted {
+ public:
+ AsyncTask asyncTask;
+ AsyncTaskManager* taskManager;
+ std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
+ };
+
+ static void DoWaitableTask(void* task);
+ void HandleTaskCompletion(WaitableTask* task);
+
+ std::mutex mPendingTasksMutex;
+ std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
+ dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+ };
+
+} // namespace dawn::native
+
+#endif
diff --git a/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp
new file mode 100644
index 00000000000..c5ac739d584
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/AttachmentState.cpp
@@ -0,0 +1,165 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/AttachmentState.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+ AttachmentStateBlueprint::AttachmentStateBlueprint(
+ const RenderBundleEncoderDescriptor* descriptor)
+ : mSampleCount(descriptor->sampleCount) {
+ ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = descriptor->colorFormats[static_cast<uint8_t>(i)];
+ }
+ mDepthStencilFormat = descriptor->depthStencilFormat;
+ }
+
+ AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+ : mSampleCount(descriptor->multisample.count) {
+ if (descriptor->fragment != nullptr) {
+ ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+ ++i) {
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
+ }
+ }
+ if (descriptor->depthStencil != nullptr) {
+ mDepthStencilFormat = descriptor->depthStencil->format;
+ }
+ }
+
+ AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
+ ++i) {
+ TextureViewBase* attachment =
+ descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = attachment->GetFormat().format;
+ if (mSampleCount == 0) {
+ mSampleCount = attachment->GetTexture()->GetSampleCount();
+ } else {
+ ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+ }
+ }
+ if (descriptor->depthStencilAttachment != nullptr) {
+ TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+ mDepthStencilFormat = attachment->GetFormat().format;
+ if (mSampleCount == 0) {
+ mSampleCount = attachment->GetTexture()->GetSampleCount();
+ } else {
+ ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+ }
+ }
+ ASSERT(mSampleCount > 0);
+ }
+
+ AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
+ default;
+
+ size_t AttachmentStateBlueprint::HashFunc::operator()(
+ const AttachmentStateBlueprint* attachmentState) const {
+ size_t hash = 0;
+
+ // Hash color formats
+ HashCombine(&hash, attachmentState->mColorAttachmentsSet);
+ for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
+ HashCombine(&hash, attachmentState->mColorFormats[i]);
+ }
+
+ // Hash depth stencil attachment
+ HashCombine(&hash, attachmentState->mDepthStencilFormat);
+
+ // Hash sample count
+ HashCombine(&hash, attachmentState->mSampleCount);
+
+ return hash;
+ }
+
+ bool AttachmentStateBlueprint::EqualityFunc::operator()(
+ const AttachmentStateBlueprint* a,
+ const AttachmentStateBlueprint* b) const {
+ // Check set attachments
+ if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+ return false;
+ }
+
+ // Check color formats
+ for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
+ if (a->mColorFormats[i] != b->mColorFormats[i]) {
+ return false;
+ }
+ }
+
+ // Check depth stencil format
+ if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
+ return false;
+ }
+
+ // Check sample count
+ if (a->mSampleCount != b->mSampleCount) {
+ return false;
+ }
+
+ return true;
+ }
+
+ AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+ : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
+ }
+
+ AttachmentState::~AttachmentState() {
+ GetDevice()->UncacheAttachmentState(this);
+ }
+
+ size_t AttachmentState::ComputeContentHash() {
+ // TODO(dawn:549): skip this traversal and reuse the blueprint.
+ return AttachmentStateBlueprint::HashFunc()(this);
+ }
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+ AttachmentState::GetColorAttachmentsMask() const {
+ return mColorAttachmentsSet;
+ }
+
+ wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
+ ColorAttachmentIndex index) const {
+ ASSERT(mColorAttachmentsSet[index]);
+ return mColorFormats[index];
+ }
+
+ bool AttachmentState::HasDepthStencilAttachment() const {
+ return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
+ }
+
+ wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+ ASSERT(HasDepthStencilAttachment());
+ return mDepthStencilFormat;
+ }
+
+ uint32_t AttachmentState::GetSampleCount() const {
+ return mSampleCount;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/AttachmentState.h b/chromium/third_party/dawn/src/dawn/native/AttachmentState.h
new file mode 100644
index 00000000000..21eff85c565
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/AttachmentState.h
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ATTACHMENTSTATE_H_
+#define DAWNNATIVE_ATTACHMENTSTATE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+ // can be constructed by copying the blueprint state instead of traversing descriptors.
+ // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+ class AttachmentStateBlueprint {
+ public:
+ // Note: Descriptors must be validated before the AttachmentState is constructed.
+ explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
+ explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+ explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
+
+ AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
+
+ // Functors necessary for the unordered_set<AttachmentState*>-based cache.
+ struct HashFunc {
+ size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
+ };
+ struct EqualityFunc {
+ bool operator()(const AttachmentStateBlueprint* a,
+ const AttachmentStateBlueprint* b) const;
+ };
+
+ protected:
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
+ ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
+ // Default (texture format Undefined) indicates there is no depth stencil attachment.
+ wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
+ uint32_t mSampleCount = 0;
+ };
+
+ class AttachmentState final : public AttachmentStateBlueprint,
+ public ObjectBase,
+ public CachedObject {
+ public:
+ AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+ wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
+ bool HasDepthStencilAttachment() const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
+ uint32_t GetSampleCount() const;
+
+ size_t ComputeContentHash() override;
+
+ private:
+ ~AttachmentState() override;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ATTACHMENTSTATE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BUILD.gn b/chromium/third_party/dawn/src/dawn/native/BUILD.gn
new file mode 100644
index 00000000000..d70cdbfe6fa
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BUILD.gn
@@ -0,0 +1,771 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//build_overrides/build.gni")
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+# Import mac_deployment_target
+if (is_mac) {
+ if (dawn_has_build) {
+ import("//build/config/mac/mac_sdk.gni")
+ } else {
+ mac_deployment_target = "10.11.0"
+ }
+}
+
+# The VVLs are an optional dependency, only use it if the path has been set.
+enable_vulkan_validation_layers = dawn_enable_vulkan_validation_layers &&
+ dawn_vulkan_validation_layers_dir != ""
+if (enable_vulkan_validation_layers) {
+ import("//build_overrides/vulkan_validation_layers.gni")
+}
+
+# ANGLE is an optional dependency; only use it if the path has been set.
+use_angle = dawn_use_angle && defined(dawn_angle_dir)
+
+# Swiftshader is an optional dependency, only use it if the path has been set.
+use_swiftshader = dawn_use_swiftshader && dawn_swiftshader_dir != ""
+if (use_swiftshader) {
+ assert(dawn_enable_vulkan,
+ "dawn_use_swiftshader requires dawn_enable_vulkan=true")
+ import("${dawn_swiftshader_dir}/src/Vulkan/vulkan.gni")
+}
+
+# The Vulkan loader is an optional dependency, only use it if the path has been
+# set.
+if (dawn_enable_vulkan) {
+ enable_vulkan_loader =
+ dawn_enable_vulkan_loader && dawn_vulkan_loader_dir != ""
+}
+
+group("abseil") {
+ # When build_with_chromium=true we need to include "//third_party/abseil-cpp:absl" while
+ # it's beneficial to be more specific with standalone Dawn, especially when it comes to
+ # including it as a dependency in other projects (such as Skia).
+ if (build_with_chromium) {
+ public_deps = [ "$dawn_abseil_dir:absl" ]
+ } else {
+ public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:str_format" ]
+ }
+}
+
+config("internal") {
+ configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+ # Suppress warnings that Metal isn't in the deployment target of Chrome:
+ # initialization of the Metal backend is behind a IsMetalSupported check so
+ # Dawn won't call Metal functions on macOS 10.10.
+ # At the time this is written Chromium supports 10.10.0 and above, so if we
+  # aren't on 10.10 it means we are on 10.11 and above, and Metal is available.
+ # Skipping this check on 10.11 and above is important as it allows getting
+ # proper compilation warning when using 10.12 and above feature for example.
+ # TODO(crbug.com/1004024): Consider using API_AVAILABLE annotations on all
+ # metal code in dawn once crbug.com/1004024 is sorted out if Chromium still
+ # supports 10.10 then.
+ if (is_mac && mac_deployment_target == "10.10.0") {
+ cflags_objcc = [ "-Wno-unguarded-availability" ]
+ }
+}
+
+config("weak_framework") {
+ if (is_mac && dawn_enable_metal) {
+ weak_frameworks = [ "Metal.framework" ]
+ }
+}
+
+# Config that adds the @executable_path rpath if needed so that Swiftshader or the Vulkan loader are found.
+config("vulkan_rpath") {
+ if (is_mac && dawn_enable_vulkan &&
+ (use_swiftshader || enable_vulkan_loader)) {
+ ldflags = [
+ "-rpath",
+ "@executable_path/",
+ ]
+ }
+}
+
+dawn_json_generator("utils_gen") {
+ target = "native_utils"
+ outputs = [
+ "src/dawn/native/ChainUtils_autogen.h",
+ "src/dawn/native/ChainUtils_autogen.cpp",
+ "src/dawn/native/ProcTable.cpp",
+ "src/dawn/native/dawn_platform_autogen.h",
+ "src/dawn/native/wgpu_structs_autogen.h",
+ "src/dawn/native/wgpu_structs_autogen.cpp",
+ "src/dawn/native/ValidationUtils_autogen.h",
+ "src/dawn/native/ValidationUtils_autogen.cpp",
+ "src/dawn/native/webgpu_absl_format_autogen.h",
+ "src/dawn/native/webgpu_absl_format_autogen.cpp",
+ "src/dawn/native/ObjectType_autogen.h",
+ "src/dawn/native/ObjectType_autogen.cpp",
+ ]
+}
+
+if (dawn_enable_opengl) {
+ dawn_generator("opengl_loader_gen") {
+ script = "${dawn_root}/generator/opengl_loader_generator.py"
+ args = [
+ "--gl-xml",
+ rebase_path("${dawn_root}/third_party/khronos/gl.xml", root_build_dir),
+ "--supported-extensions",
+ rebase_path("opengl/supported_extensions.json", root_build_dir),
+ ]
+ outputs = [
+ "src/dawn/native/opengl/OpenGLFunctionsBase_autogen.cpp",
+ "src/dawn/native/opengl/OpenGLFunctionsBase_autogen.h",
+ "src/dawn/native/opengl/opengl_platform_autogen.h",
+ ]
+ }
+}
+
+# Public dawn native headers so they can be publicly visible for
+# dependencies of dawn native
+source_set("headers") {
+ public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+ all_dependent_configs = [ "${dawn_root}/include/dawn:public" ]
+ sources = [
+ "${dawn_root}/include/dawn/native/DawnNative.h",
+ "${dawn_root}/include/dawn/native/dawn_native_export.h",
+
+    # Include all backends' public headers so that dependencies can include
+ # them even when the backends are disabled.
+ "${dawn_root}/include/dawn/native/D3D12Backend.h",
+ "${dawn_root}/include/dawn/native/MetalBackend.h",
+ "${dawn_root}/include/dawn/native/NullBackend.h",
+ "${dawn_root}/include/dawn/native/OpenGLBackend.h",
+ "${dawn_root}/include/dawn/native/VulkanBackend.h",
+ ]
+}
+
+# The meat of the compilation for dawn native so that we can cheaply have
+# shared_library / static_library versions of it. It compiles all the files
+# except those that define exported symbols.
+source_set("sources") {
+ deps = [
+ ":headers",
+ ":utils_gen",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_spirv_tools_dir}:spvtools_opt",
+ "${dawn_spirv_tools_dir}:spvtools_val",
+ "${dawn_tint_dir}/src:libtint",
+ ]
+ defines = []
+ libs = []
+ data_deps = []
+
+ configs += [ ":internal" ]
+
+ # Dependencies that are needed to compile dawn native entry points in
+ # FooBackend.cpp need to be public deps so they are propagated to the
+ # dawn native target
+ public_deps = [
+ ":abseil",
+ "${dawn_root}/src/dawn/platform",
+ ]
+
+ sources = get_target_outputs(":utils_gen")
+ sources += [
+ "Adapter.cpp",
+ "Adapter.h",
+ "AsyncTask.cpp",
+ "AsyncTask.h",
+ "AttachmentState.cpp",
+ "AttachmentState.h",
+ "BackendConnection.cpp",
+ "BackendConnection.h",
+ "BindGroup.cpp",
+ "BindGroup.h",
+ "BindGroupLayout.cpp",
+ "BindGroupLayout.h",
+ "BindGroupTracker.h",
+ "BindingInfo.cpp",
+ "BindingInfo.h",
+ "BuddyAllocator.cpp",
+ "BuddyAllocator.h",
+ "BuddyMemoryAllocator.cpp",
+ "BuddyMemoryAllocator.h",
+ "Buffer.cpp",
+ "Buffer.h",
+ "CachedObject.cpp",
+ "CachedObject.h",
+ "CallbackTaskManager.cpp",
+ "CallbackTaskManager.h",
+ "CommandAllocator.cpp",
+ "CommandAllocator.h",
+ "CommandBuffer.cpp",
+ "CommandBuffer.h",
+ "CommandBufferStateTracker.cpp",
+ "CommandBufferStateTracker.h",
+ "CommandEncoder.cpp",
+ "CommandEncoder.h",
+ "CommandValidation.cpp",
+ "CommandValidation.h",
+ "Commands.cpp",
+ "Commands.h",
+ "CompilationMessages.cpp",
+ "CompilationMessages.h",
+ "ComputePassEncoder.cpp",
+ "ComputePassEncoder.h",
+ "ComputePipeline.cpp",
+ "ComputePipeline.h",
+ "CopyTextureForBrowserHelper.cpp",
+ "CopyTextureForBrowserHelper.h",
+ "CreatePipelineAsyncTask.cpp",
+ "CreatePipelineAsyncTask.h",
+ "Device.cpp",
+ "Device.h",
+ "DynamicUploader.cpp",
+ "DynamicUploader.h",
+ "EncodingContext.cpp",
+ "EncodingContext.h",
+ "EnumClassBitmasks.h",
+ "EnumMaskIterator.h",
+ "Error.cpp",
+ "Error.h",
+ "ErrorData.cpp",
+ "ErrorData.h",
+ "ErrorInjector.cpp",
+ "ErrorInjector.h",
+ "ErrorScope.cpp",
+ "ErrorScope.h",
+ "ExternalTexture.cpp",
+ "ExternalTexture.h",
+ "Features.cpp",
+ "Features.h",
+ "Format.cpp",
+ "Format.h",
+ "Forward.h",
+ "IndirectDrawMetadata.cpp",
+ "IndirectDrawMetadata.h",
+ "IndirectDrawValidationEncoder.cpp",
+ "IndirectDrawValidationEncoder.h",
+ "Instance.cpp",
+ "Instance.h",
+ "IntegerTypes.h",
+ "InternalPipelineStore.cpp",
+ "InternalPipelineStore.h",
+ "Limits.cpp",
+ "Limits.h",
+ "ObjectBase.cpp",
+ "ObjectBase.h",
+ "ObjectContentHasher.cpp",
+ "ObjectContentHasher.h",
+ "PassResourceUsage.h",
+ "PassResourceUsageTracker.cpp",
+ "PassResourceUsageTracker.h",
+ "PerStage.cpp",
+ "PerStage.h",
+ "PersistentCache.cpp",
+ "PersistentCache.h",
+ "Pipeline.cpp",
+ "Pipeline.h",
+ "PipelineLayout.cpp",
+ "PipelineLayout.h",
+ "PooledResourceMemoryAllocator.cpp",
+ "PooledResourceMemoryAllocator.h",
+ "ProgrammableEncoder.cpp",
+ "ProgrammableEncoder.h",
+ "QueryHelper.cpp",
+ "QueryHelper.h",
+ "QuerySet.cpp",
+ "QuerySet.h",
+ "Queue.cpp",
+ "Queue.h",
+ "RenderBundle.cpp",
+ "RenderBundle.h",
+ "RenderBundleEncoder.cpp",
+ "RenderBundleEncoder.h",
+ "RenderEncoderBase.cpp",
+ "RenderEncoderBase.h",
+ "RenderPassEncoder.cpp",
+ "RenderPassEncoder.h",
+ "RenderPipeline.cpp",
+ "RenderPipeline.h",
+ "ResourceHeap.h",
+ "ResourceHeapAllocator.h",
+ "ResourceMemoryAllocation.cpp",
+ "ResourceMemoryAllocation.h",
+ "RingBufferAllocator.cpp",
+ "RingBufferAllocator.h",
+ "Sampler.cpp",
+ "Sampler.h",
+ "ScratchBuffer.cpp",
+ "ScratchBuffer.h",
+ "ShaderModule.cpp",
+ "ShaderModule.h",
+ "StagingBuffer.cpp",
+ "StagingBuffer.h",
+ "Subresource.cpp",
+ "Subresource.h",
+ "SubresourceStorage.h",
+ "Surface.cpp",
+ "Surface.h",
+ "SwapChain.cpp",
+ "SwapChain.h",
+ "Texture.cpp",
+ "Texture.h",
+ "TintUtils.cpp",
+ "TintUtils.h",
+ "ToBackend.h",
+ "Toggles.cpp",
+ "Toggles.h",
+ "VertexFormat.cpp",
+ "VertexFormat.h",
+ "dawn_platform.h",
+ "utils/WGPUHelpers.cpp",
+ "utils/WGPUHelpers.h",
+ "webgpu_absl_format.cpp",
+ "webgpu_absl_format.h",
+ ]
+
+ if (dawn_use_x11) {
+ libs += [ "X11" ]
+ sources += [
+ "XlibXcbFunctions.cpp",
+ "XlibXcbFunctions.h",
+ ]
+ }
+
+ # Only win32 app needs to link with user32.lib
+  # In UWP, all available APIs are defined in WindowsApp.lib
+ if (is_win && !dawn_is_winuwp) {
+ libs += [ "user32.lib" ]
+ }
+
+ if (dawn_is_winuwp && is_debug) {
+ # DXGIGetDebugInterface1 is defined in dxgi.lib
+ # But this API is tagged as a development-only capability
+ # which implies that linking to this function will cause
+ # the application to fail Windows store certification
+ # So we only link to it in debug build when compiling for UWP.
+ # In win32 we load dxgi.dll using LoadLibrary
+ # so no need for static linking.
+ libs += [ "dxgi.lib" ]
+ }
+
+ # TODO(dawn:766):
+ # Should link dxcompiler.lib and WinPixEventRuntime_UAP.lib in UWP
+  # Somehow using dxcompiler.lib makes CoreApp unable to activate
+ # WinPIX should be added as third party tools and linked statically
+
+ if (dawn_enable_d3d12) {
+ libs += [ "dxguid.lib" ]
+ sources += [
+ "d3d12/AdapterD3D12.cpp",
+ "d3d12/AdapterD3D12.h",
+ "d3d12/BackendD3D12.cpp",
+ "d3d12/BackendD3D12.h",
+ "d3d12/BindGroupD3D12.cpp",
+ "d3d12/BindGroupD3D12.h",
+ "d3d12/BindGroupLayoutD3D12.cpp",
+ "d3d12/BindGroupLayoutD3D12.h",
+ "d3d12/BufferD3D12.cpp",
+ "d3d12/BufferD3D12.h",
+ "d3d12/CPUDescriptorHeapAllocationD3D12.cpp",
+ "d3d12/CPUDescriptorHeapAllocationD3D12.h",
+ "d3d12/CommandAllocatorManager.cpp",
+ "d3d12/CommandAllocatorManager.h",
+ "d3d12/CommandBufferD3D12.cpp",
+ "d3d12/CommandBufferD3D12.h",
+ "d3d12/CommandRecordingContext.cpp",
+ "d3d12/CommandRecordingContext.h",
+ "d3d12/ComputePipelineD3D12.cpp",
+ "d3d12/ComputePipelineD3D12.h",
+ "d3d12/D3D11on12Util.cpp",
+ "d3d12/D3D11on12Util.h",
+ "d3d12/D3D12Error.cpp",
+ "d3d12/D3D12Error.h",
+ "d3d12/D3D12Info.cpp",
+ "d3d12/D3D12Info.h",
+ "d3d12/DeviceD3D12.cpp",
+ "d3d12/DeviceD3D12.h",
+ "d3d12/Forward.h",
+ "d3d12/GPUDescriptorHeapAllocationD3D12.cpp",
+ "d3d12/GPUDescriptorHeapAllocationD3D12.h",
+ "d3d12/HeapAllocatorD3D12.cpp",
+ "d3d12/HeapAllocatorD3D12.h",
+ "d3d12/HeapD3D12.cpp",
+ "d3d12/HeapD3D12.h",
+ "d3d12/IntegerTypes.h",
+ "d3d12/NativeSwapChainImplD3D12.cpp",
+ "d3d12/NativeSwapChainImplD3D12.h",
+ "d3d12/PageableD3D12.cpp",
+ "d3d12/PageableD3D12.h",
+ "d3d12/PipelineLayoutD3D12.cpp",
+ "d3d12/PipelineLayoutD3D12.h",
+ "d3d12/PlatformFunctions.cpp",
+ "d3d12/PlatformFunctions.h",
+ "d3d12/QuerySetD3D12.cpp",
+ "d3d12/QuerySetD3D12.h",
+ "d3d12/QueueD3D12.cpp",
+ "d3d12/QueueD3D12.h",
+ "d3d12/RenderPassBuilderD3D12.cpp",
+ "d3d12/RenderPassBuilderD3D12.h",
+ "d3d12/RenderPipelineD3D12.cpp",
+ "d3d12/RenderPipelineD3D12.h",
+ "d3d12/ResidencyManagerD3D12.cpp",
+ "d3d12/ResidencyManagerD3D12.h",
+ "d3d12/ResourceAllocatorManagerD3D12.cpp",
+ "d3d12/ResourceAllocatorManagerD3D12.h",
+ "d3d12/ResourceHeapAllocationD3D12.cpp",
+ "d3d12/ResourceHeapAllocationD3D12.h",
+ "d3d12/SamplerD3D12.cpp",
+ "d3d12/SamplerD3D12.h",
+ "d3d12/SamplerHeapCacheD3D12.cpp",
+ "d3d12/SamplerHeapCacheD3D12.h",
+ "d3d12/ShaderModuleD3D12.cpp",
+ "d3d12/ShaderModuleD3D12.h",
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
+ "d3d12/StagingBufferD3D12.cpp",
+ "d3d12/StagingBufferD3D12.h",
+ "d3d12/StagingDescriptorAllocatorD3D12.cpp",
+ "d3d12/StagingDescriptorAllocatorD3D12.h",
+ "d3d12/SwapChainD3D12.cpp",
+ "d3d12/SwapChainD3D12.h",
+ "d3d12/TextureCopySplitter.cpp",
+ "d3d12/TextureCopySplitter.h",
+ "d3d12/TextureD3D12.cpp",
+ "d3d12/TextureD3D12.h",
+ "d3d12/UtilsD3D12.cpp",
+ "d3d12/UtilsD3D12.h",
+ "d3d12/d3d12_platform.h",
+ ]
+ }
+
+ if (dawn_enable_metal) {
+ frameworks = [
+ "Cocoa.framework",
+ "IOKit.framework",
+ "IOSurface.framework",
+ "QuartzCore.framework",
+ ]
+ sources += [
+ "Surface_metal.mm",
+ "metal/BackendMTL.h",
+ "metal/BackendMTL.mm",
+ "metal/BindGroupLayoutMTL.h",
+ "metal/BindGroupLayoutMTL.mm",
+ "metal/BindGroupMTL.h",
+ "metal/BindGroupMTL.mm",
+ "metal/BufferMTL.h",
+ "metal/BufferMTL.mm",
+ "metal/CommandBufferMTL.h",
+ "metal/CommandBufferMTL.mm",
+ "metal/CommandRecordingContext.h",
+ "metal/CommandRecordingContext.mm",
+ "metal/ComputePipelineMTL.h",
+ "metal/ComputePipelineMTL.mm",
+ "metal/DeviceMTL.h",
+ "metal/DeviceMTL.mm",
+ "metal/Forward.h",
+ "metal/PipelineLayoutMTL.h",
+ "metal/PipelineLayoutMTL.mm",
+ "metal/QuerySetMTL.h",
+ "metal/QuerySetMTL.mm",
+ "metal/QueueMTL.h",
+ "metal/QueueMTL.mm",
+ "metal/RenderPipelineMTL.h",
+ "metal/RenderPipelineMTL.mm",
+ "metal/SamplerMTL.h",
+ "metal/SamplerMTL.mm",
+ "metal/ShaderModuleMTL.h",
+ "metal/ShaderModuleMTL.mm",
+ "metal/StagingBufferMTL.h",
+ "metal/StagingBufferMTL.mm",
+ "metal/SwapChainMTL.h",
+ "metal/SwapChainMTL.mm",
+ "metal/TextureMTL.h",
+ "metal/TextureMTL.mm",
+ "metal/UtilsMetal.h",
+ "metal/UtilsMetal.mm",
+ ]
+ }
+
+ if (dawn_enable_null) {
+ sources += [
+ "null/DeviceNull.cpp",
+ "null/DeviceNull.h",
+ ]
+ }
+
+ if (dawn_enable_opengl || dawn_enable_vulkan) {
+ sources += [
+ "SpirvValidation.cpp",
+ "SpirvValidation.h",
+ ]
+ }
+
+ if (dawn_enable_opengl) {
+ public_deps += [
+ ":opengl_loader_gen",
+ "${dawn_root}/third_party/khronos:khronos_platform",
+ ]
+ sources += get_target_outputs(":opengl_loader_gen")
+ sources += [
+ "opengl/BackendGL.cpp",
+ "opengl/BackendGL.h",
+ "opengl/BindGroupGL.cpp",
+ "opengl/BindGroupGL.h",
+ "opengl/BindGroupLayoutGL.cpp",
+ "opengl/BindGroupLayoutGL.h",
+ "opengl/BufferGL.cpp",
+ "opengl/BufferGL.h",
+ "opengl/CommandBufferGL.cpp",
+ "opengl/CommandBufferGL.h",
+ "opengl/ComputePipelineGL.cpp",
+ "opengl/ComputePipelineGL.h",
+ "opengl/DeviceGL.cpp",
+ "opengl/DeviceGL.h",
+ "opengl/Forward.h",
+ "opengl/GLFormat.cpp",
+ "opengl/GLFormat.h",
+ "opengl/NativeSwapChainImplGL.cpp",
+ "opengl/NativeSwapChainImplGL.h",
+ "opengl/OpenGLFunctions.cpp",
+ "opengl/OpenGLFunctions.h",
+ "opengl/OpenGLVersion.cpp",
+ "opengl/OpenGLVersion.h",
+ "opengl/PersistentPipelineStateGL.cpp",
+ "opengl/PersistentPipelineStateGL.h",
+ "opengl/PipelineGL.cpp",
+ "opengl/PipelineGL.h",
+ "opengl/PipelineLayoutGL.cpp",
+ "opengl/PipelineLayoutGL.h",
+ "opengl/QuerySetGL.cpp",
+ "opengl/QuerySetGL.h",
+ "opengl/QueueGL.cpp",
+ "opengl/QueueGL.h",
+ "opengl/RenderPipelineGL.cpp",
+ "opengl/RenderPipelineGL.h",
+ "opengl/SamplerGL.cpp",
+ "opengl/SamplerGL.h",
+ "opengl/ShaderModuleGL.cpp",
+ "opengl/ShaderModuleGL.h",
+ "opengl/SwapChainGL.cpp",
+ "opengl/SwapChainGL.h",
+ "opengl/TextureGL.cpp",
+ "opengl/TextureGL.h",
+ "opengl/UtilsGL.cpp",
+ "opengl/UtilsGL.h",
+ "opengl/opengl_platform.h",
+ ]
+ }
+
+ if (dawn_enable_vulkan) {
+ public_deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
+ sources += [
+ "vulkan/AdapterVk.cpp",
+ "vulkan/AdapterVk.h",
+ "vulkan/BackendVk.cpp",
+ "vulkan/BackendVk.h",
+ "vulkan/BindGroupLayoutVk.cpp",
+ "vulkan/BindGroupLayoutVk.h",
+ "vulkan/BindGroupVk.cpp",
+ "vulkan/BindGroupVk.h",
+ "vulkan/BufferVk.cpp",
+ "vulkan/BufferVk.h",
+ "vulkan/CommandBufferVk.cpp",
+ "vulkan/CommandBufferVk.h",
+ "vulkan/CommandRecordingContext.h",
+ "vulkan/ComputePipelineVk.cpp",
+ "vulkan/ComputePipelineVk.h",
+ "vulkan/DescriptorSetAllocation.h",
+ "vulkan/DescriptorSetAllocator.cpp",
+ "vulkan/DescriptorSetAllocator.h",
+ "vulkan/DeviceVk.cpp",
+ "vulkan/DeviceVk.h",
+ "vulkan/ExternalHandle.h",
+ "vulkan/FencedDeleter.cpp",
+ "vulkan/FencedDeleter.h",
+ "vulkan/Forward.h",
+ "vulkan/NativeSwapChainImplVk.cpp",
+ "vulkan/NativeSwapChainImplVk.h",
+ "vulkan/PipelineLayoutVk.cpp",
+ "vulkan/PipelineLayoutVk.h",
+ "vulkan/QuerySetVk.cpp",
+ "vulkan/QuerySetVk.h",
+ "vulkan/QueueVk.cpp",
+ "vulkan/QueueVk.h",
+ "vulkan/RenderPassCache.cpp",
+ "vulkan/RenderPassCache.h",
+ "vulkan/RenderPipelineVk.cpp",
+ "vulkan/RenderPipelineVk.h",
+ "vulkan/ResourceHeapVk.cpp",
+ "vulkan/ResourceHeapVk.h",
+ "vulkan/ResourceMemoryAllocatorVk.cpp",
+ "vulkan/ResourceMemoryAllocatorVk.h",
+ "vulkan/SamplerVk.cpp",
+ "vulkan/SamplerVk.h",
+ "vulkan/ShaderModuleVk.cpp",
+ "vulkan/ShaderModuleVk.h",
+ "vulkan/StagingBufferVk.cpp",
+ "vulkan/StagingBufferVk.h",
+ "vulkan/SwapChainVk.cpp",
+ "vulkan/SwapChainVk.h",
+ "vulkan/TextureVk.cpp",
+ "vulkan/TextureVk.h",
+ "vulkan/UtilsVulkan.cpp",
+ "vulkan/UtilsVulkan.h",
+ "vulkan/VulkanError.cpp",
+ "vulkan/VulkanError.h",
+ "vulkan/VulkanExtensions.cpp",
+ "vulkan/VulkanExtensions.h",
+ "vulkan/VulkanFunctions.cpp",
+ "vulkan/VulkanFunctions.h",
+ "vulkan/VulkanInfo.cpp",
+ "vulkan/VulkanInfo.h",
+ "vulkan/external_memory/MemoryService.h",
+ "vulkan/external_semaphore/SemaphoreService.h",
+ ]
+
+ if (is_chromeos) {
+ sources += [
+ "vulkan/external_memory/MemoryServiceDmaBuf.cpp",
+ "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
+ ]
+ defines += [ "DAWN_USE_SYNC_FDS" ]
+ } else if (is_linux) {
+ sources += [
+ "vulkan/external_memory/MemoryServiceOpaqueFD.cpp",
+ "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
+ ]
+ } else if (is_fuchsia) {
+ sources += [
+ "vulkan/external_memory/MemoryServiceZirconHandle.cpp",
+ "vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp",
+ ]
+ } else {
+ sources += [
+ "vulkan/external_memory/MemoryServiceNull.cpp",
+ "vulkan/external_semaphore/SemaphoreServiceNull.cpp",
+ ]
+ }
+ if (build_with_chromium && is_fuchsia) {
+ # Necessary to ensure that the Vulkan libraries will be in the
+ # final Fuchsia package.
+ data_deps = [
+ "//third_party/fuchsia-sdk:vulkan_base",
+ "//third_party/fuchsia-sdk:vulkan_validation",
+
+ # NOTE: The line below is a work around for http://crbug.com/1001081
+ "//third_party/fuchsia-sdk/sdk:trace_engine",
+ ]
+ }
+ if (dawn_is_winuwp) {
+ defines += [ "DAWN_IS_WINUWP" ]
+ }
+ if (enable_vulkan_validation_layers) {
+ defines += [
+ "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS",
+ "DAWN_VK_DATA_DIR=\"$vulkan_data_subdir\"",
+ ]
+ }
+ if (enable_vulkan_loader) {
+ data_deps += [ "${dawn_vulkan_loader_dir}:libvulkan" ]
+ }
+ if (use_swiftshader) {
+ data_deps +=
+ [ "${dawn_swiftshader_dir}/src/Vulkan:swiftshader_libvulkan" ]
+ defines += [ "DAWN_ENABLE_SWIFTSHADER" ]
+ }
+ }
+
+ if (use_angle) {
+ data_deps += [
+ "${dawn_angle_dir}:libEGL",
+ "${dawn_angle_dir}:libGLESv2",
+ ]
+ }
+}
+
+# The static and shared libraries for dawn_native. Most of the files are
+# already compiled in dawn_native_sources, but we still need to compile
+# files defining exported symbols.
+dawn_component("native") {
+ DEFINE_PREFIX = "DAWN_NATIVE"
+
+ #Make headers publically visible
+ public_deps = [ ":headers" ]
+
+ deps = [
+ ":sources",
+ "${dawn_root}/src/dawn/common",
+ ]
+ sources = [ "DawnNative.cpp" ]
+ configs = [ ":internal" ]
+ public_configs = [
+ ":weak_framework",
+ ":vulkan_rpath",
+ ]
+
+ if (dawn_enable_d3d12) {
+ sources += [ "d3d12/D3D12Backend.cpp" ]
+ }
+ if (dawn_enable_metal) {
+ sources += [ "metal/MetalBackend.mm" ]
+ }
+ if (dawn_enable_null) {
+ sources += [ "null/NullBackend.cpp" ]
+ }
+ if (dawn_enable_opengl) {
+ sources += [ "opengl/OpenGLBackend.cpp" ]
+ }
+ if (dawn_enable_vulkan) {
+ sources += [ "vulkan/VulkanBackend.cpp" ]
+
+ if (enable_vulkan_validation_layers) {
+ data_deps =
+ [ "${dawn_vulkan_validation_layers_dir}:vulkan_validation_layers" ]
+ if (!is_android) {
+ data_deps +=
+ [ "${dawn_vulkan_validation_layers_dir}:vulkan_gen_json_files" ]
+ }
+ }
+ }
+}
+
+dawn_json_generator("webgpu_dawn_native_proc_gen") {
+ target = "webgpu_dawn_native_proc"
+ outputs = [ "src/dawn/native/webgpu_dawn_native_proc.cpp" ]
+}
+
+dawn_component("webgpu_dawn") {
+ # For a single library - build `webgpu_dawn_shared` with GN args:
+ # dawn_complete_static_libs = true - to package a single lib
+ #
+ # is_debug = false
+ # - setting this to true makes library over 50Mb
+ #
+ # use_custom_libcxx = false
+ # - Otherwise, libc++ symbols may conflict if the
+ # library is used outside of Chromium.
+ #
+ # dawn_use_swiftshader = false
+ # angle_enable_swiftshader = false
+ # - SwiftShader can't be built without use_custom_libcxx.
+ # It should be built separately.
+ DEFINE_PREFIX = "WGPU"
+
+ sources = get_target_outputs(":webgpu_dawn_native_proc_gen")
+ deps = [
+ ":static",
+ ":webgpu_dawn_native_proc_gen",
+ ]
+}
diff --git a/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp b/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp
new file mode 100644
index 00000000000..abcc2714530
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BackendConnection.cpp
@@ -0,0 +1,36 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native {
+
+ BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+ : mInstance(instance), mType(type) {
+ }
+
+ wgpu::BackendType BackendConnection::GetType() const {
+ return mType;
+ }
+
+ InstanceBase* BackendConnection::GetInstance() const {
+ return mInstance;
+ }
+
+ ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options) {
+ return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BackendConnection.h b/chromium/third_party/dawn/src/dawn/native/BackendConnection.h
new file mode 100644
index 00000000000..2879fad9631
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BackendConnection.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BACKENDCONNECTION_H_
+#define DAWNNATIVE_BACKENDCONNECTION_H_
+
+#include "dawn/native/Adapter.h"
+#include "dawn/native/DawnNative.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+ // An common interface for all backends. Mostly used to create adapters for a particular
+ // backend.
+ class BackendConnection {
+ public:
+ BackendConnection(InstanceBase* instance, wgpu::BackendType type);
+ virtual ~BackendConnection() = default;
+
+ wgpu::BackendType GetType() const;
+ InstanceBase* GetInstance() const;
+
+ // Returns all the adapters for the system that can be created by the backend, without extra
+ // options (such as debug adapters, custom driver libraries, etc.)
+ virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
+
+ // Returns new adapters created with the backend-specific options.
+ virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options);
+
+ private:
+ InstanceBase* mInstance = nullptr;
+ wgpu::BackendType mType;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BACKENDCONNECTION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp
new file mode 100644
index 00000000000..fe4681c0994
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroup.cpp
@@ -0,0 +1,543 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindGroup.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+ namespace {
+
+ // Helper functions to perform binding-type specific validation
+
+ MaybeError ValidateBufferBinding(const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
+
+ DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
+ "Expected only buffer to be set for binding entry.");
+
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(entry.buffer));
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+
+ wgpu::BufferUsage requiredUsage;
+ uint64_t maxBindingSize;
+ uint64_t requiredBindingAlignment;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ requiredUsage = wgpu::BufferUsage::Uniform;
+ maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+ requiredBindingAlignment =
+ device->GetLimits().v1.minUniformBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ requiredUsage = wgpu::BufferUsage::Storage;
+ maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+ requiredBindingAlignment =
+ device->GetLimits().v1.minStorageBufferOffsetAlignment;
+ break;
+ case kInternalStorageBufferBinding:
+ requiredUsage = kInternalStorageBuffer;
+ maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+ requiredBindingAlignment =
+ device->GetLimits().v1.minStorageBufferOffsetAlignment;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+
+ uint64_t bufferSize = entry.buffer->GetSize();
+
+ // Handle wgpu::WholeSize, avoiding overflows.
+ DAWN_INVALID_IF(entry.offset > bufferSize,
+ "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+ bufferSize, entry.buffer);
+
+ uint64_t bindingSize =
+ (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
+
+ DAWN_INVALID_IF(bindingSize > bufferSize,
+ "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+ bufferSize, entry.buffer);
+
+ DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
+
+ // Note that no overflow can happen because we already checked that
+ // bufferSize >= bindingSize
+ DAWN_INVALID_IF(
+ entry.offset > bufferSize - bindingSize,
+ "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+ entry.offset, bufferSize, bindingSize, entry.buffer);
+
+ DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+ "Offset (%u) does not satisfy the minimum %s alignment (%u).",
+ entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+
+ DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+ "Binding usage (%s) of %s doesn't match expected usage (%s).",
+ entry.buffer->GetUsage(), entry.buffer, requiredUsage);
+
+ DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+ "Binding size (%u) is smaller than the minimum binding size (%u).",
+ bindingSize, bindingInfo.buffer.minBindingSize);
+
+ DAWN_INVALID_IF(bindingSize > maxBindingSize,
+ "Binding size (%u) is larger than the maximum binding size (%u).",
+ bindingSize, maxBindingSize);
+
+ return {};
+ }
+
+ MaybeError ValidateTextureBinding(DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
+
+ DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
+ "Expected only textureView to be set for binding entry.");
+
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(entry.textureView));
+
+ TextureViewBase* view = entry.textureView;
+
+ Aspect aspect = view->GetAspects();
+ // TODO(dawn:563): Format Aspects
+ DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects selected in %s.", view);
+
+ TextureBase* texture = view->GetTexture();
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Texture: {
+ SampleTypeBit supportedTypes =
+ texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
+ SampleTypeBit requiredType =
+ SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
+
+ DAWN_INVALID_IF(
+ !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+ texture->GetUsage(), texture);
+
+ DAWN_INVALID_IF(
+ texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+ "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+ texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
+
+ // TODO(dawn:563): Improve error message.
+ DAWN_INVALID_IF((supportedTypes & requiredType) == 0,
+ "Texture component type usage mismatch.");
+
+ DAWN_INVALID_IF(
+ entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.texture.viewDimension);
+ break;
+ }
+ case BindingInfoType::StorageTexture: {
+ DAWN_INVALID_IF(
+ !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+ texture->GetUsage(), texture);
+
+ ASSERT(!texture->IsMultisampledTexture());
+
+ DAWN_INVALID_IF(
+ texture->GetFormat().format != bindingInfo.storageTexture.format,
+ "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+ texture, bindingInfo.storageTexture.format);
+
+ DAWN_INVALID_IF(
+ entry.textureView->GetDimension() !=
+ bindingInfo.storageTexture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.storageTexture.viewDimension);
+
+ DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
+ "mipLevelCount (%u) of %s expected to be 1.",
+ entry.textureView->GetLevelCount(), entry.textureView);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateSamplerBinding(const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const BindingInfo& bindingInfo) {
+ DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
+
+ DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
+ "Expected only sampler to be set for binding entry.");
+
+ DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(entry.sampler));
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
+
+ switch (bindingInfo.sampler.type) {
+ case wgpu::SamplerBindingType::NonFiltering:
+ DAWN_INVALID_IF(
+ entry.sampler->IsFiltering(),
+ "Filtering sampler %s is incompatible with non-filtering sampler "
+ "binding.",
+ entry.sampler);
+ [[fallthrough]];
+ case wgpu::SamplerBindingType::Filtering:
+ DAWN_INVALID_IF(
+ entry.sampler->IsComparison(),
+ "Comparison sampler %s is incompatible with non-comparison sampler "
+ "binding.",
+ entry.sampler);
+ break;
+ case wgpu::SamplerBindingType::Comparison:
+ DAWN_INVALID_IF(
+ !entry.sampler->IsComparison(),
+ "Non-comparison sampler %s is imcompatible with comparison sampler "
+ "binding.",
+ entry.sampler);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateExternalTextureBinding(
+ const DeviceBase* device,
+ const BindGroupEntry& entry,
+ const ExternalTextureBindingEntry* externalTextureBindingEntry,
+ const ExternalTextureBindingExpansionMap& expansions) {
+ DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
+ "Binding entry external texture not set.");
+
+ DAWN_INVALID_IF(
+ entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
+ "Expected only external texture to be set for binding entry.");
+
+ DAWN_INVALID_IF(
+ expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+ "External texture binding entry %u is not present in the bind group layout.",
+ entry.binding);
+
+ DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
+ wgpu::SType::ExternalTextureBindingEntry));
+
+ DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+
+ return {};
+ }
+
+ } // anonymous namespace
+
+ MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
+ const BindGroupDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+
+ DAWN_INVALID_IF(
+ descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
+ "Number of entries (%u) did not match the number of entries (%u) specified in %s."
+ "\nExpected layout: %s",
+ descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
+ descriptor->layout, descriptor->layout->EntriesToString());
+
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+
+ ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
+
+ const auto& it = bindingMap.find(BindingNumber(entry.binding));
+ DAWN_INVALID_IF(it == bindingMap.end(),
+ "In entries[%u], binding index %u not present in the bind group layout."
+ "\nExpected layout: %s",
+ i, entry.binding, descriptor->layout->EntriesToString());
+
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+ DAWN_INVALID_IF(bindingsSet[bindingIndex],
+ "In entries[%u], binding index %u already used by a previous entry", i,
+ entry.binding);
+
+ bindingsSet.set(bindingIndex);
+
+ // Below this block we validate entries based on the bind group layout, in which
+ // external textures have been expanded into their underlying contents. For this reason
+ // we must identify external texture binding entries by checking the bind group entry
+ // itself.
+ // TODO:(dawn:1293): Store external textures in
+ // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
+ // be moved in the switch below.
+ const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+ if (externalTextureBindingEntry != nullptr) {
+ DAWN_TRY(ValidateExternalTextureBinding(
+ device, entry, externalTextureBindingEntry,
+ descriptor->layout->GetExternalTextureBindingExpansionMap()));
+ continue;
+ }
+
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+
+ // Perform binding-type specific validation.
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Buffer."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture:
+ DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Texture."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::Sampler:
+ DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Sampler."
+ "\nExpected entry layout: %s",
+ i, bindingInfo);
+ break;
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // This should always be true because
+ // - numBindings has to match between the bind group and its layout.
+ // - Each binding must be set at most once
+ //
+ // We don't validate the equality because it wouldn't be possible to cover it with a test.
+ ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+
+ return {};
+ } // anonymous namespace
+
+ // BindGroup
+
+ BindGroupBase::BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart)
+ : ApiObjectBase(device, descriptor->label),
+ mLayout(descriptor->layout),
+ mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+ // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+ // This is to fill Ref<ObjectBase> holes with nullptrs.
+ new (&mBindingData.bindings[i]) Ref<ObjectBase>();
+ }
+
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
+
+ BindingIndex bindingIndex =
+ descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+
+ // Only a single binding type should be set, so once we found it we can skip to the
+ // next loop iteration.
+
+ if (entry.buffer != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.buffer;
+ mBindingData.bufferData[bindingIndex].offset = entry.offset;
+ uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
+ ? entry.buffer->GetSize() - entry.offset
+ : entry.size;
+ mBindingData.bufferData[bindingIndex].size = bufferSize;
+ continue;
+ }
+
+ if (entry.textureView != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.textureView;
+ continue;
+ }
+
+ if (entry.sampler != nullptr) {
+ ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+ mBindingData.bindings[bindingIndex] = entry.sampler;
+ continue;
+ }
+
+ // Here we unpack external texture bindings into multiple additional bindings for the
+ // external texture's contents. New binding locations previously determined in the bind
+ // group layout are created in this bind group and filled with the external texture's
+ // underlying resources.
+ const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+ if (externalTextureBindingEntry != nullptr) {
+ mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
+
+ ExternalTextureBindingExpansionMap expansions =
+ mLayout->GetExternalTextureBindingExpansionMap();
+ ExternalTextureBindingExpansionMap::iterator it =
+ expansions.find(BindingNumber(entry.binding));
+
+ ASSERT(it != expansions.end());
+
+ BindingIndex plane0BindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.plane0);
+ BindingIndex plane1BindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.plane1);
+ BindingIndex paramsBindingIndex =
+ descriptor->layout->GetBindingIndex(it->second.params);
+
+ ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
+
+ mBindingData.bindings[plane0BindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
+
+ ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
+ mBindingData.bindings[plane1BindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
+
+ ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
+ mBindingData.bindings[paramsBindingIndex] =
+ externalTextureBindingEntry->externalTexture->GetParamsBuffer();
+ mBindingData.bufferData[paramsBindingIndex].offset = 0;
+ mBindingData.bufferData[paramsBindingIndex].size =
+ sizeof(dawn_native::ExternalTextureParams);
+
+ continue;
+ }
+ }
+
+ uint32_t packedIdx = 0;
+ for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+ ++bindingIndex) {
+ if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
+ mBindingData.unverifiedBufferSizes[packedIdx] =
+ mBindingData.bufferData[bindingIndex].size;
+ ++packedIdx;
+ }
+ }
+
+ TrackInDevice();
+ }
+
+ BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ BindGroupBase::~BindGroupBase() = default;
+
+ void BindGroupBase::DestroyImpl() {
+ if (mLayout != nullptr) {
+ ASSERT(!IsError());
+ for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+ mBindingData.bindings[i].~Ref<ObjectBase>();
+ }
+ }
+ }
+
+ void BindGroupBase::DeleteThis() {
+ // Add another ref to the layout so that if this is the last ref, the layout
+ // is destroyed after the bind group. The bind group is slab-allocated inside
+ // memory owned by the layout (except for the null backend).
+ Ref<BindGroupLayoutBase> layout = mLayout;
+ ApiObjectBase::DeleteThis();
+ }
+
+ BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mBindingData() {
+ }
+
+ // static
+ BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
+ return new BindGroupBase(device, ObjectBase::kError);
+ }
+
+ ObjectType BindGroupBase::GetType() const {
+ return ObjectType::BindGroup;
+ }
+
+ BindGroupLayoutBase* BindGroupBase::GetLayout() {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+ ASSERT(!IsError());
+ return mBindingData.unverifiedBufferSizes;
+ }
+
+ BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
+ BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+ return {buffer, mBindingData.bufferData[bindingIndex].offset,
+ mBindingData.bufferData[bindingIndex].size};
+ }
+
+ SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
+ return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
+ }
+
+ TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
+ mLayout->GetBindingInfo(bindingIndex).bindingType ==
+ BindingInfoType::StorageTexture);
+ return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
+ }
+
+ const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
+ return mBoundExternalTextures;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroup.h b/chromium/third_party/dawn/src/dawn/native/BindGroup.h
new file mode 100644
index 00000000000..7ba883a9fa1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroup.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUP_H_
+#define DAWNNATIVE_BINDGROUP_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
+ const BindGroupDescriptor* descriptor);
+
+ struct BufferBinding {
+ BufferBase* buffer;
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ class BindGroupBase : public ApiObjectBase {
+ public:
+ static BindGroupBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ BindGroupLayoutBase* GetLayout();
+ const BindGroupLayoutBase* GetLayout() const;
+ BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+ SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
+ TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+ const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
+ const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
+
+ protected:
+ // To save memory, the size of a bind group is dynamically determined and the bind group is
+ // placement-allocated into memory big enough to hold the bind group with its
+ // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
+ // binding data should be passed as |bindingDataStart|.
+ BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart);
+
+ // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+ // be first in the allocation. The binding data is stored after the Derived class.
+ template <typename Derived>
+ BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(device,
+ descriptor,
+ AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+ descriptor->layout->GetBindingDataAlignment())) {
+ static_assert(std::is_base_of<BindGroupBase, Derived>::value);
+ }
+
+ // Constructor used only for mocking and testing.
+ BindGroupBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ ~BindGroupBase() override;
+
+ private:
+ BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ void DeleteThis() override;
+
+ Ref<BindGroupLayoutBase> mLayout;
+ BindGroupLayoutBase::BindingDataPointers mBindingData;
+
+            // TODO(dawn:1293): Store external textures in
+ // BindGroupLayoutBase::BindingDataPointers::bindings
+ std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BINDGROUP_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp
new file mode 100644
index 00000000000..9f5e151f5c6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.cpp
@@ -0,0 +1,670 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/BitSetIterator.h"
+
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace dawn::native {
+
+ namespace {
+ MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+ wgpu::TextureFormat storageTextureFormat) {
+ const Format* format = nullptr;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+
+ ASSERT(format != nullptr);
+ DAWN_INVALID_IF(!format->supportsStorageUsage,
+ "Texture format (%s) does not support storage textures.",
+ storageTextureFormat);
+
+ return {};
+ }
+
+ MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "%s texture views cannot be used as storage textures.", dimension);
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
+ return {};
+
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
+ const BindGroupLayoutEntry& entry,
+ bool allowInternalBinding) {
+ DAWN_TRY(ValidateShaderStage(entry.visibility));
+
+ int bindingMemberCount = 0;
+ BindingInfoType bindingType;
+ wgpu::ShaderStage allowedStages = kAllStages;
+
+ if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Buffer;
+ const BufferBindingLayout& buffer = entry.buffer;
+
+ // The kInternalStorageBufferBinding is used internally and not a value
+ // in wgpu::BufferBindingType.
+ if (buffer.type == kInternalStorageBufferBinding) {
+ DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
+ } else {
+ DAWN_TRY(ValidateBufferBindingType(buffer.type));
+ }
+
+ if (buffer.type == wgpu::BufferBindingType::Storage ||
+ buffer.type == kInternalStorageBufferBinding) {
+ allowedStages &= ~wgpu::ShaderStage::Vertex;
+ }
+ }
+
+ if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Sampler;
+ DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
+ }
+
+ if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::Texture;
+ const TextureBindingLayout& texture = entry.texture;
+ DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
+
+ // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
+ if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+ DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
+ viewDimension = texture.viewDimension;
+ }
+
+ DAWN_INVALID_IF(
+ texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+ "View dimension (%s) for a multisampled texture bindings was not %s.",
+ viewDimension, wgpu::TextureViewDimension::e2D);
+ }
+
+ if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::StorageTexture;
+ const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
+ DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
+ DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
+
+ // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+ if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+ DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
+ DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
+ }
+
+ if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
+ allowedStages &= ~wgpu::ShaderStage::Vertex;
+ }
+ }
+
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ bindingMemberCount++;
+ bindingType = BindingInfoType::ExternalTexture;
+ }
+
+ DAWN_INVALID_IF(bindingMemberCount != 1,
+ "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
+ "storageTexture, or externalTexture set");
+
+ DAWN_INVALID_IF(
+ !IsSubset(entry.visibility, allowedStages),
+ "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
+ bindingType, entry.visibility, allowedStages);
+
+ return {};
+ }
+
+ BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
+ uint32_t binding,
+ wgpu::ShaderStage visibility) {
+ BindGroupLayoutEntry entry;
+ entry.binding = binding;
+ entry.visibility = visibility;
+ entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+ entry.texture.multisampled = false;
+ entry.texture.sampleType = wgpu::TextureSampleType::Float;
+ return entry;
+ }
+
+ BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
+ wgpu::ShaderStage visibility) {
+ BindGroupLayoutEntry entry;
+ entry.binding = binding;
+ entry.visibility = visibility;
+ entry.buffer.hasDynamicOffset = false;
+ entry.buffer.type = wgpu::BufferBindingType::Uniform;
+ return entry;
+ }
+
+ std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
+ const BindGroupLayoutDescriptor* descriptor,
+ BindingCounts* bindingCounts,
+ ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
+ std::vector<BindGroupLayoutEntry> expandedOutput;
+
+ // When new bgl entries are created, we use binding numbers larger than
+ // kMaxBindingNumber to ensure there are no collisions.
+ uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
+ for (uint32_t i = 0; i < descriptor->entryCount; i++) {
+ const BindGroupLayoutEntry& entry = descriptor->entries[i];
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ // External textures are expanded from a texture_external into two sampled texture
+ // bindings and one uniform buffer binding. The original binding number is used
+ // for the first sampled texture.
+ if (externalTextureBindingLayout != nullptr) {
+ for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+ // External textures are not fully implemented, which means that expanding
+ // the external texture at this time will not occupy the same number of
+ // binding slots as defined in the WebGPU specification. Here we prematurely
+ // increment the binding counts for an additional sampled textures and a
+ // sampler so that an external texture will occupy the correct number of
+ // slots for correct validation of shader binding limits.
+                        // TODO(dawn:1082): Consider removing this and instead making a change to
+ // the validation.
+ constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
+ constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
+ bindingCounts->perStage[stage].sampledTextureCount +=
+ kUnimplementedSampledTexturesPerExternalTexture;
+ bindingCounts->perStage[stage].samplerCount +=
+ kUnimplementedSamplersPerExternalTexture;
+ }
+
+ dawn_native::ExternalTextureBindingExpansion bindingExpansion;
+
+ BindGroupLayoutEntry plane0Entry =
+ CreateSampledTextureBindingForExternalTexture(entry.binding,
+ entry.visibility);
+ bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
+ expandedOutput.push_back(plane0Entry);
+
+ BindGroupLayoutEntry plane1Entry =
+ CreateSampledTextureBindingForExternalTexture(
+ nextOpenBindingNumberForNewEntry++, entry.visibility);
+ bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
+ expandedOutput.push_back(plane1Entry);
+
+ BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
+ nextOpenBindingNumberForNewEntry++, entry.visibility);
+ bindingExpansion.params = BindingNumber(paramsEntry.binding);
+ expandedOutput.push_back(paramsEntry);
+
+ externalTextureBindingExpansions->insert(
+ {BindingNumber(entry.binding), bindingExpansion});
+ } else {
+ expandedOutput.push_back(entry);
+ }
+ }
+
+ return expandedOutput;
+ }
+ } // anonymous namespace
+
+ MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ std::set<BindingNumber> bindingsSet;
+ BindingCounts bindingCounts = {};
+
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupLayoutEntry& entry = descriptor->entries[i];
+ BindingNumber bindingNumber = BindingNumber(entry.binding);
+
+ DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+ "Binding number (%u) exceeds the maximum binding number (%u).",
+ uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+ DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
+ "On entries[%u]: binding index (%u) was specified by a previous entry.",
+ i, entry.binding);
+
+ DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
+ "validating entries[%u]", i);
+
+ IncrementBindingCounts(&bindingCounts, entry);
+
+ bindingsSet.insert(bindingNumber);
+ }
+
+ DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
+
+ return {};
+ }
+
+ namespace {
+
+ bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+ if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
+ return true;
+ }
+
+ switch (a.bindingType) {
+ case BindingInfoType::Buffer:
+ return a.buffer.type != b.buffer.type ||
+ a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
+ a.buffer.minBindingSize != b.buffer.minBindingSize;
+ case BindingInfoType::Sampler:
+ return a.sampler.type != b.sampler.type;
+ case BindingInfoType::Texture:
+ return a.texture.sampleType != b.texture.sampleType ||
+ a.texture.viewDimension != b.texture.viewDimension ||
+ a.texture.multisampled != b.texture.multisampled;
+ case BindingInfoType::StorageTexture:
+ return a.storageTexture.access != b.storageTexture.access ||
+ a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
+ a.storageTexture.format != b.storageTexture.format;
+ case BindingInfoType::ExternalTexture:
+ return false;
+ }
+ UNREACHABLE();
+ }
+
+ bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
+ return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+ }
+
+ bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
+ if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+ return binding.buffer.hasDynamicOffset;
+ }
+ return false;
+ }
+
+ BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
+ BindingInfo bindingInfo;
+ bindingInfo.binding = BindingNumber(binding.binding);
+ bindingInfo.visibility = binding.visibility;
+
+ if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Buffer;
+ bindingInfo.buffer = binding.buffer;
+ } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Sampler;
+ bindingInfo.sampler = binding.sampler;
+ } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::Texture;
+ bindingInfo.texture = binding.texture;
+
+ if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+ bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+ }
+ } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ bindingInfo.bindingType = BindingInfoType::StorageTexture;
+ bindingInfo.storageTexture = binding.storageTexture;
+
+ if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+ bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
+ }
+ } else {
+ const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+ FindInChain(binding.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ bindingInfo.bindingType = BindingInfoType::ExternalTexture;
+ }
+ }
+
+ return bindingInfo;
+ }
+
+ bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
+ const bool aIsBuffer = IsBufferBinding(a);
+ const bool bIsBuffer = IsBufferBinding(b);
+ if (aIsBuffer != bIsBuffer) {
+ // Always place buffers first.
+ return aIsBuffer;
+ }
+
+ if (aIsBuffer) {
+ bool aHasDynamicOffset = BindingHasDynamicOffset(a);
+ bool bHasDynamicOffset = BindingHasDynamicOffset(b);
+ ASSERT(bIsBuffer);
+ if (aHasDynamicOffset != bHasDynamicOffset) {
+ // Buffers with dynamic offsets should come before those without.
+ // This makes it easy to iterate over the dynamic buffer bindings
+ // [0, dynamicBufferCount) during validation.
+ return aHasDynamicOffset;
+ }
+ if (aHasDynamicOffset) {
+ ASSERT(bHasDynamicOffset);
+ ASSERT(a.binding != b.binding);
+ // Above, we ensured that dynamic buffers are first. Now, ensure that
+ // dynamic buffer bindings are in increasing order. This is because dynamic
+ // buffer offsets are applied in increasing order of binding number.
+ return a.binding < b.binding;
+ }
+ }
+
+ // This applies some defaults and gives us a single value to check for the binding type.
+ BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
+ BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
+
+ // Sort by type.
+ if (aInfo.bindingType != bInfo.bindingType) {
+ return aInfo.bindingType < bInfo.bindingType;
+ }
+
+ if (a.visibility != b.visibility) {
+ return a.visibility < b.visibility;
+ }
+
+ switch (aInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
+ return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
+ }
+ break;
+ case BindingInfoType::Sampler:
+ if (aInfo.sampler.type != bInfo.sampler.type) {
+ return aInfo.sampler.type < bInfo.sampler.type;
+ }
+ break;
+ case BindingInfoType::Texture:
+ if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
+ return aInfo.texture.multisampled < bInfo.texture.multisampled;
+ }
+ if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
+ return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
+ }
+ if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
+ return aInfo.texture.sampleType < bInfo.texture.sampleType;
+ }
+ break;
+ case BindingInfoType::StorageTexture:
+ if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
+ return aInfo.storageTexture.access < bInfo.storageTexture.access;
+ }
+ if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
+ return aInfo.storageTexture.viewDimension <
+ bInfo.storageTexture.viewDimension;
+ }
+ if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
+ return aInfo.storageTexture.format < bInfo.storageTexture.format;
+ }
+ break;
+ case BindingInfoType::ExternalTexture:
+ break;
+ }
+ return a.binding < b.binding;
+ }
+
+ // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+ // first.
+ bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+ BindingIndex lastBufferIndex{0};
+ BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+ for (BindingIndex i{0}; i < bindings.size(); ++i) {
+ if (bindings[i].bindingType == BindingInfoType::Buffer) {
+ lastBufferIndex = std::max(i, lastBufferIndex);
+ } else {
+ firstNonBufferIndex = std::min(i, firstNonBufferIndex);
+ }
+ }
+
+ // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+ // |firstNonBufferIndex| gets set to 0.
+ return firstNonBufferIndex >= lastBufferIndex;
+ }
+
+ } // namespace
+
+ // BindGroupLayoutBase
+
+ BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label),
+ mPipelineCompatibilityToken(pipelineCompatibilityToken),
+ mUnexpandedBindingCount(descriptor->entryCount) {
+ std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
+ descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
+
+ std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
+
+ for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
+ const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
+
+ mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
+
+ if (IsBufferBinding(binding)) {
+ // Buffers must be contiguously packed at the start of the binding info.
+ ASSERT(GetBufferCount() == BindingIndex(i));
+ }
+ IncrementBindingCounts(&mBindingCounts, binding);
+
+ const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
+ ASSERT(inserted);
+ }
+ ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+ ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+ }
+
+ BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
+ TrackInDevice();
+ }
+
+ BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ BindGroupLayoutBase::~BindGroupLayoutBase() = default;
+
+ void BindGroupLayoutBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheBindGroupLayout(this);
+ }
+ }
+
+ // static
+ BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
+ return new BindGroupLayoutBase(device, ObjectBase::kError);
+ }
+
+ ObjectType BindGroupLayoutBase::GetType() const {
+ return ObjectType::BindGroupLayout;
+ }
+
+ const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+ ASSERT(!IsError());
+ return mBindingMap;
+ }
+
+ bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+ return mBindingMap.count(bindingNumber) != 0;
+ }
+
+ BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
+ ASSERT(!IsError());
+ const auto& it = mBindingMap.find(bindingNumber);
+ ASSERT(it != mBindingMap.end());
+ return it->second;
+ }
+
+ size_t BindGroupLayoutBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mPipelineCompatibilityToken);
+
+ // std::map is sorted by key, so two BGLs constructed in different orders
+ // will still record the same.
+ for (const auto [id, index] : mBindingMap) {
+ recorder.Record(id, index);
+
+ const BindingInfo& info = mBindingInfo[index];
+ recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
+ info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
+ info.texture.sampleType, info.texture.viewDimension,
+ info.texture.multisampled, info.storageTexture.access,
+ info.storageTexture.format, info.storageTexture.viewDimension);
+ }
+
+ return recorder.GetContentHash();
+ }
+
+ bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
+ const BindGroupLayoutBase* b) const {
+ return a->IsLayoutEqual(b);
+ }
+
+ BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+ return mBindingInfo.size();
+ }
+
+ BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+ return BindingIndex(mBindingCounts.bufferCount);
+ }
+
+ BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
+ // This is a binding index because dynamic buffers are packed at the front of the binding
+ // info.
+ return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+ mBindingCounts.dynamicUniformBufferCount);
+ }
+
+ uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+ return mBindingCounts.unverifiedBufferCount;
+ }
+
+ uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
+ return mExternalTextureBindingExpansionMap.size();
+ }
+
+ const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+ return mBindingCounts;
+ }
+
+ const ExternalTextureBindingExpansionMap&
+ BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
+ return mExternalTextureBindingExpansionMap;
+ }
+
+ uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
+ return mUnexpandedBindingCount;
+ }
+
+ bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken) const {
+ if (!excludePipelineCompatibiltyToken &&
+ GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+ return false;
+ }
+ if (GetBindingCount() != other->GetBindingCount()) {
+ return false;
+ }
+ for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+ if (mBindingInfo[i] != other->mBindingInfo[i]) {
+ return false;
+ }
+ }
+ return mBindingMap == other->mBindingMap;
+ }
+
+ PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+ return mPipelineCompatibilityToken;
+ }
+
+ size_t BindGroupLayoutBase::GetBindingDataSize() const {
+ // | ------ buffer-specific ----------| ------------ object pointers -------------|
+ // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+ // Followed by:
+ // |---------buffer size array--------|
+ // |-uint64_t[mUnverifiedBufferCount]-|
+ size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
+ ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+ size_t bufferSizeArrayStart =
+ Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
+ sizeof(uint64_t));
+ ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+ return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+ }
+
+ BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+ void* dataStart) const {
+ BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+ auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+ uint64_t* unverifiedBufferSizes = AlignPtr(
+ reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
+
+ ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+ ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+ ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
+
+ return {{bufferData, GetBufferCount()},
+ {bindings, GetBindingCount()},
+ {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+ }
+
+ bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
+ ASSERT(bindingIndex < GetBufferCount());
+ switch (GetBindingInfo(bindingIndex).buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ return false;
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::Storage:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ return true;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ std::string BindGroupLayoutBase::EntriesToString() const {
+ std::string entries = " [";
+ const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
+ for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+ entries += absl::StrFormat("%s, ", bindingInfo);
+ }
+ entries += "]";
+ return entries;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h
new file mode 100644
index 00000000000..5b91a2fd4e5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupLayout.h
@@ -0,0 +1,170 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPLAYOUT_H_
+#define DAWNNATIVE_BINDGROUPLAYOUT_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+#include <map>
+
+namespace dawn::native {
+ // TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
+ struct ExternalTextureBindingExpansion {
+ BindingNumber plane0;
+ BindingNumber plane1;
+ BindingNumber params;
+ };
+
+ using ExternalTextureBindingExpansionMap =
+ std::map<BindingNumber, ExternalTextureBindingExpansion>;
+
+ MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding = false);
+
+ // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+ // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+ // into a packed range of |BindingIndex| integers.
+ class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+ public:
+ BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayoutBase() override;
+
+ static BindGroupLayoutBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // A map from the BindingNumber to its packed BindingIndex.
+ using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+ const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < mBindingInfo.size());
+ return mBindingInfo[bindingIndex];
+ }
+ const BindingMap& GetBindingMap() const;
+ bool HasBinding(BindingNumber bindingNumber) const;
+ BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
+
+ // Functions necessary for the unordered_set<BGLBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
+ };
+
+ BindingIndex GetBindingCount() const;
+ // Returns |BindingIndex| because buffers are packed at the front.
+ BindingIndex GetBufferCount() const;
+ // Returns |BindingIndex| because dynamic buffers are packed at the front.
+ BindingIndex GetDynamicBufferCount() const;
+ uint32_t GetUnverifiedBufferCount() const;
+
+ // Used to get counts and validate them in pipeline layout creation. Other getters
+ // should be used to get typed integer counts.
+ const BindingCounts& GetBindingCountInfo() const;
+
+ uint32_t GetExternalTextureBindingCount() const;
+
+ // Used to specify unpacked external texture binding slots when transforming shader modules.
+ const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
+
+ uint32_t GetUnexpandedBindingCount() const;
+
+ // Tests that the BindingInfo of two bind groups are equal,
+ // ignoring their compatibility groups.
+ bool IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken = false) const;
+ PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
+
+ struct BufferBindingData {
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct BindingDataPointers {
+ ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+ ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+ ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
+ };
+
+ // Compute the amount of space / alignment required to store bindings for a bind group of
+ // this layout.
+ size_t GetBindingDataSize() const;
+ static constexpr size_t GetBindingDataAlignment() {
+ static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
+ return alignof(BufferBindingData);
+ }
+
+ BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+ bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
+
+ // Returns a detailed string representation of the layout entries for use in error messages.
+ std::string EntriesToString() const;
+
+ protected:
+ // Constructor used only for mocking and testing.
+ BindGroupLayoutBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ template <typename BindGroup>
+ SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
+ return SlabAllocator<BindGroup>(
+ size, // bytes
+ Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(), // size
+ std::max(alignof(BindGroup), GetBindingDataAlignment()) // alignment
+ );
+ }
+
+ private:
+ BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ BindingCounts mBindingCounts = {};
+ ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
+
+ // Map from BindGroupLayoutEntry.binding to packed indices.
+ BindingMap mBindingMap;
+
+ ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+
+ // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+ const PipelineCompatibilityToken mPipelineCompatibilityToken =
+ PipelineCompatibilityToken(0);
+
+ uint32_t mUnexpandedBindingCount;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BINDGROUPLAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h
new file mode 100644
index 00000000000..72d0cf4ce47
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindGroupTracker.h
@@ -0,0 +1,142 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPTRACKER_H_
+#define DAWNNATIVE_BINDGROUPTRACKER_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/PipelineLayout.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+ // pipeline state or it changes.
+ // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+ // in other backends.
+ template <bool CanInheritBindGroups, typename DynamicOffset>
+ class BindGroupTrackerBase {
+ public:
+ void OnSetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindGroup,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ ASSERT(index < kMaxBindGroupsTyped);
+
+ if (mBindGroupLayoutsMask[index]) {
+ // It is okay to only dirty bind groups that are used by the current pipeline
+ // layout. If the pipeline layout changes, then the bind groups it uses will
+ // become dirty.
+
+ if (mBindGroups[index] != bindGroup) {
+ mDirtyBindGroups.set(index);
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+ }
+
+ if (dynamicOffsetCount > 0) {
+ mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
+ }
+ }
+
+ mBindGroups[index] = bindGroup;
+ mDynamicOffsetCounts[index] = dynamicOffsetCount;
+ SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+ }
+
+ void OnSetPipeline(PipelineBase* pipeline) {
+ mPipelineLayout = pipeline->GetLayout();
+ }
+
+ protected:
+ // The Derived class should call this before it applies bind groups.
+ void BeforeApply() {
+ if (mLastAppliedPipelineLayout == mPipelineLayout) {
+ return;
+ }
+
+ // Use the bind group layout mask to avoid marking unused bind groups as dirty.
+ mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+ // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+ // the first |k| matching bind groups may be inherited.
+ if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+ // Dirty bind groups that cannot be inherited.
+ BindGroupLayoutMask dirtiedGroups =
+ ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+ mDirtyBindGroups |= dirtiedGroups;
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+ // Clear any bind groups not in the mask.
+ mDirtyBindGroups &= mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+ } else {
+ mDirtyBindGroups = mBindGroupLayoutsMask;
+ mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
+ }
+ }
+
+ // The Derived class should call this after it applies bind groups.
+ void AfterApply() {
+ // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+ // will be dirtied again by the next pipeline change.
+ mDirtyBindGroups.reset();
+ mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+ // Keep track of the last applied pipeline layout. This allows us to avoid computing
+ // the intersection of the dirty bind groups and bind group layout mask in next Draw
+ // or Dispatch (which is very hot code) until the layout is changed again.
+ mLastAppliedPipelineLayout = mPipelineLayout;
+ }
+
+ BindGroupLayoutMask mDirtyBindGroups = 0;
+ BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+ BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+ ityp::array<BindGroupIndex,
+ std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+ kMaxBindGroups>
+ mDynamicOffsets = {};
+
+ // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+ // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+ // to the bind group bindings.
+ PipelineLayoutBase* mPipelineLayout = nullptr;
+ PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+ private:
+ // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+ // in other backends.
+ static void SetDynamicOffsets(uint64_t* data,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+ data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
+ }
+ }
+
+ static void SetDynamicOffsets(uint32_t* data,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+ }
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BINDGROUPTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp b/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp
new file mode 100644
index 00000000000..af4905a18f0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindingInfo.cpp
@@ -0,0 +1,266 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BindingInfo.h"
+
+#include "dawn/native/ChainUtils_autogen.h"
+
+namespace dawn::native {
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ BindingInfoType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case BindingInfoType::Buffer:
+ s->Append("Buffer");
+ break;
+ case BindingInfoType::Sampler:
+ s->Append("Sampler");
+ break;
+ case BindingInfoType::Texture:
+ s->Append("Texture");
+ break;
+ case BindingInfoType::StorageTexture:
+ s->Append("StorageTexture");
+ break;
+ case BindingInfoType::ExternalTexture:
+ s->Append("ExternalTexture");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const BindingInfo& value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ s->Append(absl::StrFormat("{\n binding: %u\n visibility: %s\n %s: {\n",
+ static_cast<uint32_t>(value.binding), value.visibility,
+ value.bindingType));
+
+ switch (value.bindingType) {
+ case BindingInfoType::Buffer:
+ s->Append(absl::StrFormat(" type: %s\n", value.buffer.type));
+ if (value.buffer.hasDynamicOffset) {
+ s->Append(" hasDynamicOffset: true\n");
+ }
+ if (value.buffer.minBindingSize != 0) {
+ s->Append(
+ absl::StrFormat(" minBindingSize: %u\n", value.buffer.minBindingSize));
+ }
+ break;
+ case BindingInfoType::Sampler:
+ s->Append(absl::StrFormat(" type: %s\n", value.sampler.type));
+ break;
+ case BindingInfoType::Texture:
+ s->Append(absl::StrFormat(" sampleType: %s\n", value.texture.sampleType));
+ s->Append(absl::StrFormat(" viewDimension: %s\n", value.texture.viewDimension));
+ if (value.texture.multisampled) {
+ s->Append(" multisampled: true\n");
+ } else {
+ s->Append(" multisampled: false\n");
+ }
+ break;
+ case BindingInfoType::StorageTexture:
+ s->Append(absl::StrFormat(" access: %s\n", value.storageTexture.access));
+ s->Append(absl::StrFormat(" format: %s\n", value.storageTexture.format));
+ s->Append(
+ absl::StrFormat(" viewDimension: %s\n", value.storageTexture.viewDimension));
+ break;
+ case BindingInfoType::ExternalTexture:
+ break;
+ }
+
+ s->Append(" }\n}");
+ return {true};
+ }
+
+ void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+ bindingCounts->totalCount += 1;
+
+ uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+
+ if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+ ++bindingCounts->bufferCount;
+ const BufferBindingLayout& buffer = entry.buffer;
+
+ if (buffer.minBindingSize == 0) {
+ ++bindingCounts->unverifiedBufferCount;
+ }
+
+ switch (buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (buffer.hasDynamicOffset) {
+ ++bindingCounts->dynamicUniformBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+ break;
+
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (buffer.hasDynamicOffset) {
+ ++bindingCounts->dynamicStorageBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+ break;
+
+ case wgpu::BufferBindingType::Undefined:
+ // Can't get here due to the enclosing if statement.
+ UNREACHABLE();
+ break;
+ }
+ } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+ } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+ } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+ } else {
+ const ExternalTextureBindingLayout* externalTextureBindingLayout;
+ FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+ if (externalTextureBindingLayout != nullptr) {
+ perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
+ }
+ }
+
+ ASSERT(perStageBindingCountMember != nullptr);
+ for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+ ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+ }
+ }
+
+ void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+ bindingCounts->totalCount += rhs.totalCount;
+ bindingCounts->bufferCount += rhs.bufferCount;
+ bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+ bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+ bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ bindingCounts->perStage[stage].sampledTextureCount +=
+ rhs.perStage[stage].sampledTextureCount;
+ bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+ bindingCounts->perStage[stage].storageBufferCount +=
+ rhs.perStage[stage].storageBufferCount;
+ bindingCounts->perStage[stage].storageTextureCount +=
+ rhs.perStage[stage].storageTextureCount;
+ bindingCounts->perStage[stage].uniformBufferCount +=
+ rhs.perStage[stage].uniformBufferCount;
+ bindingCounts->perStage[stage].externalTextureCount +=
+ rhs.perStage[stage].externalTextureCount;
+ }
+ }
+
+ MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+ DAWN_INVALID_IF(
+ bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
+ "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+ "limit (%u).",
+ bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+
+ DAWN_INVALID_IF(
+ bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
+ "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+ "limit (%u).",
+ bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].sampledTextureCount >
+ kMaxSampledTexturesPerShaderStage,
+ "The number of sampled textures (%u) in the %s stage exceeds the maximum "
+ "per-stage limit (%u).",
+ bindingCounts.perStage[stage].sampledTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage);
+
+ // The per-stage number of external textures is bound by the maximum sampled textures
+ // per stage.
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].externalTextureCount >
+ kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+ "The number of external textures (%u) in the %s stage exceeds the maximum "
+ "per-stage limit (%u).",
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].sampledTextureCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kSampledTexturesPerExternalTexture) >
+ kMaxSampledTexturesPerShaderStage,
+ "The combination of sampled textures (%u) and external textures (%u) in the %s "
+ "stage exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].sampledTextureCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxSampledTexturesPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
+ "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
+ "(%u).",
+ bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].samplerCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kSamplersPerExternalTexture) >
+ kMaxSamplersPerShaderStage,
+ "The combination of samplers (%u) and external textures (%u) in the %s stage "
+ "exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].samplerCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxSamplersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
+ "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
+ "limit (%u).",
+ bindingCounts.perStage[stage].storageBufferCount, stage,
+ kMaxStorageBuffersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].storageTextureCount >
+ kMaxStorageTexturesPerShaderStage,
+ "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
+ "limit (%u).",
+ bindingCounts.perStage[stage].storageTextureCount, stage,
+ kMaxStorageTexturesPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
+ "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
+ "limit (%u).",
+ bindingCounts.perStage[stage].uniformBufferCount, stage,
+ kMaxUniformBuffersPerShaderStage);
+
+ DAWN_INVALID_IF(
+ bindingCounts.perStage[stage].uniformBufferCount +
+ (bindingCounts.perStage[stage].externalTextureCount *
+ kUniformsPerExternalTexture) >
+ kMaxUniformBuffersPerShaderStage,
+ "The combination of uniform buffers (%u) and external textures (%u) in the %s "
+ "stage exceeds the maximum per-stage limit (%u).",
+ bindingCounts.perStage[stage].uniformBufferCount,
+ bindingCounts.perStage[stage].externalTextureCount, stage,
+ kMaxUniformBuffersPerShaderStage);
+ }
+
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BindingInfo.h b/chromium/third_party/dawn/src/dawn/native/BindingInfo.h
new file mode 100644
index 00000000000..8e4f8c3fb52
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BindingInfo.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDINGINFO_H_
+#define DAWNNATIVE_BINDINGINFO_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PerStage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+
+ // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+ static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+ kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+
+ static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+ BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+
+ // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+ // API. There should never be more bindings than the max per stage, for each stage.
+ static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+ 3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+ kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+ kMaxUniformBuffersPerShaderStage);
+
+ static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+ BindingIndex(kMaxBindingsPerPipelineLayout);
+
+ // TODO(enga): Figure out a good number for this.
+ static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+
+ enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ BindingInfoType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ struct BindingInfo {
+ BindingNumber binding;
+ wgpu::ShaderStage visibility;
+
+ BindingInfoType bindingType;
+
+ // TODO(dawn:527): These four values could be made into a union.
+ BufferBindingLayout buffer;
+ SamplerBindingLayout sampler;
+ TextureBindingLayout texture;
+ StorageTextureBindingLayout storageTexture;
+ };
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const BindingInfo& value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ struct BindingSlot {
+ BindGroupIndex group;
+ BindingNumber binding;
+ };
+
+ struct PerStageBindingCounts {
+ uint32_t sampledTextureCount;
+ uint32_t samplerCount;
+ uint32_t storageBufferCount;
+ uint32_t storageTextureCount;
+ uint32_t uniformBufferCount;
+ uint32_t externalTextureCount;
+ };
+
+ struct BindingCounts {
+ uint32_t totalCount;
+ uint32_t bufferCount;
+ uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
+ uint32_t dynamicUniformBufferCount;
+ uint32_t dynamicStorageBufferCount;
+ PerStage<PerStageBindingCounts> perStage;
+ };
+
+ void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+ void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+ MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+
+ // For buffer size validation
+ using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BINDINGINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp
new file mode 100644
index 00000000000..76d7a657a26
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.cpp
@@ -0,0 +1,264 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BuddyAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+namespace dawn::native {
+
+ BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+ ASSERT(IsPowerOfTwo(maxSize));
+
+ mFreeLists.resize(Log2(mMaxBlockSize) + 1);
+
+ // Insert the level0 free block.
+ mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
+ mFreeLists[0] = {mRoot};
+ }
+
+ BuddyAllocator::~BuddyAllocator() {
+ if (mRoot) {
+ DeleteBlock(mRoot);
+ }
+ }
+
+ uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
+ return ComputeNumOfFreeBlocks(mRoot);
+ }
+
+ uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
+ if (block->mState == BlockState::Free) {
+ return 1;
+ } else if (block->mState == BlockState::Split) {
+ return ComputeNumOfFreeBlocks(block->split.pLeft) +
+ ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
+ }
+ return 0;
+ }
+
+    uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
+        // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
+        // However, mFreeLists is zero-indexed by level.
+        // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
+        return Log2(mMaxBlockSize) - Log2(blockSize);
+    }
+
+    uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
+                                                     uint64_t alignment) const {
+        ASSERT(IsPowerOfTwo(alignment));
+        // The current level is the level that corresponds to the allocation size. The free list may
+        // not contain a block at that level until a larger one gets allocated (and splits).
+        // Continue to go up the tree until such a larger block exists.
+        //
+        // Even if the block exists at the level, it cannot be used if its offset is unaligned.
+        // When the alignment is also a power-of-two, we simply use the next free block whose size
+        // is greater than or equal to the alignment value.
+        //
+        // After one 8-byte allocation:
+        //
+        // Level --------------------------------
+        //     0 32 | S |
+        // --------------------------------
+        //     1 16 | S | F2 | S - split
+        // -------------------------------- F - free
+        //     2 8 | Aa | F1 | | A - allocated
+        // --------------------------------
+        //
+        // Allocate(size=8, alignment=8) will be satisfied by using F1.
+        // Allocate(size=8, alignment=4) will be satisfied by using F1.
+        // Allocate(size=8, alignment=16) will be satisfied by using F2.
+        //
+        for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
+            size_t currLevel = allocationBlockLevel - ii;
+            BuddyBlock* freeBlock = mFreeLists[currLevel].head;
+            if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
+                return currLevel;
+            }
+        }
+        return kInvalidOffset;  // No free block exists at any level.
+    }
+
+ // Inserts existing free block into the free-list.
+ // Called by allocate upon splitting to insert a child block into a free-list.
+ // Note: Always insert into the head of the free-list. As when a larger free block at a lower
+ // level was split, there were no smaller free blocks at a higher level to allocate.
+ void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
+ ASSERT(block->mState == BlockState::Free);
+
+ // Inserted block is now the front (no prev).
+ block->free.pPrev = nullptr;
+
+ // Old head is now the inserted block's next.
+ block->free.pNext = mFreeLists[level].head;
+
+ // Block already in HEAD position (ex. right child was inserted first).
+ if (mFreeLists[level].head != nullptr) {
+ // Old head's previous is the inserted block.
+ mFreeLists[level].head->free.pPrev = block;
+ }
+
+ mFreeLists[level].head = block;
+ }
+
+ void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
+ ASSERT(block->mState == BlockState::Free);
+
+ if (mFreeLists[level].head == block) {
+ // Block is in HEAD position.
+ mFreeLists[level].head = mFreeLists[level].head->free.pNext;
+ } else {
+ // Block is after HEAD position.
+ BuddyBlock* pPrev = block->free.pPrev;
+ BuddyBlock* pNext = block->free.pNext;
+
+ ASSERT(pPrev != nullptr);
+ ASSERT(pPrev->mState == BlockState::Free);
+
+ pPrev->free.pNext = pNext;
+
+ if (pNext != nullptr) {
+ ASSERT(pNext->mState == BlockState::Free);
+ pNext->free.pPrev = pPrev;
+ }
+ }
+ }
+
+    uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
+        if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
+            return kInvalidOffset;
+        }
+
+        // Compute the level corresponding to the requested allocation size.
+        const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
+
+        ASSERT(allocationSizeToLevel < mFreeLists.size());
+
+        uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
+
+        // Error when no free blocks exist (allocator is full)
+        if (currBlockLevel == kInvalidOffset) {
+            return kInvalidOffset;
+        }
+
+        // Split free blocks level-by-level.
+        // Terminate when the current block level is equal to the computed level of the requested
+        // allocation.
+        BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
+
+        for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
+            ASSERT(currBlock->mState == BlockState::Free);
+
+            // Remove curr block (about to be split).
+            RemoveFreeBlock(currBlock, currBlockLevel);
+
+            // Create two free child blocks (the buddies).
+            const uint64_t nextLevelSize = currBlock->mSize / 2;
+            BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
+            BuddyBlock* rightChildBlock =
+                new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
+
+            // Remember the parent to merge these back upon de-allocation.
+            rightChildBlock->pParent = currBlock;
+            leftChildBlock->pParent = currBlock;
+
+            // Make them buddies.
+            leftChildBlock->pBuddy = rightChildBlock;
+            rightChildBlock->pBuddy = leftChildBlock;
+
+            // Insert the children back into the free list into the next level.
+            // The free list does not require a specific order. However, an order is specified as
+            // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
+            InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
+            InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
+
+            // Curr block is now split.
+            currBlock->mState = BlockState::Split;
+            currBlock->split.pLeft = leftChildBlock;
+
+            // Descend down into the next level.
+            currBlock = leftChildBlock;
+        }
+
+        // Remove curr block from free-list (now allocated).
+        RemoveFreeBlock(currBlock, currBlockLevel);
+        currBlock->mState = BlockState::Allocated;
+
+        return currBlock->mOffset;
+    }
+
+    void BuddyAllocator::Deallocate(uint64_t offset) {
+        BuddyBlock* curr = mRoot;
+
+        // TODO(crbug.com/dawn/827): Optimize de-allocation.
+        // Passing allocationSize directly will avoid the following level-by-level search;
+        // however, it requires the size information to be stored outside the allocator.
+
+        // Search for the allocated block that corresponds to the given block offset.
+        size_t currBlockLevel = 0;
+        while (curr->mState == BlockState::Split) {
+            if (offset < curr->split.pLeft->pBuddy->mOffset) {
+                curr = curr->split.pLeft;
+            } else {
+                curr = curr->split.pLeft->pBuddy;
+            }
+
+            currBlockLevel++;
+        }
+
+        ASSERT(curr->mState == BlockState::Allocated);
+
+        // Ensure the block is at the correct level
+        ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
+
+        // Mark curr free so we can merge.
+        curr->mState = BlockState::Free;
+
+        // Merge the buddies (LevelN-to-Level0).
+        while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
+            // Remove the buddy.
+            RemoveFreeBlock(curr->pBuddy, currBlockLevel);
+
+            BuddyBlock* parent = curr->pParent;
+
+            // The buddies were inserted in a specific order but
+            // could be deleted in any order.
+            DeleteBlock(curr->pBuddy);
+            DeleteBlock(curr);
+
+            // Parent is now free.
+            parent->mState = BlockState::Free;
+
+            // Ascend up to the next level (parent block).
+            curr = parent;
+            currBlockLevel--;
+        }
+
+        InsertFreeBlock(curr, currBlockLevel);
+    }
+
+    // Helper which deletes a block in the tree recursively (post-order).
+    void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
+        ASSERT(block != nullptr);
+
+        if (block->mState == BlockState::Split) {
+            // Delete the pair in the same order we inserted them.
+            DeleteBlock(block->split.pLeft->pBuddy);
+            DeleteBlock(block->split.pLeft);
+        }
+        delete block;
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h
new file mode 100644
index 00000000000..31c8b0b9fd8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyAllocator.h
@@ -0,0 +1,117 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUDDYALLOCATOR_H_
+#define DAWNNATIVE_BUDDYALLOCATOR_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+namespace dawn::native {
+
+ // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
+    // Memory is split into halves until just large enough to fit the request. This
+ // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
+ // returning the starting offset whose size is guaranteed to be greater than or equal to the
+ // allocation size. To deallocate, the same offset is used to find the corresponding block.
+ //
+ // Internally, it manages a free list to track free blocks in a full binary tree.
+ // Every index in the free list corresponds to a level in the tree. That level also determines
+ // the size of the block to be used to satisfy the request. The first level (index=0) represents
+ // the root whose size is also called the max block size.
+ //
+    class BuddyAllocator {
+      public:
+        BuddyAllocator(uint64_t maxSize);
+        ~BuddyAllocator();
+
+        // Required methods.
+        uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
+        void Deallocate(uint64_t offset);
+
+        // For testing purposes only.
+        uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+
+        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+      private:
+        uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
+        uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
+
+        enum class BlockState { Free, Split, Allocated };
+
+        struct BuddyBlock {
+            BuddyBlock(uint64_t size, uint64_t offset)
+                : mOffset(offset), mSize(size), mState(BlockState::Free) {
+                free.pPrev = nullptr;
+                free.pNext = nullptr;
+            }
+
+            uint64_t mOffset;
+            uint64_t mSize;
+
+            // Pointer to this block's buddy, iff parent is split.
+            // Used to quickly merge buddy blocks upon de-allocate.
+            BuddyBlock* pBuddy = nullptr;
+            BuddyBlock* pParent = nullptr;
+
+            // Track whether this block has been split or not.
+            BlockState mState;
+
+            struct FreeLinks {
+                BuddyBlock* pPrev;
+                BuddyBlock* pNext;
+            };
+
+            struct SplitLink {
+                BuddyBlock* pLeft;
+            };
+
+            union {
+                // Used upon allocation.
+                // Avoids searching for the next free block.
+                FreeLinks free;
+
+                // Used upon de-allocation.
+                // If this block was split upon allocation, it and its buddy are to be deleted.
+                SplitLink split;
+            };
+        };
+
+        void InsertFreeBlock(BuddyBlock* block, size_t level);
+        void RemoveFreeBlock(BuddyBlock* block, size_t level);
+        void DeleteBlock(BuddyBlock* block);
+
+        uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
+
+        // Keep track of the head and tail (for faster insertion/removal).
+        struct BlockList {
+            BuddyBlock* head = nullptr; // First free block in level.
+            // TODO(crbug.com/dawn/827): Track the tail.
+        };
+
+        BuddyBlock* mRoot = nullptr; // Used to deallocate non-free blocks.
+
+        uint64_t mMaxBlockSize = 0;
+
+        // List of linked-lists of free blocks where the index is a level that
+        // corresponds to a power-of-two sized block.
+        std::vector<BlockList> mFreeLists;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BUDDYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp
new file mode 100644
index 00000000000..faee03e2da8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.cpp
@@ -0,0 +1,120 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/BuddyMemoryAllocator.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+
+namespace dawn::native {
+
+ BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator)
+ : mMemoryBlockSize(memoryBlockSize),
+ mBuddyBlockAllocator(maxSystemSize),
+ mHeapAllocator(heapAllocator) {
+ ASSERT(memoryBlockSize <= maxSystemSize);
+ ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+ ASSERT(maxSystemSize % mMemoryBlockSize == 0);
+
+ mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+ }
+
+ uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+ ASSERT(offset != BuddyAllocator::kInvalidOffset);
+ return offset / mMemoryBlockSize;
+ }
+
+ ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+ uint64_t alignment) {
+ ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+
+ if (allocationSize == 0) {
+ return std::move(invalidAllocation);
+ }
+
+ // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+ if (allocationSize > mMemoryBlockSize) {
+ return std::move(invalidAllocation);
+ }
+
+ // Round allocation size to nearest power-of-two.
+ allocationSize = NextPowerOfTwo(allocationSize);
+
+ // Allocation cannot exceed the memory size.
+ if (allocationSize > mMemoryBlockSize) {
+ return std::move(invalidAllocation);
+ }
+
+ // Attempt to sub-allocate a block of the requested size.
+ const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+ if (blockOffset == BuddyAllocator::kInvalidOffset) {
+ return std::move(invalidAllocation);
+ }
+
+ const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ // Transfer ownership to this allocator
+ std::unique_ptr<ResourceHeapBase> memory;
+ DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
+ mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+ }
+
+ mTrackedSubAllocations[memoryIndex].refcount++;
+
+ AllocationInfo info;
+ info.mBlockOffset = blockOffset;
+ info.mMethod = AllocationMethod::kSubAllocated;
+
+ // Allocation offset is always local to the memory.
+ const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
+
+ return ResourceMemoryAllocation{
+ info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+ }
+
+ void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+ const AllocationInfo info = allocation.GetInfo();
+
+ ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+
+ const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+
+ ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+ mTrackedSubAllocations[memoryIndex].refcount--;
+
+ if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+ mHeapAllocator->DeallocateResourceHeap(
+ std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+ }
+
+ mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+ }
+
+ uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+ return mMemoryBlockSize;
+ }
+
+ uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+ uint64_t count = 0;
+ for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+ if (allocation.refcount > 0) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h
new file mode 100644
index 00000000000..7fcfe71cb3e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/BuddyMemoryAllocator.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+#define DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
+
+#include "dawn/native/BuddyAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::native {
+
+ class ResourceHeapAllocator;
+
+ // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+ // memory created by MemoryAllocator clients. It creates a very large buddy system
+ // where backing device memory blocks equal a specified level in the system.
+ //
+ // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+ // memory index and should the memory not exist, it is created. If two sub-allocations share the
+ // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+ // release the other prematurely.
+ //
+ // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+ // It should also outlive all the resources that are in the buddy allocator.
+ class BuddyMemoryAllocator {
+ public:
+ BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator);
+ ~BuddyMemoryAllocator() = default;
+
+ ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
+ uint64_t alignment);
+ void Deallocate(const ResourceMemoryAllocation& allocation);
+
+ uint64_t GetMemoryBlockSize() const;
+
+ // For testing purposes.
+ uint64_t ComputeTotalNumOfHeapsForTesting() const;
+
+ private:
+ uint64_t GetMemoryIndex(uint64_t offset) const;
+
+ uint64_t mMemoryBlockSize = 0;
+
+ BuddyAllocator mBuddyBlockAllocator;
+ ResourceHeapAllocator* mHeapAllocator;
+
+ struct TrackedSubAllocations {
+ size_t refcount = 0;
+ std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
+ };
+
+ std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Buffer.cpp b/chromium/third_party/dawn/src/dawn/native/Buffer.cpp
new file mode 100644
index 00000000000..f324597401b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Buffer.cpp
@@ -0,0 +1,562 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Assert.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cstdio>
+#include <cstring>
+#include <utility>
+
+namespace dawn::native {
+
+ namespace {
+ struct MapRequestTask : QueueBase::TaskInFlight {
+ MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
+ : buffer(std::move(buffer)), id(id) {
+ }
+ void Finish() override {
+ buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
+ }
+ void HandleDeviceLoss() override {
+ buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
+ }
+ ~MapRequestTask() override = default;
+
+ private:
+ Ref<BufferBase> buffer;
+ MapRequestID id;
+ };
+
+ class ErrorBuffer final : public BufferBase {
+ public:
+ ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor, ObjectBase::kError) {
+ if (descriptor->mappedAtCreation) {
+ // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
+ // is invalid, and on 32bit systems we should avoid a narrowing conversion that
+ // would make size = 1 << 32 + 1 allocate one byte.
+ bool isValidSize =
+ descriptor->size != 0 &&
+ descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
+
+ if (isValidSize) {
+ mFakeMappedData =
+ std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
+ }
+ // Since error buffers in this case may allocate memory, we need to track them
+ // for destruction on the device.
+ TrackInDevice();
+ }
+ }
+
+ private:
+ bool IsCPUWritableAtCreation() const override {
+ UNREACHABLE();
+ }
+
+ MaybeError MapAtCreationImpl() override {
+ UNREACHABLE();
+ }
+
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+ UNREACHABLE();
+ }
+
+ void* GetMappedPointerImpl() override {
+ return mFakeMappedData.get();
+ }
+
+ void UnmapImpl() override {
+ mFakeMappedData.reset();
+ }
+
+ std::unique_ptr<uint8_t[]> mFakeMappedData;
+ };
+
+ } // anonymous namespace
+
+ MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+ DAWN_TRY(ValidateBufferUsage(descriptor->usage));
+
+ wgpu::BufferUsage usage = descriptor->usage;
+
+ DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
+
+ const wgpu::BufferUsage kMapWriteAllowedUsages =
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+ "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+ "usage is %s.",
+ usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
+
+ const wgpu::BufferUsage kMapReadAllowedUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+ "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+ "usage is %s.",
+ usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
+
+ DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+ "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+ descriptor->size);
+
+ return {};
+ }
+
+ // Buffer
+
+ BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label),
+ mSize(descriptor->size),
+ mUsage(descriptor->usage),
+ mState(BufferState::Unmapped) {
+ // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+ // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
+ if (mUsage & wgpu::BufferUsage::Storage) {
+ mUsage |= kReadOnlyStorageBuffer;
+ }
+
+ // The query resolve buffer needs to be used as a storage buffer in the internal compute
+ // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
+ // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
+ // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
+ // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
+ // as storage buffer if it's created without Storage usage.
+ if (mUsage & wgpu::BufferUsage::QueryResolve) {
+ mUsage |= kInternalStorageBuffer;
+ }
+
+ // We also add internal storage usage for Indirect buffers for some transformations before
+ // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
+ // D3D12), since these transformations involve binding them as storage buffers for use in a
+ // compute pass.
+ if (mUsage & wgpu::BufferUsage::Indirect) {
+ mUsage |= kInternalStorageBuffer;
+ }
+
+ TrackInDevice();
+ }
+
+ BufferBase::BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+ if (descriptor->mappedAtCreation) {
+ mState = BufferState::MappedAtCreation;
+ mMapOffset = 0;
+ mMapSize = mSize;
+ }
+ }
+
+ BufferBase::BufferBase(DeviceBase* device, BufferState state)
+ : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
+ TrackInDevice();
+ }
+
+ BufferBase::~BufferBase() {
+ ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+ }
+
+ void BufferBase::DestroyImpl() {
+ if (mState == BufferState::Mapped) {
+ UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+ } else if (mState == BufferState::MappedAtCreation) {
+ if (mStagingBuffer != nullptr) {
+ mStagingBuffer.reset();
+ } else if (mSize != 0) {
+ UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+ }
+ }
+ mState = BufferState::Destroyed;
+ }
+
+ // static
+ BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+ return new ErrorBuffer(device, descriptor);
+ }
+
+ ObjectType BufferBase::GetType() const {
+ return ObjectType::Buffer;
+ }
+
+ uint64_t BufferBase::GetSize() const {
+ ASSERT(!IsError());
+ return mSize;
+ }
+
+ uint64_t BufferBase::GetAllocatedSize() const {
+ ASSERT(!IsError());
+ // The backend must initialize this value.
+ ASSERT(mAllocatedSize != 0);
+ return mAllocatedSize;
+ }
+
+ wgpu::BufferUsage BufferBase::GetUsage() const {
+ ASSERT(!IsError());
+ return mUsage;
+ }
+
+ MaybeError BufferBase::MapAtCreation() {
+ DAWN_TRY(MapAtCreationInternal());
+
+ void* ptr;
+ size_t size;
+ if (mSize == 0) {
+ return {};
+ } else if (mStagingBuffer) {
+ // If there is a staging buffer for initialization, clear its contents directly.
+ // It should be exactly as large as the buffer allocation.
+ ptr = mStagingBuffer->GetMappedPointer();
+ size = mStagingBuffer->GetSize();
+ ASSERT(size == GetAllocatedSize());
+ } else {
+ // Otherwise, the buffer is directly mappable on the CPU.
+ ptr = GetMappedPointerImpl();
+ size = GetAllocatedSize();
+ }
+
+ DeviceBase* device = GetDevice();
+ if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ memset(ptr, uint8_t(0u), size);
+ SetIsDataInitialized();
+ device->IncrementLazyClearCountForTesting();
+ } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ memset(ptr, uint8_t(1u), size);
+ }
+
+ return {};
+ }
+
+ MaybeError BufferBase::MapAtCreationInternal() {
+ ASSERT(!IsError());
+ mMapOffset = 0;
+ mMapSize = mSize;
+
+ // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
+ // Skip handling 0-sized buffers so we don't try to map them in the backend.
+ if (mSize != 0) {
+ // Mappable buffers don't use a staging buffer and are just as if mapped through
+ // MapAsync.
+ if (IsCPUWritableAtCreation()) {
+ DAWN_TRY(MapAtCreationImpl());
+ } else {
+ // If any of these fail, the buffer will be deleted and replaced with an error
+ // buffer. The staging buffer is used to return mappable data to initialize the
+ // buffer contents. Allocate one as large as the real buffer size so that every byte
+ // is initialized.
+ // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
+ // buffer so we don't create many small buffers.
+ DAWN_TRY_ASSIGN(mStagingBuffer,
+ GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
+ }
+ }
+
+ // Only set the state to mapped at creation if we did not fail at any point in this helper.
+ // Otherwise, if we override the default unmapped state before succeeding to create a
+ // staging buffer, we will have issues when we try to destroy the buffer.
+ mState = BufferState::MappedAtCreation;
+ return {};
+ }
+
+ MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
+ ASSERT(!IsError());
+
+ switch (mState) {
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
+ case BufferState::Unmapped:
+ return {};
+ }
+ UNREACHABLE();
+ }
+
+ void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+ ASSERT(!IsError());
+ if (mMapCallback != nullptr && mapID == mLastMapID) {
+ // Tag the callback as fired before firing it, otherwise it could fire a second time if
+ // for example buffer.Unmap() is called inside the application-provided callback.
+ WGPUBufferMapCallback callback = mMapCallback;
+ mMapCallback = nullptr;
+
+ if (GetDevice()->IsLost()) {
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+ } else {
+ callback(status, mMapUserdata);
+ }
+ }
+ }
+
+ void BufferBase::APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+ // possible to default the function argument (because there is the callback later in the
+ // argument list)
+ if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
+ size = mSize - offset;
+ }
+
+ WGPUBufferMapAsyncStatus status;
+ if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+ "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
+ size)) {
+ if (callback) {
+ callback(status, userdata);
+ }
+ return;
+ }
+ ASSERT(!IsError());
+
+ mLastMapID++;
+ mMapMode = mode;
+ mMapOffset = offset;
+ mMapSize = size;
+ mMapCallback = callback;
+ mMapUserdata = userdata;
+ mState = BufferState::Mapped;
+
+ if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+ CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
+ return;
+ }
+ std::unique_ptr<MapRequestTask> request =
+ std::make_unique<MapRequestTask>(this, mLastMapID);
+ GetDevice()->GetQueue()->TrackTask(std::move(request),
+ GetDevice()->GetPendingCommandSerial());
+ }
+
+ void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, true);
+ }
+
+ const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, false);
+ }
+
+ void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
+ if (!CanGetMappedRange(writable, offset, size)) {
+ return nullptr;
+ }
+
+ if (mStagingBuffer != nullptr) {
+ return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+ }
+ if (mSize == 0) {
+ return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+ }
+ uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
+ return start == nullptr ? nullptr : start + offset;
+ }
+
+ void BufferBase::APIDestroy() {
+ Destroy();
+ }
+
+ MaybeError BufferBase::CopyFromStagingBuffer() {
+ ASSERT(mStagingBuffer);
+ if (mSize == 0) {
+ // Staging buffer is not created if zero size.
+ ASSERT(mStagingBuffer == nullptr);
+ return {};
+ }
+
+ DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
+ GetAllocatedSize()));
+
+ DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
+ uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+ return {};
+ }
+
+ void BufferBase::APIUnmap() {
+ if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
+ return;
+ }
+ Unmap();
+ }
+
+ void BufferBase::Unmap() {
+ UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+ }
+
+ void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
+ if (mState == BufferState::Mapped) {
+ // A map request can only be called once, so this will fire only if the request wasn't
+ // completed before the Unmap.
+ // Callbacks are not fired if there is no callback registered, so this is correct for
+ // mappedAtCreation = true.
+ CallMapCallback(mLastMapID, callbackStatus);
+ UnmapImpl();
+
+ mMapCallback = nullptr;
+ mMapUserdata = 0;
+ } else if (mState == BufferState::MappedAtCreation) {
+ if (mStagingBuffer != nullptr) {
+ GetDevice()->ConsumedError(CopyFromStagingBuffer());
+ } else if (mSize != 0) {
+ UnmapImpl();
+ }
+ }
+
+ mState = BufferState::Unmapped;
+ }
+
+ MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const {
+ *status = WGPUBufferMapAsyncStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+ *status = WGPUBufferMapAsyncStatus_Error;
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_INVALID_IF(uint64_t(offset) > mSize,
+ "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
+ this);
+
+ DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+ DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
+
+ DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
+ "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+ offset, size, mSize, this);
+
+ switch (mState) {
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+ case BufferState::Unmapped:
+ break;
+ }
+
+ bool isReadMode = mode & wgpu::MapMode::Read;
+ bool isWriteMode = mode & wgpu::MapMode::Write;
+ DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+ wgpu::MapMode::Write, wgpu::MapMode::Read);
+
+ if (mode & wgpu::MapMode::Read) {
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapRead);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapWrite);
+ }
+
+ *status = WGPUBufferMapAsyncStatus_Success;
+ return {};
+ }
+
+ bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+ if (offset % 8 != 0 || size % 4 != 0) {
+ return false;
+ }
+
+ if (size > mMapSize || offset < mMapOffset) {
+ return false;
+ }
+
+ size_t offsetInMappedRange = offset - mMapOffset;
+ if (offsetInMappedRange > mMapSize - size) {
+ return false;
+ }
+
+ // Note that:
+ //
+ // - We don't check that the device is alive because the application can ask for the
+ // mapped pointer before it knows, and even Dawn knows, that the device was lost, and
+ // still needs to work properly.
+ // - We don't check that the object is alive because we need to return mapped pointers
+ // for error buffers too.
+
+ switch (mState) {
+ // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
+ case BufferState::MappedAtCreation:
+ return true;
+
+ case BufferState::Mapped:
+ ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
+ bool(mMapMode & wgpu::MapMode::Write));
+ return !writable || (mMapMode & wgpu::MapMode::Write);
+
+ case BufferState::Unmapped:
+ case BufferState::Destroyed:
+ return false;
+ }
+ UNREACHABLE();
+ }
+
+ MaybeError BufferBase::ValidateUnmap() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+ switch (mState) {
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ // A buffer may be in the Mapped state if it was created with mappedAtCreation
+ // even if it did not have a mappable usage.
+ return {};
+ case BufferState::Unmapped:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
+ case BufferState::Destroyed:
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+ }
+ UNREACHABLE();
+ }
+
+ void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+ CallMapCallback(mapID, status);
+ }
+
+ bool BufferBase::NeedsInitialization() const {
+ return !mIsDataInitialized &&
+ GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+ }
+
+ bool BufferBase::IsDataInitialized() const {
+ return mIsDataInitialized;
+ }
+
+ void BufferBase::SetIsDataInitialized() {
+ mIsDataInitialized = true;
+ }
+
+ bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+ return offset == 0 && size == GetSize();
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Buffer.h b/chromium/third_party/dawn/src/dawn/native/Buffer.h
new file mode 100644
index 00000000000..2a9759ff2d6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Buffer.h
@@ -0,0 +1,135 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUFFER_H_
+#define DAWNNATIVE_BUFFER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+ struct CopyTextureToBufferCmd;
+
+ enum class MapType : uint32_t;
+
+ MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
+
+ static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+ wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+ wgpu::BufferUsage::Indirect;
+
+ static constexpr wgpu::BufferUsage kMappableBufferUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+
+ class BufferBase : public ApiObjectBase {
+ public:
+ enum class BufferState {
+ Unmapped,
+ Mapped,
+ MappedAtCreation,
+ Destroyed,
+ };
+ BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
+
+ static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+
+ ObjectType GetType() const override;
+
+ uint64_t GetSize() const;
+ uint64_t GetAllocatedSize() const;
+ wgpu::BufferUsage GetUsage() const;
+
+ MaybeError MapAtCreation();
+ void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+ MaybeError ValidateCanUseOnQueueNow() const;
+
+ bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+ bool NeedsInitialization() const;
+ bool IsDataInitialized() const;
+ void SetIsDataInitialized();
+
+ void* GetMappedRange(size_t offset, size_t size, bool writable = true);
+ void Unmap();
+
+ // Dawn API
+ void APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* APIGetMappedRange(size_t offset, size_t size);
+ const void* APIGetConstMappedRange(size_t offset, size_t size);
+ void APIUnmap();
+ void APIDestroy();
+
+ protected:
+ BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag);
+
+ // Constructor used only for mocking and testing.
+ BufferBase(DeviceBase* device, BufferState state);
+ void DestroyImpl() override;
+
+ ~BufferBase() override;
+
+ MaybeError MapAtCreationInternal();
+
+ uint64_t mAllocatedSize = 0;
+
+ private:
+ virtual MaybeError MapAtCreationImpl() = 0;
+ virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
+ virtual void UnmapImpl() = 0;
+ virtual void* GetMappedPointerImpl() = 0;
+
+ virtual bool IsCPUWritableAtCreation() const = 0;
+ MaybeError CopyFromStagingBuffer();
+ void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+ MaybeError ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const;
+ MaybeError ValidateUnmap() const;
+ bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
+ void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+
+ uint64_t mSize = 0;
+ wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+ BufferState mState;
+ bool mIsDataInitialized = false;
+
+ std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
+ WGPUBufferMapCallback mMapCallback = nullptr;
+ void* mMapUserdata = 0;
+ MapRequestID mLastMapID = MapRequestID(0);
+ wgpu::MapMode mMapMode = wgpu::MapMode::None;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt
new file mode 100644
index 00000000000..38b63d83b79
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CMakeLists.txt
@@ -0,0 +1,554 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+ TARGET "native_utils"
+ PRINT_NAME "Dawn native utilities"
+ RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
+)
+
+add_library(dawn_native ${DAWN_DUMMY_FILE})
+
+target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/DawnNative.h"
+ "${DAWN_INCLUDE_DIR}/dawn/native/dawn_native_export.h"
+ ${DAWN_NATIVE_UTILS_GEN_SOURCES}
+ "Adapter.cpp"
+ "Adapter.h"
+ "AsyncTask.cpp"
+ "AsyncTask.h"
+ "AttachmentState.cpp"
+ "AttachmentState.h"
+ "BackendConnection.cpp"
+ "BackendConnection.h"
+ "BindGroup.cpp"
+ "BindGroup.h"
+ "BindGroupLayout.cpp"
+ "BindGroupLayout.h"
+ "BindGroupTracker.h"
+ "BindingInfo.cpp"
+ "BindingInfo.h"
+ "BuddyAllocator.cpp"
+ "BuddyAllocator.h"
+ "BuddyMemoryAllocator.cpp"
+ "BuddyMemoryAllocator.h"
+ "Buffer.cpp"
+ "Buffer.h"
+ "CachedObject.cpp"
+ "CachedObject.h"
+ "CallbackTaskManager.cpp"
+ "CallbackTaskManager.h"
+ "CommandAllocator.cpp"
+ "CommandAllocator.h"
+ "CommandBuffer.cpp"
+ "CommandBuffer.h"
+ "CommandBufferStateTracker.cpp"
+ "CommandBufferStateTracker.h"
+ "CommandEncoder.cpp"
+ "CommandEncoder.h"
+ "CommandValidation.cpp"
+ "CommandValidation.h"
+ "Commands.cpp"
+ "Commands.h"
+ "CompilationMessages.cpp"
+ "CompilationMessages.h"
+ "ComputePassEncoder.cpp"
+ "ComputePassEncoder.h"
+ "ComputePipeline.cpp"
+ "ComputePipeline.h"
+ "CopyTextureForBrowserHelper.cpp"
+ "CopyTextureForBrowserHelper.h"
+ "CreatePipelineAsyncTask.cpp"
+ "CreatePipelineAsyncTask.h"
+ "Device.cpp"
+ "Device.h"
+ "DynamicUploader.cpp"
+ "DynamicUploader.h"
+ "EncodingContext.cpp"
+ "EncodingContext.h"
+ "EnumClassBitmasks.h"
+ "EnumMaskIterator.h"
+ "Error.cpp"
+ "Error.h"
+ "ErrorData.cpp"
+ "ErrorData.h"
+ "ErrorInjector.cpp"
+ "ErrorInjector.h"
+ "ErrorScope.cpp"
+ "ErrorScope.h"
+ "Features.cpp"
+ "Features.h"
+ "ExternalTexture.cpp"
+ "ExternalTexture.h"
+ "IndirectDrawMetadata.cpp"
+ "IndirectDrawMetadata.h"
+ "IndirectDrawValidationEncoder.cpp"
+ "IndirectDrawValidationEncoder.h"
+ "ObjectContentHasher.cpp"
+ "ObjectContentHasher.h"
+ "Format.cpp"
+ "Format.h"
+ "Forward.h"
+ "Instance.cpp"
+ "Instance.h"
+ "InternalPipelineStore.cpp"
+ "InternalPipelineStore.h"
+ "IntegerTypes.h"
+ "Limits.cpp"
+ "Limits.h"
+ "ObjectBase.cpp"
+ "ObjectBase.h"
+ "PassResourceUsage.h"
+ "PassResourceUsageTracker.cpp"
+ "PassResourceUsageTracker.h"
+ "PersistentCache.cpp"
+ "PersistentCache.h"
+ "PerStage.cpp"
+ "PerStage.h"
+ "Pipeline.cpp"
+ "Pipeline.h"
+ "PipelineLayout.cpp"
+ "PipelineLayout.h"
+ "PooledResourceMemoryAllocator.cpp"
+ "PooledResourceMemoryAllocator.h"
+ "ProgrammableEncoder.cpp"
+ "ProgrammableEncoder.h"
+ "QueryHelper.cpp"
+ "QueryHelper.h"
+ "QuerySet.cpp"
+ "QuerySet.h"
+ "Queue.cpp"
+ "Queue.h"
+ "RenderBundle.cpp"
+ "RenderBundle.h"
+ "RenderBundleEncoder.cpp"
+ "RenderBundleEncoder.h"
+ "RenderEncoderBase.cpp"
+ "RenderEncoderBase.h"
+ "RenderPassEncoder.cpp"
+ "RenderPassEncoder.h"
+ "RenderPipeline.cpp"
+ "RenderPipeline.h"
+ "ResourceHeap.h"
+ "ResourceHeapAllocator.h"
+ "ResourceMemoryAllocation.cpp"
+ "ResourceMemoryAllocation.h"
+ "RingBufferAllocator.cpp"
+ "RingBufferAllocator.h"
+ "Sampler.cpp"
+ "Sampler.h"
+ "ScratchBuffer.cpp"
+ "ScratchBuffer.h"
+ "ShaderModule.cpp"
+ "ShaderModule.h"
+ "StagingBuffer.cpp"
+ "StagingBuffer.h"
+ "Subresource.cpp"
+ "Subresource.h"
+ "SubresourceStorage.h"
+ "Surface.cpp"
+ "Surface.h"
+ "SwapChain.cpp"
+ "SwapChain.h"
+ "Texture.cpp"
+ "Texture.h"
+ "TintUtils.cpp"
+ "TintUtils.h"
+ "ToBackend.h"
+ "Toggles.cpp"
+ "Toggles.h"
+ "VertexFormat.cpp"
+ "VertexFormat.h"
+ "dawn_platform.h"
+ "webgpu_absl_format.cpp"
+ "webgpu_absl_format.h"
+ "utils/WGPUHelpers.cpp"
+ "utils/WGPUHelpers.h"
+)
+target_link_libraries(dawn_native
+ PUBLIC dawncpp_headers
+ PRIVATE dawn_common
+ dawn_platform
+ dawn_internal_config
+ libtint
+ SPIRV-Tools-opt
+ absl_strings
+ absl_str_format_internal
+)
+
+target_include_directories(dawn_native PRIVATE ${DAWN_ABSEIL_DIR})
+
+if (DAWN_USE_X11)
+ find_package(X11 REQUIRED)
+ target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
+ target_include_directories(dawn_native PRIVATE ${X11_INCLUDE_DIR})
+ target_sources(dawn_native PRIVATE
+ "XlibXcbFunctions.cpp"
+ "XlibXcbFunctions.h"
+ )
+endif()
+
+# Only win32 app needs to link with user32.lib
+# In UWP, all available APIs are defined in WindowsApp.lib
+# and it is automatically linked when WINDOWS_STORE is set
+if (WIN32 AND NOT WINDOWS_STORE)
+ target_link_libraries(dawn_native PRIVATE user32.lib)
+endif()
+
+# DXGIGetDebugInterface1 is defined in dxgi.lib
+# But this API is tagged as a development-only capability
+# which implies that linking to this function will cause
+# the application to fail Windows store certification
+# So we only link to it in debug build when compiling for UWP.
+# In win32 we load dxgi.dll using LoadLibrary
+# so no need for static linking.
+if (WINDOWS_STORE)
+ target_link_libraries(dawn_native PRIVATE debug dxgi.lib)
+endif()
+
+if (DAWN_ENABLE_D3D12)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/D3D12Backend.h"
+ "d3d12/AdapterD3D12.cpp"
+ "d3d12/AdapterD3D12.h"
+ "d3d12/BackendD3D12.cpp"
+ "d3d12/BackendD3D12.h"
+ "d3d12/BindGroupD3D12.cpp"
+ "d3d12/BindGroupD3D12.h"
+ "d3d12/BindGroupLayoutD3D12.cpp"
+ "d3d12/BindGroupLayoutD3D12.h"
+ "d3d12/BufferD3D12.cpp"
+ "d3d12/BufferD3D12.h"
+ "d3d12/CPUDescriptorHeapAllocationD3D12.cpp"
+ "d3d12/CPUDescriptorHeapAllocationD3D12.h"
+ "d3d12/CommandAllocatorManager.cpp"
+ "d3d12/CommandAllocatorManager.h"
+ "d3d12/CommandBufferD3D12.cpp"
+ "d3d12/CommandBufferD3D12.h"
+ "d3d12/CommandRecordingContext.cpp"
+ "d3d12/CommandRecordingContext.h"
+ "d3d12/ComputePipelineD3D12.cpp"
+ "d3d12/ComputePipelineD3D12.h"
+ "d3d12/D3D11on12Util.cpp"
+ "d3d12/D3D11on12Util.h"
+ "d3d12/D3D12Error.cpp"
+ "d3d12/D3D12Error.h"
+ "d3d12/D3D12Info.cpp"
+ "d3d12/D3D12Info.h"
+ "d3d12/DeviceD3D12.cpp"
+ "d3d12/DeviceD3D12.h"
+ "d3d12/Forward.h"
+ "d3d12/GPUDescriptorHeapAllocationD3D12.cpp"
+ "d3d12/GPUDescriptorHeapAllocationD3D12.h"
+ "d3d12/HeapAllocatorD3D12.cpp"
+ "d3d12/HeapAllocatorD3D12.h"
+ "d3d12/HeapD3D12.cpp"
+ "d3d12/HeapD3D12.h"
+ "d3d12/IntegerTypes.h"
+ "d3d12/NativeSwapChainImplD3D12.cpp"
+ "d3d12/NativeSwapChainImplD3D12.h"
+ "d3d12/PageableD3D12.cpp"
+ "d3d12/PageableD3D12.h"
+ "d3d12/PipelineLayoutD3D12.cpp"
+ "d3d12/PipelineLayoutD3D12.h"
+ "d3d12/PlatformFunctions.cpp"
+ "d3d12/PlatformFunctions.h"
+ "d3d12/QuerySetD3D12.cpp"
+ "d3d12/QuerySetD3D12.h"
+ "d3d12/QueueD3D12.cpp"
+ "d3d12/QueueD3D12.h"
+ "d3d12/RenderPassBuilderD3D12.cpp"
+ "d3d12/RenderPassBuilderD3D12.h"
+ "d3d12/RenderPipelineD3D12.cpp"
+ "d3d12/RenderPipelineD3D12.h"
+ "d3d12/ResidencyManagerD3D12.cpp"
+ "d3d12/ResidencyManagerD3D12.h"
+ "d3d12/ResourceAllocatorManagerD3D12.cpp"
+ "d3d12/ResourceAllocatorManagerD3D12.h"
+ "d3d12/ResourceHeapAllocationD3D12.cpp"
+ "d3d12/ResourceHeapAllocationD3D12.h"
+ "d3d12/SamplerD3D12.cpp"
+ "d3d12/SamplerD3D12.h"
+ "d3d12/SamplerHeapCacheD3D12.cpp"
+ "d3d12/SamplerHeapCacheD3D12.h"
+ "d3d12/ShaderModuleD3D12.cpp"
+ "d3d12/ShaderModuleD3D12.h"
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp"
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+ "d3d12/StagingBufferD3D12.cpp"
+ "d3d12/StagingBufferD3D12.h"
+ "d3d12/StagingDescriptorAllocatorD3D12.cpp"
+ "d3d12/StagingDescriptorAllocatorD3D12.h"
+ "d3d12/SwapChainD3D12.cpp"
+ "d3d12/SwapChainD3D12.h"
+ "d3d12/TextureCopySplitter.cpp"
+ "d3d12/TextureCopySplitter.h"
+ "d3d12/TextureD3D12.cpp"
+ "d3d12/TextureD3D12.h"
+ "d3d12/UtilsD3D12.cpp"
+ "d3d12/UtilsD3D12.h"
+ "d3d12/d3d12_platform.h"
+ )
+ target_link_libraries(dawn_native PRIVATE dxguid.lib)
+endif()
+
+if (DAWN_ENABLE_METAL)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/MetalBackend.h"
+ "Surface_metal.mm"
+ "metal/BackendMTL.h"
+ "metal/BackendMTL.mm"
+ "metal/BindGroupLayoutMTL.h"
+ "metal/BindGroupLayoutMTL.mm"
+ "metal/BindGroupMTL.h"
+ "metal/BindGroupMTL.mm"
+ "metal/BufferMTL.h"
+ "metal/BufferMTL.mm"
+ "metal/CommandBufferMTL.h"
+ "metal/CommandBufferMTL.mm"
+ "metal/CommandRecordingContext.h"
+ "metal/CommandRecordingContext.mm"
+ "metal/ComputePipelineMTL.h"
+ "metal/ComputePipelineMTL.mm"
+ "metal/DeviceMTL.h"
+ "metal/DeviceMTL.mm"
+ "metal/Forward.h"
+ "metal/PipelineLayoutMTL.h"
+ "metal/PipelineLayoutMTL.mm"
+ "metal/QueueMTL.h"
+ "metal/QueueMTL.mm"
+ "metal/QuerySetMTL.h"
+ "metal/QuerySetMTL.mm"
+ "metal/RenderPipelineMTL.h"
+ "metal/RenderPipelineMTL.mm"
+ "metal/SamplerMTL.h"
+ "metal/SamplerMTL.mm"
+ "metal/ShaderModuleMTL.h"
+ "metal/ShaderModuleMTL.mm"
+ "metal/StagingBufferMTL.h"
+ "metal/StagingBufferMTL.mm"
+ "metal/SwapChainMTL.h"
+ "metal/SwapChainMTL.mm"
+ "metal/TextureMTL.h"
+ "metal/TextureMTL.mm"
+ "metal/UtilsMetal.h"
+ "metal/UtilsMetal.mm"
+ )
+ target_link_libraries(dawn_native PRIVATE
+ "-framework Cocoa"
+ "-framework IOKit"
+ "-framework IOSurface"
+ "-framework QuartzCore"
+ "-framework Metal"
+ )
+endif()
+
+if (DAWN_ENABLE_NULL)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/NullBackend.h"
+ "null/DeviceNull.cpp"
+ "null/DeviceNull.h"
+ )
+endif()
+
+if (DAWN_ENABLE_OPENGL OR DAWN_ENABLE_VULKAN)
+ target_sources(dawn_native PRIVATE
+ "SpirvValidation.cpp"
+ "SpirvValidation.h"
+ )
+endif()
+
+if (DAWN_ENABLE_OPENGL)
+ DawnGenerator(
+ SCRIPT "${Dawn_SOURCE_DIR}/generator/opengl_loader_generator.py"
+ PRINT_NAME "OpenGL function loader"
+ ARGS "--gl-xml"
+ "${Dawn_SOURCE_DIR}/third_party/khronos/gl.xml"
+ "--supported-extensions"
+ "${Dawn_SOURCE_DIR}/src/dawn/native/opengl/supported_extensions.json"
+ RESULT_VARIABLE "DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES"
+ )
+
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/OpenGLBackend.h"
+ ${DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES}
+ "opengl/BackendGL.cpp"
+ "opengl/BackendGL.h"
+ "opengl/BindGroupGL.cpp"
+ "opengl/BindGroupGL.h"
+ "opengl/BindGroupLayoutGL.cpp"
+ "opengl/BindGroupLayoutGL.h"
+ "opengl/BufferGL.cpp"
+ "opengl/BufferGL.h"
+ "opengl/CommandBufferGL.cpp"
+ "opengl/CommandBufferGL.h"
+ "opengl/ComputePipelineGL.cpp"
+ "opengl/ComputePipelineGL.h"
+ "opengl/DeviceGL.cpp"
+ "opengl/DeviceGL.h"
+ "opengl/Forward.h"
+ "opengl/GLFormat.cpp"
+ "opengl/GLFormat.h"
+ "opengl/NativeSwapChainImplGL.cpp"
+ "opengl/NativeSwapChainImplGL.h"
+ "opengl/OpenGLFunctions.cpp"
+ "opengl/OpenGLFunctions.h"
+ "opengl/OpenGLVersion.cpp"
+ "opengl/OpenGLVersion.h"
+ "opengl/PersistentPipelineStateGL.cpp"
+ "opengl/PersistentPipelineStateGL.h"
+ "opengl/PipelineGL.cpp"
+ "opengl/PipelineGL.h"
+ "opengl/PipelineLayoutGL.cpp"
+ "opengl/PipelineLayoutGL.h"
+ "opengl/QuerySetGL.cpp"
+ "opengl/QuerySetGL.h"
+ "opengl/QueueGL.cpp"
+ "opengl/QueueGL.h"
+ "opengl/RenderPipelineGL.cpp"
+ "opengl/RenderPipelineGL.h"
+ "opengl/SamplerGL.cpp"
+ "opengl/SamplerGL.h"
+ "opengl/ShaderModuleGL.cpp"
+ "opengl/ShaderModuleGL.h"
+ "opengl/SwapChainGL.cpp"
+ "opengl/SwapChainGL.h"
+ "opengl/TextureGL.cpp"
+ "opengl/TextureGL.h"
+ "opengl/UtilsGL.cpp"
+ "opengl/UtilsGL.h"
+ "opengl/opengl_platform.h"
+ )
+
+ target_link_libraries(dawn_native PRIVATE dawn_khronos_platform)
+endif()
+
+if (DAWN_ENABLE_VULKAN)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/native/VulkanBackend.h"
+ "vulkan/AdapterVk.cpp"
+ "vulkan/AdapterVk.h"
+ "vulkan/BackendVk.cpp"
+ "vulkan/BackendVk.h"
+ "vulkan/BindGroupLayoutVk.cpp"
+ "vulkan/BindGroupLayoutVk.h"
+ "vulkan/BindGroupVk.cpp"
+ "vulkan/BindGroupVk.h"
+ "vulkan/BufferVk.cpp"
+ "vulkan/BufferVk.h"
+ "vulkan/CommandBufferVk.cpp"
+ "vulkan/CommandBufferVk.h"
+ "vulkan/CommandRecordingContext.h"
+ "vulkan/ComputePipelineVk.cpp"
+ "vulkan/ComputePipelineVk.h"
+ "vulkan/DescriptorSetAllocation.h"
+ "vulkan/DescriptorSetAllocator.cpp"
+ "vulkan/DescriptorSetAllocator.h"
+ "vulkan/DeviceVk.cpp"
+ "vulkan/DeviceVk.h"
+ "vulkan/ExternalHandle.h"
+ "vulkan/FencedDeleter.cpp"
+ "vulkan/FencedDeleter.h"
+ "vulkan/Forward.h"
+ "vulkan/NativeSwapChainImplVk.cpp"
+ "vulkan/NativeSwapChainImplVk.h"
+ "vulkan/PipelineLayoutVk.cpp"
+ "vulkan/PipelineLayoutVk.h"
+ "vulkan/QuerySetVk.cpp"
+ "vulkan/QuerySetVk.h"
+ "vulkan/QueueVk.cpp"
+ "vulkan/QueueVk.h"
+ "vulkan/RenderPassCache.cpp"
+ "vulkan/RenderPassCache.h"
+ "vulkan/RenderPipelineVk.cpp"
+ "vulkan/RenderPipelineVk.h"
+ "vulkan/ResourceHeapVk.cpp"
+ "vulkan/ResourceHeapVk.h"
+ "vulkan/ResourceMemoryAllocatorVk.cpp"
+ "vulkan/ResourceMemoryAllocatorVk.h"
+ "vulkan/SamplerVk.cpp"
+ "vulkan/SamplerVk.h"
+ "vulkan/ShaderModuleVk.cpp"
+ "vulkan/ShaderModuleVk.h"
+ "vulkan/StagingBufferVk.cpp"
+ "vulkan/StagingBufferVk.h"
+ "vulkan/SwapChainVk.cpp"
+ "vulkan/SwapChainVk.h"
+ "vulkan/TextureVk.cpp"
+ "vulkan/TextureVk.h"
+ "vulkan/UtilsVulkan.cpp"
+ "vulkan/UtilsVulkan.h"
+ "vulkan/VulkanError.cpp"
+ "vulkan/VulkanError.h"
+ "vulkan/VulkanExtensions.cpp"
+ "vulkan/VulkanExtensions.h"
+ "vulkan/VulkanFunctions.cpp"
+ "vulkan/VulkanFunctions.h"
+ "vulkan/VulkanInfo.cpp"
+ "vulkan/VulkanInfo.h"
+ "vulkan/external_memory/MemoryService.h"
+ "vulkan/external_semaphore/SemaphoreService.h"
+ )
+
+ target_link_libraries(dawn_native PUBLIC dawn_vulkan_headers)
+
+ if (UNIX AND NOT APPLE)
+ target_sources(dawn_native PRIVATE
+ "vulkan/external_memory/MemoryServiceOpaqueFD.cpp"
+ "vulkan/external_semaphore/SemaphoreServiceFD.cpp"
+ )
+ else()
+ target_sources(dawn_native PRIVATE
+ "vulkan/external_memory/MemoryServiceNull.cpp"
+ "vulkan/external_semaphore/SemaphoreServiceNull.cpp"
+ )
+ endif()
+endif()
+
+# TODO how to do the component build in CMake?
+target_sources(dawn_native PRIVATE "DawnNative.cpp")
+if (DAWN_ENABLE_D3D12)
+ target_sources(dawn_native PRIVATE "d3d12/D3D12Backend.cpp")
+endif()
+if (DAWN_ENABLE_METAL)
+ target_sources(dawn_native PRIVATE "metal/MetalBackend.mm")
+endif()
+if (DAWN_ENABLE_NULL)
+ target_sources(dawn_native PRIVATE "null/NullBackend.cpp")
+endif()
+if (DAWN_ENABLE_OPENGL)
+ target_sources(dawn_native PRIVATE "opengl/OpenGLBackend.cpp")
+endif()
+if (DAWN_ENABLE_VULKAN)
+ target_sources(dawn_native PRIVATE "vulkan/VulkanBackend.cpp")
+endif()
+
+DawnJSONGenerator(
+ TARGET "webgpu_dawn_native_proc"
+ PRINT_NAME "Dawn native WebGPU procs"
+ RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
+)
+
+add_library(webgpu_dawn ${DAWN_DUMMY_FILE})
+target_link_libraries(webgpu_dawn PRIVATE dawn_native)
+target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(webgpu_dawn PRIVATE "WGPU_SHARED_LIBRARY")
+endif()
+target_sources(webgpu_dawn PRIVATE ${WEBGPU_DAWN_NATIVE_PROC_GEN})
diff --git a/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp b/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp
new file mode 100644
index 00000000000..9a6d0647018
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CachedObject.cpp
@@ -0,0 +1,44 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CachedObject.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+ bool CachedObject::IsCachedReference() const {
+ return mIsCachedReference;
+ }
+
+ void CachedObject::SetIsCachedReference() {
+ mIsCachedReference = true;
+ }
+
+ size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
+ return obj->GetContentHash();
+ }
+
+ size_t CachedObject::GetContentHash() const {
+ ASSERT(mIsContentHashInitialized);
+ return mContentHash;
+ }
+
+ void CachedObject::SetContentHash(size_t contentHash) {
+ ASSERT(!mIsContentHashInitialized);
+ mContentHash = contentHash;
+ mIsContentHashInitialized = true;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CachedObject.h b/chromium/third_party/dawn/src/dawn/native/CachedObject.h
new file mode 100644
index 00000000000..b6c011511fb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CachedObject.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CACHED_OBJECT_H_
+#define DAWNNATIVE_CACHED_OBJECT_H_
+
+#include <cstddef>
+
+namespace dawn::native {
+
+ // Some objects are cached so that instead of creating new duplicate objects,
+ // we increase the refcount of an existing object.
+ // When an object is successfully created, the device should call
+ // SetIsCachedReference() and insert the object into the cache.
+ class CachedObject {
+ public:
+ bool IsCachedReference() const;
+
+ // Functor necessary for the unordered_set<CachedObject*>-based cache.
+ struct HashFunc {
+ size_t operator()(const CachedObject* obj) const;
+ };
+
+ size_t GetContentHash() const;
+ void SetContentHash(size_t contentHash);
+
+ private:
+ friend class DeviceBase;
+ void SetIsCachedReference();
+
+ bool mIsCachedReference = false;
+
+ // Called by ObjectContentHasher upon creation to record the object.
+ virtual size_t ComputeContentHash() = 0;
+
+ size_t mContentHash = 0;
+ bool mIsContentHashInitialized = false;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_CACHED_OBJECT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp
new file mode 100644
index 00000000000..a8be5cc744f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.cpp
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CallbackTaskManager.h"
+
+namespace dawn::native {
+
+ bool CallbackTaskManager::IsEmpty() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ return mCallbackTaskQueue.empty();
+ }
+
+ std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+
+ std::vector<std::unique_ptr<CallbackTask>> allTasks;
+ allTasks.swap(mCallbackTaskQueue);
+ return allTasks;
+ }
+
+ void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+ std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+ mCallbackTaskQueue.push_back(std::move(callbackTask));
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h
new file mode 100644
index 00000000000..37fddd431d7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CallbackTaskManager.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+#define DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
+
+#include <memory>
+#include <mutex>
+#include <vector>
+
+namespace dawn::native {
+
+ struct CallbackTask {
+ public:
+ virtual ~CallbackTask() = default;
+ virtual void Finish() = 0;
+ virtual void HandleShutDown() = 0;
+ virtual void HandleDeviceLoss() = 0;
+ };
+
+ class CallbackTaskManager {
+ public:
+ void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+ bool IsEmpty();
+ std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+
+ private:
+ std::mutex mCallbackTaskQueueMutex;
+ std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+ };
+
+} // namespace dawn::native
+
+#endif
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp
new file mode 100644
index 00000000000..5d36aad0b41
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.cpp
@@ -0,0 +1,228 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandAllocator.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+
+#include <algorithm>
+#include <climits>
+#include <cstdlib>
+#include <utility>
+
+namespace dawn::native {
+
+ // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
+
+ CommandIterator::CommandIterator() {
+ Reset();
+ }
+
+ CommandIterator::~CommandIterator() {
+ ASSERT(IsEmpty());
+ }
+
+ CommandIterator::CommandIterator(CommandIterator&& other) {
+ if (!other.IsEmpty()) {
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
+ }
+ Reset();
+ }
+
+ CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
+ ASSERT(IsEmpty());
+ if (!other.IsEmpty()) {
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
+ }
+ Reset();
+ return *this;
+ }
+
+ CommandIterator::CommandIterator(CommandAllocator allocator)
+ : mBlocks(allocator.AcquireBlocks()) {
+ Reset();
+ }
+
+ void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
+ ASSERT(IsEmpty());
+ mBlocks.clear();
+ for (CommandAllocator& allocator : allocators) {
+ CommandBlocks blocks = allocator.AcquireBlocks();
+ if (!blocks.empty()) {
+ mBlocks.reserve(mBlocks.size() + blocks.size());
+ for (BlockDef& block : blocks) {
+ mBlocks.push_back(std::move(block));
+ }
+ }
+ }
+ Reset();
+ }
+
+ bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+ mCurrentBlock++;
+ if (mCurrentBlock >= mBlocks.size()) {
+ Reset();
+ *commandId = detail::kEndOfBlock;
+ return false;
+ }
+ mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+ return NextCommandId(commandId);
+ }
+
+ void CommandIterator::Reset() {
+ mCurrentBlock = 0;
+
+ if (mBlocks.empty()) {
+            // This will cause the first NextCommandId call to try to move to the next block and stop
+ // the iteration immediately, without special casing the initialization.
+ mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
+ mBlocks.emplace_back();
+ mBlocks[0].size = sizeof(mEndOfBlock);
+ mBlocks[0].block = mCurrentPtr;
+ } else {
+ mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
+ }
+ }
+
+ void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+ if (IsEmpty()) {
+ return;
+ }
+
+ for (BlockDef& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ Reset();
+ ASSERT(IsEmpty());
+ }
+
+ bool CommandIterator::IsEmpty() const {
+ return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+ }
+
+ // Potential TODO(crbug.com/dawn/835):
+ // - Host the size and pointer to next block in the block itself to avoid having an allocation
+ // in the vector
+ // - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
+ // in Allocate
+ // - Be able to optimize allocation to one block, for command buffers expected to live long to
+ // avoid cache misses
+ // - Better block allocation, maybe have Dawn API to say command buffer is going to have size
+ // close to another
+
+ CommandAllocator::CommandAllocator() {
+ ResetPointers();
+ }
+
+ CommandAllocator::~CommandAllocator() {
+ Reset();
+ }
+
+ CommandAllocator::CommandAllocator(CommandAllocator&& other)
+ : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+ other.mBlocks.clear();
+ if (!other.IsEmpty()) {
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ } else {
+ ResetPointers();
+ }
+ other.Reset();
+ }
+
+ CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+ Reset();
+ if (!other.IsEmpty()) {
+ std::swap(mBlocks, other.mBlocks);
+ mLastAllocationSize = other.mLastAllocationSize;
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ }
+ other.Reset();
+ return *this;
+ }
+
+ void CommandAllocator::Reset() {
+ for (BlockDef& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ mLastAllocationSize = kDefaultBaseAllocationSize;
+ ResetPointers();
+ }
+
+ bool CommandAllocator::IsEmpty() const {
+ return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
+ }
+
+ CommandBlocks&& CommandAllocator::AcquireBlocks() {
+ ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+ *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
+
+ mCurrentPtr = nullptr;
+ mEndPtr = nullptr;
+ return std::move(mBlocks);
+ }
+
+ uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+ // to move to the next one. kEndOfBlock on the last block means the end of the commands.
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = detail::kEndOfBlock;
+
+ // We'll request a block that can contain at least the command ID, the command and an
+ // additional ID to contain the kEndOfBlock tag.
+ size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+ // The computation of the request could overflow.
+ if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+ return nullptr;
+ }
+
+ if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+ return nullptr;
+ }
+ return Allocate(commandId, commandSize, commandAlignment);
+ }
+
+ bool CommandAllocator::GetNewBlock(size_t minimumSize) {
+ // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
+ mLastAllocationSize =
+ std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
+
+ uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
+ if (DAWN_UNLIKELY(block == nullptr)) {
+ return false;
+ }
+
+ mBlocks.push_back({mLastAllocationSize, block});
+ mCurrentPtr = AlignPtr(block, alignof(uint32_t));
+ mEndPtr = block + mLastAllocationSize;
+ return true;
+ }
+
+ void CommandAllocator::ResetPointers() {
+ mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
+ mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h
new file mode 100644
index 00000000000..9d2b4718252
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandAllocator.h
@@ -0,0 +1,273 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMAND_ALLOCATOR_H_
+#define DAWNNATIVE_COMMAND_ALLOCATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/NonCopyable.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace dawn::native {
+
+ // Allocation for command buffers should be fast. To avoid doing an allocation per command
+ // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
+    // of large memory blocks. We also use this to make the format (u32 commandId, command),
+ // so that iteration over the commands is easy.
+
+ // Usage of the allocator and iterator:
+ // CommandAllocator allocator;
+ // DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
+ // // Fill command
+ // // Repeat allocation and filling commands
+ //
+ // CommandIterator commands(allocator);
+ // CommandType type;
+ // while(commands.NextCommandId(&type)) {
+ // switch(type) {
+ // case CommandType::Draw:
+ // DrawCommand* draw = commands.NextCommand<DrawCommand>();
+ // // Do the draw
+ // break;
+ // // other cases
+ // }
+ // }
+
+ // Note that you need to extract the commands from the CommandAllocator before destroying it
+ // and must tell the CommandIterator when the allocated commands have been processed for
+ // deletion.
+
+ // These are the lists of blocks, should not be used directly, only through CommandAllocator
+ // and CommandIterator
+ struct BlockDef {
+ size_t size;
+ uint8_t* block;
+ };
+ using CommandBlocks = std::vector<BlockDef>;
+
+ namespace detail {
+ constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+ } // namespace detail
+
+ class CommandAllocator;
+
+ class CommandIterator : public NonCopyable {
+ public:
+ CommandIterator();
+ ~CommandIterator();
+
+ CommandIterator(CommandIterator&& other);
+ CommandIterator& operator=(CommandIterator&& other);
+
+ // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+ explicit CommandIterator(CommandAllocator allocator);
+
+ void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
+
+ template <typename E>
+ bool NextCommandId(E* commandId) {
+ return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+ }
+ template <typename T>
+ T* NextCommand() {
+ return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+ }
+ template <typename T>
+ T* NextData(size_t count) {
+ return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+ }
+
+ // Sets iterator to the beginning of the commands without emptying the list. This method can
+ // be used if iteration was stopped early and the iterator needs to be restarted.
+ void Reset();
+
+        // This method must be called after the commands have been deleted. This indicates
+        // that the commands have been submitted and they are no longer valid.
+ void MakeEmptyAsDataWasDestroyed();
+
+ private:
+ bool IsEmpty() const;
+
+ DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+ uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+ ASSERT(idPtr + sizeof(uint32_t) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+ if (id != detail::kEndOfBlock) {
+ mCurrentPtr = idPtr + sizeof(uint32_t);
+ *commandId = id;
+ return true;
+ }
+ return NextCommandIdInNewBlock(commandId);
+ }
+
+ bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+ DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+ uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+ ASSERT(commandPtr + sizeof(commandSize) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ mCurrentPtr = commandPtr + commandSize;
+ return commandPtr;
+ }
+
+ DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+ uint32_t id;
+ bool hasId = NextCommandId(&id);
+ ASSERT(hasId);
+ ASSERT(id == detail::kAdditionalData);
+
+ return NextCommand(dataSize, dataAlignment);
+ }
+
+ CommandBlocks mBlocks;
+ uint8_t* mCurrentPtr = nullptr;
+ size_t mCurrentBlock = 0;
+ // Used to avoid a special case for empty iterators.
+ uint32_t mEndOfBlock = detail::kEndOfBlock;
+ };
+
+ class CommandAllocator : public NonCopyable {
+ public:
+ CommandAllocator();
+ ~CommandAllocator();
+
+ // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+ CommandAllocator(CommandAllocator&&);
+ CommandAllocator& operator=(CommandAllocator&&);
+
+ // Frees all blocks held by the allocator and restores it to its initial empty state.
+ void Reset();
+
+ bool IsEmpty() const;
+
+ template <typename T, typename E>
+ T* Allocate(E commandId) {
+ static_assert(sizeof(E) == sizeof(uint32_t));
+ static_assert(alignof(E) == alignof(uint32_t));
+ static_assert(alignof(T) <= kMaxSupportedAlignment);
+ T* result = reinterpret_cast<T*>(
+ Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
+ if (!result) {
+ return nullptr;
+ }
+ new (result) T;
+ return result;
+ }
+
+ template <typename T>
+ T* AllocateData(size_t count) {
+ static_assert(alignof(T) <= kMaxSupportedAlignment);
+ T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
+ if (!result) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < count; i++) {
+ new (result + i) T;
+ }
+ return result;
+ }
+
+ private:
+ // This is used for some internal computations and can be any power of two as long as code
+ // using the CommandAllocator passes the static_asserts.
+ static constexpr size_t kMaxSupportedAlignment = 8;
+
+ // To avoid checking for overflows at every step of the computations we compute an upper
+ // bound of the space that will be needed in addition to the command data.
+ static constexpr size_t kWorstCaseAdditionalSize =
+ sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+
+ // The default value of mLastAllocationSize.
+ static constexpr size_t kDefaultBaseAllocationSize = 2048;
+
+ friend CommandIterator;
+ CommandBlocks&& AcquireBlocks();
+
+ DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ ASSERT(mCurrentPtr != nullptr);
+ ASSERT(mEndPtr != nullptr);
+ ASSERT(commandId != detail::kEndOfBlock);
+
+            // It should always be possible to allocate one id, for kEndOfBlock tagging.
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mEndPtr >= mCurrentPtr);
+ ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+
+ // The memory after the ID will contain the following:
+ // - the current ID
+ // - padding to align the command, maximum kMaxSupportedAlignment
+ // - the command of size commandSize
+ // - padding to align the next ID, maximum alignof(uint32_t)
+ // - the next ID of size sizeof(uint32_t)
+
+ // This can't overflow because by construction mCurrentPtr always has space for the next
+ // ID.
+ size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+            // The good case where we have enough space for the command data and an upper
+            // bound of the extra required space.
+ if ((remainingSize >= kWorstCaseAdditionalSize) &&
+ (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = commandId;
+
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+ mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+ return commandAlloc;
+ }
+ return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+ }
+
+ uint8_t* AllocateInNewBlock(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment);
+
+ DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+ return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
+ }
+
+ bool GetNewBlock(size_t minimumSize);
+
+ void ResetPointers();
+
+ CommandBlocks mBlocks;
+ size_t mLastAllocationSize = kDefaultBaseAllocationSize;
+
+ // Data used for the block range at initialization so that the first call to Allocate sees
+ // there is not enough space and calls GetNewBlock. This avoids having to special case the
+ // initialization in Allocate.
+ uint32_t mDummyEnum[1] = {0};
+
+ // Pointers to the current range of allocation in the block. Guaranteed to allow for at
+ // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
+ // be written. Nullptr iff the blocks were moved out.
+ uint8_t* mCurrentPtr = nullptr;
+ uint8_t* mEndPtr = nullptr;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMAND_ALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp
new file mode 100644
index 00000000000..f8c7836b40f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.cpp
@@ -0,0 +1,245 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandBuffer.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+ CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor)
+ : ApiObjectBase(encoder->GetDevice(), descriptor->label),
+ mCommands(encoder->AcquireCommands()),
+ mResourceUsages(encoder->AcquireResourceUsages()) {
+ TrackInDevice();
+ }
+
+ CommandBufferBase::CommandBufferBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ // static
+ CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
+ return new CommandBufferBase(device, ObjectBase::kError);
+ }
+
+ ObjectType CommandBufferBase::GetType() const {
+ return ObjectType::CommandBuffer;
+ }
+
+ MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+
+ DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
+ return {};
+ }
+
+ void CommandBufferBase::DestroyImpl() {
+ FreeCommands(&mCommands);
+ mResourceUsages = {};
+ }
+
+ const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
+ return mResourceUsages;
+ }
+
+ CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
+ return &mCommands;
+ }
+
+ bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+ const Extent3D copySize,
+ const uint32_t mipLevel) {
+ Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
+
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ return extent.width == copySize.width;
+ case wgpu::TextureDimension::e2D:
+ return extent.width == copySize.width && extent.height == copySize.height;
+ case wgpu::TextureDimension::e3D:
+ return extent.width == copySize.width && extent.height == copySize.height &&
+ extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+ }
+ }
+
+ SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+ const Extent3D& copySize) {
+ switch (copy.texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+ ASSERT(copy.mipLevel == 0);
+ return {copy.aspect, {0, 1}, {0, 1}};
+ case wgpu::TextureDimension::e2D:
+ return {
+ copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
+ case wgpu::TextureDimension::e3D:
+ return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
+ }
+ }
+
+ void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ auto& attachmentInfo = renderPass->colorAttachments[i];
+ TextureViewBase* view = attachmentInfo.view.Get();
+ bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
+
+ // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
+ if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
+ !view->GetTexture()->IsSubresourceContentInitialized(range)) {
+ attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+ attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
+ }
+
+ if (hasResolveTarget) {
+ // We need to set the resolve target to initialized so that it does not get
+ // cleared later in the pipeline. The texture will be resolved from the
+ // source color attachment, which will be correctly initialized.
+ TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+ ASSERT(resolveView->GetLayerCount() == 1);
+ ASSERT(resolveView->GetLevelCount() == 1);
+ resolveView->GetTexture()->SetIsSubresourceContentInitialized(
+ true, resolveView->GetSubresourceRange());
+ }
+
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
+ break;
+
+ case wgpu::StoreOp::Discard:
+ view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
+ break;
+
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
+ TextureViewBase* view = attachmentInfo.view.Get();
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+ SubresourceRange range = view->GetSubresourceRange();
+
+ SubresourceRange depthRange = range;
+ depthRange.aspects = range.aspects & Aspect::Depth;
+
+ SubresourceRange stencilRange = range;
+ stencilRange.aspects = range.aspects & Aspect::Stencil;
+
+ // If the depth stencil texture has not been initialized, we want to use loadop
+ // clear to init the contents to 0's
+ if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
+ attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ }
+
+ if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
+ attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+ }
+
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
+
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+ }
+ }
+
+ bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
+ ASSERT(copy != nullptr);
+
+ if (copy->destination.offset > 0) {
+ // The copy doesn't touch the start of the buffer.
+ return false;
+ }
+
+ const TextureBase* texture = copy->source.texture.Get();
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
+ const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
+ const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
+ const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
+ const bool multiRow = multiSlice || heightInBlocks > 1;
+
+ if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
+ // There are gaps between slices that aren't overwritten
+ return false;
+ }
+
+ const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
+ if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
+ // There are gaps between rows that aren't overwritten
+ return false;
+ }
+
+ // After the above checks, we're sure the copy has no gaps.
+ // Now, compute the total number of bytes written.
+ const uint64_t writtenBytes =
+ ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
+ copy->destination.rowsPerImage)
+ .AcquireSuccess();
+ if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
+ // The written bytes don't cover the whole buffer.
+ return false;
+ }
+
+ return true;
+ }
+
+ std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
+ const std::array<float, 4> outputValue = {
+ static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
+ static_cast<float>(color.a)};
+ return outputValue;
+ }
+ std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
+ const std::array<int32_t, 4> outputValue = {
+ static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
+ static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
+ return outputValue;
+ }
+
+ std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
+ const std::array<uint32_t, 4> outputValue = {
+ static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
+ static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
+ return outputValue;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h
new file mode 100644
index 00000000000..3d9d71a0ba5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBuffer.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDBUFFER_H_
+#define DAWNNATIVE_COMMANDBUFFER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+ struct BeginRenderPassCmd;
+ struct CopyTextureToBufferCmd;
+ struct TextureCopy;
+
+ class CommandBufferBase : public ApiObjectBase {
+ public:
+ CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+ static CommandBufferBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ MaybeError ValidateCanUseInSubmitNow() const;
+
+ const CommandBufferResourceUsage& GetResourceUsages() const;
+
+ CommandIterator* GetCommandIteratorForTesting();
+
+ protected:
+ // Constructor used only for mocking and testing.
+ CommandBufferBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ CommandIterator mCommands;
+
+ private:
+ CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ CommandBufferResourceUsage mResourceUsages;
+ };
+
+ bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+ const Extent3D copySize,
+ const uint32_t mipLevel);
+ SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+ const Extent3D& copySize);
+
+ void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+
+ bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
+
+ std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
+ std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
+ std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp
new file mode 100644
index 00000000000..370139ef816
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.cpp
@@ -0,0 +1,407 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandBufferStateTracker.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/RenderPipeline.h"
+
+// TODO(dawn:563): None of the error messages in this file include the buffer objects they are
+// validating against. It would be nice to improve that, but difficult to do without incurring
+// additional tracking costs.
+
+namespace dawn::native {
+
+ namespace {
+ bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
+ const std::vector<uint64_t>& pipelineMinBufferSizes) {
+ ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
+
+ for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
+ if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ } // namespace
+
+ enum ValidationAspect {
+ VALIDATION_ASPECT_PIPELINE,
+ VALIDATION_ASPECT_BIND_GROUPS,
+ VALIDATION_ASPECT_VERTEX_BUFFERS,
+ VALIDATION_ASPECT_INDEX_BUFFER,
+
+ VALIDATION_ASPECT_COUNT
+ };
+ static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
+
+ static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
+
+ static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+ 1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
+
+ static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
+ 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+ 1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+ static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
+ 1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
+ 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+ MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
+ return ValidateOperation(kDispatchAspects);
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateCanDraw() {
+ return ValidateOperation(kDrawAspects);
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
+ return ValidateOperation(kDrawIndexedAspects);
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
+ uint32_t vertexCount,
+ uint32_t firstVertex) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ vertexBufferSlotsUsedAsVertexBuffer =
+ lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
+
+ for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
+ const VertexBufferInfo& vertexBuffer =
+ lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
+ uint64_t arrayStride = vertexBuffer.arrayStride;
+ uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
+
+ if (arrayStride == 0) {
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotVertex),
+ vertexBuffer.usedBytesInStride);
+ } else {
+ uint64_t requiredSize =
+ (static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride;
+ // firstVertex and vertexCount are in uint32_t, and arrayStride must not
+ // be larger than kMaxVertexBufferArrayStride, which is currently 2048. So by
+ // doing checks in uint64_t we avoid overflows.
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
+ firstVertex, vertexCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotVertex), arrayStride);
+ }
+ }
+
+ return {};
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
+ uint32_t instanceCount,
+ uint32_t firstInstance) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ vertexBufferSlotsUsedAsInstanceBuffer =
+ lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
+
+ for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
+ const VertexBufferInfo& vertexBuffer =
+ lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
+ uint64_t arrayStride = vertexBuffer.arrayStride;
+ uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
+ if (arrayStride == 0) {
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotInstance),
+ vertexBuffer.usedBytesInStride);
+ } else {
+ uint64_t requiredSize =
+ (static_cast<uint64_t>(firstInstance) + instanceCount) * arrayStride;
+ // firstInstance and instanceCount are in uint32_t, and arrayStride must
+ // not be larger than kMaxVertexBufferArrayStride, which is currently 2048.
+ // So by doing checks in uint64_t we avoid overflows.
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Instance range (first: %u, count: %u) requires a larger buffer (%u) than the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
+ firstInstance, instanceCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotInstance), arrayStride);
+ }
+ }
+
+ return {};
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
+ uint32_t firstIndex) {
+ // Validate the range of index buffer
+ // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
+ // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
+ // uint64_t we avoid overflows.
+ DAWN_INVALID_IF(
+ (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
+ mIndexBufferSize,
+ "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
+ "(%u).",
+ firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
+ return {};
+ }
+
+ MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
+ // Fast return-true path if everything is good
+ ValidationAspects missingAspects = requiredAspects & ~mAspects;
+ if (missingAspects.none()) {
+ return {};
+ }
+
+ // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
+ // requires the pipeline to be set.
+ DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
+
+ RecomputeLazyAspects(missingAspects);
+
+ DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
+
+ return {};
+ }
+
+ void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
+ ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
+ ASSERT((aspects & ~kLazyAspects).none());
+
+ if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
+ bool matches = true;
+
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ if (mBindgroups[i] == nullptr ||
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
+ !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinBufferSizes)[i])) {
+ matches = false;
+ break;
+ }
+ }
+
+ if (matches) {
+ mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
+ }
+ }
+
+ if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
+ lastRenderPipeline->GetVertexBufferSlotsUsed();
+ if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
+ mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
+ }
+ }
+
+ if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+ if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
+ mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
+ mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
+ }
+ }
+ }
+
+ MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
+ if (!aspects.any()) {
+ return {};
+ }
+
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
+
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
+ DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
+
+ RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+ wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
+
+ if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
+ DAWN_INVALID_IF(
+ pipelineIndexFormat == wgpu::IndexFormat::Undefined,
+ "%s has a strip primitive topology (%s) but a strip index format of %s, which "
+ "prevents it for being used for indexed draw calls.",
+ lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
+ pipelineIndexFormat);
+
+ DAWN_INVALID_IF(
+ mIndexFormat != pipelineIndexFormat,
+ "Strip index format (%s) of %s does not match index buffer format (%s).",
+ pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
+ }
+
+ // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+ // It returns the first invalid state found. We shouldn't be able to reach this line
+ // because to have invalid aspects one of the above conditions must have failed earlier.
+ // If this is reached, make sure lazy aspects and the error checks above are consistent.
+ UNREACHABLE();
+ return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
+ }
+
+ // TODO(dawn:563): Indicate which slots were not set.
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
+ "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
+
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
+ for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+ ASSERT(HasPipeline());
+
+ DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
+ static_cast<uint32_t>(i));
+
+ BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
+ BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
+
+ DAWN_INVALID_IF(
+ requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
+ currentBGL->GetPipelineCompatibilityToken() !=
+ requiredBGL->GetPipelineCompatibilityToken(),
+ "The current pipeline (%s) was created with a default layout, and is not "
+ "compatible with the %s at index %u which uses a %s that was not created by "
+ "the pipeline. Either use the bind group layout returned by calling "
+ "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
+ "provide an explicit pipeline layout when creating the pipeline.",
+ mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
+ static_cast<uint32_t>(i));
+
+ DAWN_INVALID_IF(
+ requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
+ currentBGL->GetPipelineCompatibilityToken() !=
+ PipelineCompatibilityToken(0),
+ "%s at index %u uses a %s which was created as part of the default layout for "
+ "a different pipeline than the current one (%s), and as a result is not "
+ "compatible. Use an explicit bind group layout when creating bind groups and "
+ "an explicit pipeline layout when creating pipelines to share bind groups "
+ "between pipelines.",
+ mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
+
+ DAWN_INVALID_IF(
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
+ "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
+ "group %s at index %u.",
+ requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
+ static_cast<uint32_t>(i));
+
+ // TODO(dawn:563): Report the binding sizes and which ones are failing.
+ DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinBufferSizes)[i]),
+ "Binding sizes are too small for bind group %s at index %u",
+ mBindgroups[i], static_cast<uint32_t>(i));
+ }
+
+ // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+ // It returns the first invalid state found. We shouldn't be able to reach this line
+ // because to have invalid aspects one of the above conditions must have failed earlier.
+ // If this is reached, make sure lazy aspects and the error checks above are consistent.
+ UNREACHABLE();
+ return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
+ }
+
+ UNREACHABLE();
+ }
+
+ void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
+ SetPipelineCommon(pipeline);
+ }
+
+ void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
+ SetPipelineCommon(pipeline);
+ }
+
+ void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindgroup,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mBindgroups[index] = bindgroup;
+ mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
+ mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
+ }
+
+ void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
+ mIndexBufferSet = true;
+ mIndexFormat = format;
+ mIndexBufferSize = size;
+ }
+
+ void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
+ mVertexBufferSlotsUsed.set(slot);
+ mVertexBufferSizes[slot] = size;
+ }
+
+ void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
+ mLastPipeline = pipeline;
+ mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
+ mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
+
+ mAspects.set(VALIDATION_ASPECT_PIPELINE);
+
+ // Reset lazy aspects so they get recomputed on the next operation.
+ mAspects &= ~kLazyAspects;
+ }
+
+ BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
+ return mBindgroups[index];
+ }
+
+ const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
+ BindGroupIndex index) const {
+ return mDynamicOffsets[index];
+ }
+
+ bool CommandBufferStateTracker::HasPipeline() const {
+ return mLastPipeline != nullptr;
+ }
+
+ RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
+ ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
+ return static_cast<RenderPipelineBase*>(mLastPipeline);
+ }
+
+ ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
+ ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
+ return static_cast<ComputePipelineBase*>(mLastPipeline);
+ }
+
+ PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
+ return mLastPipelineLayout;
+ }
+
+ wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
+ return mIndexFormat;
+ }
+
+ uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
+ return mIndexBufferSize;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h
new file mode 100644
index 00000000000..b68e27a0689
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandBufferStateTracker.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
+#define DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+
+namespace dawn::native {
+
+ class CommandBufferStateTracker {
+ public:
+ // Non-state-modifying validation functions
+ MaybeError ValidateCanDispatch();
+ MaybeError ValidateCanDraw();
+ MaybeError ValidateCanDrawIndexed();
+ MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
+ MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
+ uint32_t firstInstance);
+ MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
+
+ // State-modifying methods
+ void SetComputePipeline(ComputePipelineBase* pipeline);
+ void SetRenderPipeline(RenderPipelineBase* pipeline);
+ void SetBindGroup(BindGroupIndex index,
+ BindGroupBase* bindgroup,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+ void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
+ void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
+
+ static constexpr size_t kNumAspects = 4;
+ using ValidationAspects = std::bitset<kNumAspects>;
+
+ BindGroupBase* GetBindGroup(BindGroupIndex index) const;
+ const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
+ bool HasPipeline() const;
+ RenderPipelineBase* GetRenderPipeline() const;
+ ComputePipelineBase* GetComputePipeline() const;
+ PipelineLayoutBase* GetPipelineLayout() const;
+ wgpu::IndexFormat GetIndexFormat() const;
+ uint64_t GetIndexBufferSize() const;
+
+ private:
+ MaybeError ValidateOperation(ValidationAspects requiredAspects);
+ void RecomputeLazyAspects(ValidationAspects aspects);
+ MaybeError CheckMissingAspects(ValidationAspects aspects);
+
+ void SetPipelineCommon(PipelineBase* pipeline);
+
+ ValidationAspects mAspects;
+
+ ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
+ ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+ bool mIndexBufferSet = false;
+ wgpu::IndexFormat mIndexFormat;
+ uint64_t mIndexBufferSize = 0;
+
+ ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
+
+ PipelineLayoutBase* mLastPipelineLayout = nullptr;
+ PipelineBase* mLastPipeline = nullptr;
+
+ const RequiredBufferSizes* mMinBufferSizes = nullptr;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp
new file mode 100644
index 00000000000..123f99ef8f3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.cpp
@@ -0,0 +1,1211 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandEncoder.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QueryHelper.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <cmath>
+#include <map>
+
+namespace dawn::native {
+
+ namespace {
+
+ MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
+ uint64_t srcOffset,
+ uint64_t dstOffset) {
+ // Copy size must be a multiple of 4 bytes on macOS.
+ DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
+
+ // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
+ DAWN_INVALID_IF(
+ srcOffset % 4 != 0 || dstOffset % 4 != 0,
+ "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
+ srcOffset, dstOffset);
+
+ return {};
+ }
+
+ MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
+ DAWN_INVALID_IF(texture->GetSampleCount() > 1,
+ "%s sample count (%u) is not 1 when copying to or from a buffer.",
+ texture, texture->GetSampleCount());
+
+ return {};
+ }
+
+ MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
+ const TexelBlockInfo& blockInfo,
+ const bool hasDepthOrStencil) {
+ if (hasDepthOrStencil) {
+ // For depth-stencil texture, buffer offset must be a multiple of 4.
+ DAWN_INVALID_IF(layout.offset % 4 != 0,
+ "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
+ layout.offset);
+ } else {
+ DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
+ "Offset (%u) is not a multiple of the texel block byte size (%u).",
+ layout.offset, blockInfo.byteSize);
+ }
+ return {};
+ }
+
+ MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
+ const ImageCopyTexture& src) {
+ Aspect aspectUsed;
+ DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
+ if (aspectUsed == Aspect::Depth) {
+ switch (src.texture->GetFormat().format) {
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The depth aspect of %s format %s cannot be selected in a texture to "
+ "buffer copy.",
+ src.texture, src.texture->GetFormat().format);
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
+ // Currently we do not support layered rendering.
+ DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
+ "The layer count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLayerCount(), attachment);
+
+ DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
+ "The mip level count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLevelCount(), attachment);
+
+ return {};
+ }
+
+ MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
+ uint32_t* width,
+ uint32_t* height) {
+ const Extent3D& attachmentSize =
+ attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+
+ if (*width == 0) {
+ DAWN_ASSERT(*height == 0);
+ *width = attachmentSize.width;
+ *height = attachmentSize.height;
+ DAWN_ASSERT(*width != 0 && *height != 0);
+ } else {
+ DAWN_INVALID_IF(
+ *width != attachmentSize.width || *height != attachmentSize.height,
+ "Attachment %s size (width: %u, height: %u) does not match the size of the "
+ "other attachments (width: %u, height: %u).",
+ attachment, attachmentSize.width, attachmentSize.height, *width, *height);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
+ uint32_t* sampleCount) {
+ if (*sampleCount == 0) {
+ *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
+ DAWN_ASSERT(*sampleCount != 0);
+ } else {
+ DAWN_INVALID_IF(
+ *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
+ "Color attachment %s sample count (%u) does not match the sample count of the "
+ "other attachments (%u).",
+ colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateResolveTarget(const DeviceBase* device,
+ const RenderPassColorAttachment& colorAttachment,
+ UsageValidationMode usageValidationMode) {
+ if (colorAttachment.resolveTarget == nullptr) {
+ return {};
+ }
+
+ const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
+ const TextureViewBase* attachment = colorAttachment.view;
+ DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
+ DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
+ wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+ DAWN_INVALID_IF(
+ !attachment->GetTexture()->IsMultisampledTexture(),
+ "Cannot set %s as a resolve target when the color attachment %s has a sample "
+ "count of 1.",
+ resolveTarget, attachment);
+
+ DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
+ "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
+ resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
+
+ DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
+ "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLayerCount());
+
+ DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
+ "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLevelCount());
+
+ const Extent3D& colorTextureSize =
+ attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+ const Extent3D& resolveTextureSize =
+ resolveTarget->GetTexture()->GetMipLevelVirtualSize(
+ resolveTarget->GetBaseMipLevel());
+ DAWN_INVALID_IF(
+ colorTextureSize.width != resolveTextureSize.width ||
+ colorTextureSize.height != resolveTextureSize.height,
+ "The Resolve target %s size (width: %u, height: %u) does not match the color "
+ "attachment %s size (width: %u, height: %u).",
+ resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
+ colorTextureSize.width, colorTextureSize.height);
+
+ wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+ DAWN_INVALID_IF(
+ resolveTargetFormat != attachment->GetFormat().format,
+ "The resolve target %s format (%s) does not match the color attachment %s format "
+ "(%s).",
+ resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
+
+ return {};
+ }
+
+ MaybeError ValidateRenderPassColorAttachment(
+ DeviceBase* device,
+ const RenderPassColorAttachment& colorAttachment,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ TextureViewBase* attachment = colorAttachment.view;
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
+ wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+ DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
+ !attachment->GetFormat().isRenderable,
+ "The color attachment %s format (%s) is not color renderable.",
+ attachment, attachment->GetFormat().format);
+
+ DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
+ DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
+
+ if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
+ DAWN_INVALID_IF(std::isnan(colorAttachment.clearColor.r) ||
+ std::isnan(colorAttachment.clearColor.g) ||
+ std::isnan(colorAttachment.clearColor.b) ||
+ std::isnan(colorAttachment.clearColor.a),
+ "Color clear value (%s) contain a NaN.",
+ &colorAttachment.clearColor);
+ }
+
+ DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
+
+ DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
+
+ DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+ DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+ return {};
+ }
+
+ MaybeError ValidateRenderPassDepthStencilAttachment(
+ DeviceBase* device,
+ const RenderPassDepthStencilAttachment* depthStencilAttachment,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ DAWN_ASSERT(depthStencilAttachment != nullptr);
+
+ TextureViewBase* attachment = depthStencilAttachment->view;
+ DAWN_TRY(device->ValidateObject(attachment));
+ DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
+ wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+ const Format& format = attachment->GetFormat();
+ DAWN_INVALID_IF(
+ !format.HasDepthOrStencil(),
+ "The depth stencil attachment %s format (%s) is not a depth stencil format.",
+ attachment, format.format);
+
+ DAWN_INVALID_IF(!format.isRenderable,
+ "The depth stencil attachment %s format (%s) is not renderable.",
+ attachment, format.format);
+
+ DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
+ "The depth stencil attachment %s must encompass all aspects.",
+ attachment);
+
+ DAWN_INVALID_IF(
+ attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
+ depthStencilAttachment->depthReadOnly !=
+ depthStencilAttachment->stencilReadOnly,
+ "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
+ "is 'all'.",
+ depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
+
+ if (depthStencilAttachment->depthReadOnly) {
+ if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
+ depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ device->EmitDeprecationWarning(
+ "Setting depthLoadOp and depthStore when "
+ "depthReadOnly is true is deprecated.");
+ } else {
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
+ "depthLoadOp (%s) must not be set when depthReadOnly is true.",
+ depthStencilAttachment->depthLoadOp);
+ DAWN_INVALID_IF(
+ depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
+ "depthStoreOp (%s) must not be set when depthReadOnly is true.",
+ depthStencilAttachment->depthStoreOp);
+ }
+ } else {
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+ }
+
+ if (depthStencilAttachment->stencilReadOnly) {
+ if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
+ depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
+ // TODO(dawn:1269): Remove this branch after the deprecation period.
+ device->EmitDeprecationWarning(
+ "Setting stencilLoadOp and stencilStoreOp when "
+ "stencilReadOnly is true is deprecated.");
+ } else {
+ DAWN_INVALID_IF(
+ depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
+ "stencilLoadOp (%s) must not be set when stencilReadOnly is true.",
+ depthStencilAttachment->stencilLoadOp);
+ DAWN_INVALID_IF(
+ depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
+ "stencilStoreOp (%s) must not be set when stencilReadOnly is true.",
+ depthStencilAttachment->stencilStoreOp);
+ }
+ } else {
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+ }
+
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+ std::isnan(depthStencilAttachment->clearDepth),
+ "Depth clear value is NaN.");
+
+ // *sampleCount == 0 must only happen when there is no color attachment. In that case we
+ // do not need to validate the sample count of the depth stencil attachment.
+ const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
+ if (*sampleCount != 0) {
+ DAWN_INVALID_IF(
+ depthStencilSampleCount != *sampleCount,
+ "The depth stencil attachment %s sample count (%u) does not match the sample "
+ "count of the other attachments (%u).",
+ attachment, depthStencilSampleCount, *sampleCount);
+ } else {
+ *sampleCount = depthStencilSampleCount;
+ }
+
+ DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+ DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+ return {};
+ }
+
+ MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ uint32_t* width,
+ uint32_t* height,
+ uint32_t* sampleCount,
+ UsageValidationMode usageValidationMode) {
+ DAWN_INVALID_IF(
+ descriptor->colorAttachmentCount > kMaxColorAttachments,
+ "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
+ descriptor->colorAttachmentCount, kMaxColorAttachments);
+
+ for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
+ DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment(
+ device, descriptor->colorAttachments[i], width, height,
+ sampleCount, usageValidationMode),
+ "validating colorAttachments[%u].", i);
+ }
+
+ if (descriptor->depthStencilAttachment != nullptr) {
+ DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
+ device, descriptor->depthStencilAttachment, width, height,
+ sampleCount, usageValidationMode),
+ "validating depthStencilAttachment.");
+ }
+
+ if (descriptor->occlusionQuerySet != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
+
+ DAWN_INVALID_IF(
+ descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
+ "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
+ descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
+ }
+
+ DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
+ descriptor->depthStencilAttachment == nullptr,
+ "Render pass has no attachments.");
+
+ return {};
+ }
+
+ MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
+ const ComputePassDescriptor* descriptor) {
+ return {};
+ }
+
+ MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ const BufferBase* destination,
+ uint64_t destinationOffset) {
+ DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
+ "First query (%u) exceeds the number of queries (%u) in %s.",
+ firstQuery, querySet->GetQueryCount(), querySet);
+
+ DAWN_INVALID_IF(
+ queryCount > querySet->GetQueryCount() - firstQuery,
+ "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
+ "(%u) in %s.",
+ firstQuery, queryCount, querySet->GetQueryCount(), querySet);
+
+ DAWN_INVALID_IF(destinationOffset % 256 != 0,
+ "The destination buffer %s offset (%u) is not a multiple of 256.",
+ destination, destinationOffset);
+
+ uint64_t bufferSize = destination->GetSize();
+ // The destination buffer must have enough storage, from destination offset, to contain
+ // the result of resolved queries
+ bool fitsInBuffer = destinationOffset <= bufferSize &&
+ (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
+ (bufferSize - destinationOffset));
+ DAWN_INVALID_IF(
+ !fitsInBuffer,
+ "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
+ querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
+ bufferSize, destinationOffset);
+
+ return {};
+ }
+
+ MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
+ QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
+ DeviceBase* device = encoder->GetDevice();
+
+ // The availability got from query set is a reference to vector<bool>, need to covert
+ // bool to uint32_t due to a user input in pipeline must not contain a bool type in
+ // WGSL.
+ std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
+ querySet->GetQueryAvailability().end()};
+
+ // Timestamp availability storage buffer
+ BufferDescriptor availabilityDesc = {};
+ availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+ availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
+ Ref<BufferBase> availabilityBuffer;
+ DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
+
+ DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
+ availability.data(),
+ availability.size() * sizeof(uint32_t)));
+
+ // Timestamp params uniform buffer
+ TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
+ device->GetTimestampPeriodInNS());
+
+ BufferDescriptor parmsDesc = {};
+ parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+ parmsDesc.size = sizeof(params);
+ Ref<BufferBase> paramsBuffer;
+ DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
+
+ DAWN_TRY(
+ device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
+
+ return EncodeConvertTimestampsToNanoseconds(
+ encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
+ }
+
+ bool IsReadOnlyDepthStencilAttachment(
+ const RenderPassDepthStencilAttachment* depthStencilAttachment) {
+ DAWN_ASSERT(depthStencilAttachment != nullptr);
+ Aspect aspects = depthStencilAttachment->view->GetAspects();
+ DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
+
+ if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
+ return false;
+ }
+ if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
+ return false;
+ }
+ return true;
+ }
+
+ } // namespace
+
+ MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::DawnEncoderInternalUsageDescriptor));
+
+ const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ DAWN_INVALID_IF(internalUsageDesc != nullptr &&
+ !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
+ "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
+ return {};
+ }
+
+ // static
+ Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor) {
+ return AcquireRef(new CommandEncoder(device, descriptor));
+ }
+
+ // static
+ CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
+ return new CommandEncoder(device, ObjectBase::kError);
+ }
+
+ CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
+ TrackInDevice();
+
+ const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
+ mUsageValidationMode = UsageValidationMode::Internal;
+ } else {
+ mUsageValidationMode = UsageValidationMode::Default;
+ }
+ }
+
+ CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag),
+ mEncodingContext(device, this),
+ mUsageValidationMode(UsageValidationMode::Default) {
+ mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
+ }
+
+ ObjectType CommandEncoder::GetType() const {
+ return ObjectType::CommandEncoder;
+ }
+
+ void CommandEncoder::DestroyImpl() {
+ mEncodingContext.Destroy();
+ }
+
+ CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
+ return CommandBufferResourceUsage{
+ mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
+ std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
+ }
+
+ CommandIterator CommandEncoder::AcquireCommands() {
+ return mEncodingContext.AcquireCommands();
+ }
+
+ void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
+ mUsedQuerySets.insert(querySet);
+ }
+
+ void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+ DAWN_ASSERT(querySet != nullptr);
+
+ if (GetDevice()->IsValidationEnabled()) {
+ TrackUsedQuerySet(querySet);
+ }
+
+ // Set the query at queryIndex to available for resolving in query set.
+ querySet->SetQueryAvailability(queryIndex, true);
+ }
+
+ // Implementation of the API's command recording methods
+
+ ComputePassEncoder* CommandEncoder::APIBeginComputePass(
+ const ComputePassDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
+
+ allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
+
+ return {};
+ },
+ "encoding %s.BeginComputePass(%s).", this, descriptor);
+
+ if (success) {
+ const ComputePassDescriptor defaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDescriptor;
+ }
+
+ ComputePassEncoder* passEncoder =
+ new ComputePassEncoder(device, descriptor, this, &mEncodingContext);
+ mEncodingContext.EnterPass(passEncoder);
+ return passEncoder;
+ }
+
+ return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
+ }
+
+ // Begins a render pass: validates the descriptor, records a
+ // BeginRenderPassCmd with resolved attachment state and load/store ops, and
+ // tracks attachment texture usages. Never returns null — an error pass
+ // encoder is returned when encoding fails.
+ RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ RenderPassResourceUsageTracker usageTracker;
+
+ // Captured by the lambda below and reused when constructing the pass
+ // encoder after successful encoding.
+ uint32_t width = 0;
+ uint32_t height = 0;
+ bool depthReadOnly = false;
+ bool stencilReadOnly = false;
+ Ref<AttachmentState> attachmentState;
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ uint32_t sampleCount = 0;
+
+ DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
+ &sampleCount, mUsageValidationMode));
+
+ ASSERT(width > 0 && height > 0 && sampleCount > 0);
+
+ mEncodingContext.WillBeginRenderPass();
+ BeginRenderPassCmd* cmd =
+ allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
+
+ cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
+ attachmentState = cmd->attachmentState;
+
+ // Copy each used color attachment into the command and record its
+ // (and its resolve target's) usage as a render attachment.
+ for (ColorAttachmentIndex index :
+ IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
+ uint8_t i = static_cast<uint8_t>(index);
+ TextureViewBase* view = descriptor->colorAttachments[i].view;
+ TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
+
+ cmd->colorAttachments[index].view = view;
+ cmd->colorAttachments[index].resolveTarget = resolveTarget;
+ cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
+ cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
+ cmd->colorAttachments[index].clearColor =
+ descriptor->colorAttachments[i].clearColor;
+
+ usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+
+ if (resolveTarget != nullptr) {
+ usageTracker.TextureViewUsedAs(resolveTarget,
+ wgpu::TextureUsage::RenderAttachment);
+ }
+ }
+
+ if (cmd->attachmentState->HasDepthStencilAttachment()) {
+ TextureViewBase* view = descriptor->depthStencilAttachment->view;
+
+ cmd->depthStencilAttachment.view = view;
+ cmd->depthStencilAttachment.clearDepth =
+ descriptor->depthStencilAttachment->clearDepth;
+ cmd->depthStencilAttachment.clearStencil =
+ descriptor->depthStencilAttachment->clearStencil;
+ cmd->depthStencilAttachment.depthReadOnly =
+ descriptor->depthStencilAttachment->depthReadOnly;
+ cmd->depthStencilAttachment.stencilReadOnly =
+ descriptor->depthStencilAttachment->stencilReadOnly;
+
+ // A read-only aspect ignores the user-provided ops and is
+ // recorded as Load/Store so the contents are preserved.
+ if (descriptor->depthStencilAttachment->depthReadOnly) {
+ cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
+ cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+ } else {
+ cmd->depthStencilAttachment.depthLoadOp =
+ descriptor->depthStencilAttachment->depthLoadOp;
+ cmd->depthStencilAttachment.depthStoreOp =
+ descriptor->depthStencilAttachment->depthStoreOp;
+ }
+
+ if (descriptor->depthStencilAttachment->stencilReadOnly) {
+ cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
+ cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
+ } else {
+ cmd->depthStencilAttachment.stencilLoadOp =
+ descriptor->depthStencilAttachment->stencilLoadOp;
+ cmd->depthStencilAttachment.stencilStoreOp =
+ descriptor->depthStencilAttachment->stencilStoreOp;
+ }
+
+ // Fully read-only depth/stencil uses a distinct usage so it can
+ // coexist with sampling in the same synchronization scope.
+ if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
+ usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
+ } else {
+ usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+ }
+
+ depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
+ stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
+ }
+
+ cmd->width = width;
+ cmd->height = height;
+
+ cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
+
+ return {};
+ },
+ "encoding %s.BeginRenderPass(%s).", this, descriptor);
+
+ if (success) {
+ RenderPassEncoder* passEncoder = new RenderPassEncoder(
+ device, descriptor, this, &mEncodingContext, std::move(usageTracker),
+ std::move(attachmentState), descriptor->occlusionQuerySet, width, height,
+ depthReadOnly, stencilReadOnly);
+ mEncodingContext.EnterPass(passEncoder);
+ return passEncoder;
+ }
+
+ return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
+ }
+
+ // Records a buffer-to-buffer copy. Validation (object liveness, distinct
+ // buffers, range fit, B2B alignment, CopySrc/CopyDst usage) only runs when
+ // device validation is enabled; the command itself is always recorded when
+ // encoding succeeds.
+ void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_INVALID_IF(source == destination,
+ "Source and destination are the same buffer (%s).", source);
+
+ DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
+ "validating source %s copy size.", source);
+ DAWN_TRY_CONTEXT(
+ ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+ "validating destination %s copy size.", destination);
+ DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
+
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source);
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination);
+
+ // Track both buffers for submit-time usage validation.
+ mTopLevelBuffers.insert(source);
+ mTopLevelBuffers.insert(destination);
+ }
+
+ CopyBufferToBufferCmd* copy =
+ allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+ copy->source = source;
+ copy->sourceOffset = sourceOffset;
+ copy->destination = destination;
+ copy->destinationOffset = destinationOffset;
+ copy->size = size;
+
+ return {};
+ },
+ "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
+ destination, destinationOffset, size);
+ }
+
+ // Records a buffer-to-texture copy. Defaults for bytesPerRow/rowsPerImage
+ // are applied to a local copy of the source layout before recording.
+ void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source->buffer);
+
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
+ DAWN_TRY_CONTEXT(
+ ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ mUsageValidationMode),
+ "validating destination %s usage.", destination->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
+
+ DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
+ }
+ // blockInfo is needed unconditionally (also without validation) to
+ // apply the default texture-data-layout options below.
+ const TexelBlockInfo& blockInfo =
+ destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ source->layout, blockInfo,
+ destination->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
+ blockInfo, *copySize));
+
+ mTopLevelBuffers.insert(source->buffer);
+ mTopLevelTextures.insert(destination->texture);
+ }
+
+ TextureDataLayout srcLayout = source->layout;
+ ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+
+ CopyBufferToTextureCmd* copy =
+ allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
+ copy->source.buffer = source->buffer;
+ copy->source.offset = srcLayout.offset;
+ copy->source.bytesPerRow = srcLayout.bytesPerRow;
+ copy->source.rowsPerImage = srcLayout.rowsPerImage;
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
+
+ return {};
+ },
+ "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
+ destination->texture, copySize);
+ }
+
+ // Records a texture-to-buffer copy; mirror image of APICopyBufferToTexture
+ // with source/destination roles swapped.
+ void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ mUsageValidationMode),
+ "validating source %s usage.", source->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
+ DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
+
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
+ DAWN_TRY_CONTEXT(
+ ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination->buffer);
+
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+ }
+ // Needed outside the validation block to apply layout defaults below.
+ const TexelBlockInfo& blockInfo =
+ source->texture->GetFormat().GetAspectInfo(source->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ destination->layout, blockInfo,
+ source->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(
+ destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
+
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelBuffers.insert(destination->buffer);
+ }
+
+ TextureDataLayout dstLayout = destination->layout;
+ ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+
+ CopyTextureToBufferCmd* copy =
+ allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.buffer = destination->buffer;
+ copy->destination.offset = dstLayout.offset;
+ copy->destination.bytesPerRow = dstLayout.bytesPerRow;
+ copy->destination.rowsPerImage = dstLayout.rowsPerImage;
+ copy->copySize = *copySize;
+
+ return {};
+ },
+ "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
+ destination->buffer, copySize);
+ }
+
+ // Public texture-to-texture copy: validates against the encoder's normal
+ // usage-validation mode (Internal == false).
+ void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ APICopyTextureToTextureHelper<false>(source, destination, copySize);
+ }
+
+ // Internal variant: validates CopySrc/CopyDst against internal usages
+ // (Internal == true), bypassing the user-visible usage requirements.
+ void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ APICopyTextureToTextureHelper<true>(source, destination, copySize);
+ }
+
+ // Shared implementation of APICopyTextureToTexture[Internal]. `Internal`
+ // selects whether CopySrc/CopyDst usage is checked against internal usage
+ // (Internal == true) or the encoder's configured validation mode.
+ template <bool Internal>
+ void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source->texture));
+ DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
+ "validating source %s.", source->texture);
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
+ "validating destination %s.", destination->texture);
+
+ DAWN_TRY(
+ ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
+ "validating source %s copy range.", source->texture);
+ // Fixed copy-paste in the error context: this checks the
+ // destination, so say "destination", not "source".
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
+ "validating destination %s copy range.", destination->texture);
+
+ // For internal usages (CopyToCopyInternal) we don't care if the user has added
+ // CopySrc as a usage for this texture, but we will always add it internally.
+ if (Internal) {
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ UsageValidationMode::Internal));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ UsageValidationMode::Internal));
+ } else {
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ mUsageValidationMode));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ mUsageValidationMode));
+ }
+
+ // Track both textures for submit-time usage validation.
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelTextures.insert(destination->texture);
+ }
+
+ CopyTextureToTextureCmd* copy =
+ allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
+
+ return {};
+ },
+ "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
+ destination->texture, copySize);
+ }
+
+ // Records a buffer clear. `size == wgpu::kWholeSize` is resolved to the
+ // remainder of the buffer in both the validating and non-validating paths.
+ void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
+ } else {
+ DAWN_INVALID_IF(size > remainingSize,
+ "Buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) of %s.",
+ offset, size, bufferSize, buffer);
+ }
+
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
+ "validating buffer %s usage.", buffer);
+
+ // Size must be a multiple of 4 bytes on macOS.
+ DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
+ size);
+
+ // Offset must be multiples of 4 bytes on macOS.
+ DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
+ offset);
+
+ mTopLevelBuffers.insert(buffer);
+ } else {
+ // Still resolve kWholeSize so the recorded command has a
+ // concrete byte count even without validation.
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
+ }
+ }
+
+ ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
+ cmd->buffer = buffer;
+ cmd->offset = offset;
+ cmd->size = size;
+
+ return {};
+ },
+ "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
+ }
+
+ // Forces a validation error onto this encoder (only if it is the encoder
+ // currently accepted by the encoding context).
+ void CommandEncoder::APIInjectValidationError(const char* message) {
+ if (mEncodingContext.CheckCurrentEncoder(this)) {
+ mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
+ }
+ }
+
+ // Records a debug marker. The label is copied inline into the command
+ // stream; cmd->length excludes the NUL but the copy includes it.
+ void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ return {};
+ },
+ "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+ }
+
+ // Pops the innermost debug group. Balance is validated only when device
+ // validation is on.
+ // NOTE(review): with validation disabled, an unbalanced pop decrements
+ // mDebugGroupStackSize (uint64_t) past zero — confirm this wraparound is an
+ // accepted trade-off of the no-validation path.
+ void CommandEncoder::APIPopDebugGroup() {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ mDebugGroupStackSize == 0,
+ "PopDebugGroup called when no debug groups are currently pushed.");
+ }
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+ mDebugGroupStackSize--;
+ mEncodingContext.PopDebugGroupLabel();
+
+ return {};
+ },
+ "encoding %s.PopDebugGroup().", this);
+ }
+
+ // Pushes a debug group: the label is copied inline into the command stream
+ // (length excludes the NUL, the copy includes it) and the stack depth and
+ // encoding-context label stack are updated for later balance checks.
+ void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ mDebugGroupStackSize++;
+ mEncodingContext.PushDebugGroupLabel(groupLabel);
+
+ return {};
+ },
+ "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+ }
+
+ // Records resolution of [firstQuery, firstQuery + queryCount) into
+ // `destination` at `destinationOffset`. Timestamp query sets additionally
+ // get an internal compute pass appended to convert ticks to nanoseconds.
+ void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+ destinationOffset));
+
+ DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+
+ TrackUsedQuerySet(querySet);
+ mTopLevelBuffers.insert(destination);
+ }
+
+ ResolveQuerySetCmd* cmd =
+ allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+ cmd->querySet = querySet;
+ cmd->firstQuery = firstQuery;
+ cmd->queryCount = queryCount;
+ cmd->destination = destination;
+ cmd->destinationOffset = destinationOffset;
+
+ // Encode internal compute pipeline for timestamp query
+ if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
+ DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+ this, querySet, firstQuery, queryCount, destination, destinationOffset));
+ }
+
+ return {};
+ },
+ "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
+ queryCount, destination, destinationOffset);
+ }
+
+ // Records a deferred buffer write; `data` is copied inline into the command
+ // stream at encode time, so the caller's pointer need not outlive the call.
+ void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+ }
+
+ WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
+ cmd->buffer = buffer;
+ cmd->offset = bufferOffset;
+ cmd->size = size;
+
+ uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
+ memcpy(inlinedData, data, size);
+
+ // NOTE(review): unlike the copy commands above, this tracks the buffer
+ // even when validation is disabled — confirm whether that asymmetry is
+ // intentional.
+ mTopLevelBuffers.insert(buffer);
+
+ return {};
+ },
+ "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
+ }
+
+ // Records a timestamp write into `querySet` at `queryIndex` and marks that
+ // query as available for later ResolveQuerySet calls.
+ void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ }
+
+ TrackQueryAvailability(querySet, queryIndex);
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+ }
+
+ // Finalizes encoding into a command buffer. Errors from FinishInternal are
+ // consumed by the device and an error command buffer is returned instead, so
+ // the API never returns null.
+ CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
+ Ref<CommandBufferBase> commandBuffer;
+ if (GetDevice()->ConsumedError(FinishInternal(descriptor), &commandBuffer)) {
+ return CommandBufferBase::MakeError(GetDevice());
+ }
+ ASSERT(!IsError());
+ // Detach: ownership of the reference moves to the caller.
+ return commandBuffer.Detach();
+ }
+
+ // Closes the encoding context, validates the recorded work (when enabled),
+ // and asks the device to build the command buffer. Returns an error instead
+ // of a command buffer if any step fails.
+ ResultOrError<Ref<CommandBufferBase>> CommandEncoder::FinishInternal(
+ const CommandBufferDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+ // state of the encoding context. The internal state is set to finished, and subsequent
+ // calls to encode commands will generate errors.
+ DAWN_TRY(mEncodingContext.Finish());
+ DAWN_TRY(device->ValidateIsAlive());
+
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY(ValidateFinish());
+ }
+
+ // A null descriptor is valid; substitute defaults for the create call.
+ const CommandBufferDescriptor defaultDescriptor = {};
+ if (descriptor == nullptr) {
+ descriptor = &defaultDescriptor;
+ }
+
+ return device->CreateCommandBuffer(this, descriptor);
+ }
+
+ // Implementation of the command buffer validation that can be precomputed before submit
+ // Implementation of the command buffer validation that can be precomputed before submit
+ // Checks synchronization-scope usage rules for every recorded render pass and
+ // every compute-pass dispatch, and that debug groups are balanced.
+ MaybeError CommandEncoder::ValidateFinish() const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
+ "validating render pass usage.");
+ }
+
+ // Compute passes validate per dispatch: each dispatch forms its own
+ // synchronization scope.
+ for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
+ for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
+ "validating compute pass usage.");
+ }
+ }
+
+ DAWN_INVALID_IF(
+ mDebugGroupStackSize != 0,
+ "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
+ "calling Finish.",
+ mDebugGroupStackSize);
+
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h
new file mode 100644
index 00000000000..86f7cb107e2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandEncoder.h
@@ -0,0 +1,119 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDENCODER_H_
+#define DAWNNATIVE_COMMANDENCODER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/EncodingContext.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+
+#include <string>
+
+namespace dawn::native {
+
+ enum class UsageValidationMode;
+
+ MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor);
+
+ // Records GPU commands (passes, copies, clears, queries, debug markers) into
+ // an EncodingContext and produces a CommandBufferBase via APIFinish. Also
+ // accumulates top-level resource-usage sets for submit-time validation.
+ class CommandEncoder final : public ApiObjectBase {
+ public:
+ static Ref<CommandEncoder> Create(DeviceBase* device,
+ const CommandEncoderDescriptor* descriptor);
+ static CommandEncoder* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Move the recorded commands / usage bookkeeping out of the encoder
+ // (called when building the command buffer).
+ CommandIterator AcquireCommands();
+ CommandBufferResourceUsage AcquireResourceUsages();
+
+ void TrackUsedQuerySet(QuerySetBase* querySet);
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+ // Dawn API
+ ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
+ RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
+
+ void APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+ void APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ // Parameter names aligned with the definition (buffer/offset).
+ void APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size);
+
+ void APIInjectValidationError(const char* message);
+ void APIInsertDebugMarker(const char* groupLabel);
+ void APIPopDebugGroup();
+ void APIPushDebugGroup(const char* groupLabel);
+
+ void APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset);
+ void APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size);
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
+
+ private:
+ CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+ CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ void DestroyImpl() override;
+ ResultOrError<Ref<CommandBufferBase>> FinishInternal(
+ const CommandBufferDescriptor* descriptor);
+
+ // Helper to be able to implement both APICopyTextureToTexture and
+ // APICopyTextureToTextureInternal. The only difference between both
+ // copies, is that the Internal one will also check internal usage.
+ template <bool Internal>
+ void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+
+ MaybeError ValidateFinish() const;
+
+ EncodingContext mEncodingContext;
+ // Resources referenced by top-level (outside-of-pass) commands; moved out
+ // by AcquireResourceUsages().
+ std::set<BufferBase*> mTopLevelBuffers;
+ std::set<TextureBase*> mTopLevelTextures;
+ std::set<QuerySetBase*> mUsedQuerySets;
+
+ // Depth of currently-open debug groups; must be 0 at Finish.
+ uint64_t mDebugGroupStackSize = 0;
+
+ UsageValidationMode mUsageValidationMode;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp
new file mode 100644
index 00000000000..a9caaace91c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandValidation.cpp
@@ -0,0 +1,483 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CommandValidation.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+    // Performs validation of the "synchronization scope" rules of WebGPU.
+    // Within a single synchronization scope, each buffer and each texture
+    // subresource may be used either with exactly one writable usage or with any
+    // combination of read-only usages, but never a mix of the two.
+    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
+        // Buffers can only be used as single-write or multiple read.
+        for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
+            const wgpu::BufferUsage usage = scope.bufferUsages[i];
+            bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
+            bool singleUse = wgpu::HasZeroOrOneBits(usage);
+
+            DAWN_INVALID_IF(!readOnly && !singleUse,
+                            "%s usage (%s) includes writable usage and another usage in the same "
+                            "synchronization scope.",
+                            scope.buffers[i], usage);
+        }
+
+        // Check that every single subresource is used as either a single-write usage or a
+        // combination of readonly usages.
+        for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
+            const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
+            MaybeError error = {};
+            // Iterate visits every subresource range; only the first failing range
+            // is recorded (`!error.IsError()` guards against overwriting it).
+            textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+                bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
+                bool singleUse = wgpu::HasZeroOrOneBits(usage);
+                if (!readOnly && !singleUse && !error.IsError()) {
+                    error = DAWN_FORMAT_VALIDATION_ERROR(
+                        "%s usage (%s) includes writable usage and another usage in the same "
+                        "synchronization scope.",
+                        scope.textures[i], usage);
+                }
+            });
+            DAWN_TRY(std::move(error));
+        }
+        return {};
+    }
+
+    // Validates that `querySet` is a timestamp query set and that `queryIndex`
+    // is within its query count.
+    MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex) {
+        DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
+                        "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
+
+        DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
+                        "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
+                        querySet->GetQueryCount(), querySet);
+
+        return {};
+    }
+
+    // Validates a buffer write: the buffer is a live object on `device`, the
+    // offset and size are 4-byte aligned, the write range fits inside the buffer,
+    // and the buffer was created with CopyDst usage.
+    MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                                   const BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   uint64_t size) {
+        DAWN_TRY(device->ValidateObject(buffer));
+
+        DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
+                        bufferOffset);
+
+        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
+
+        uint64_t bufferSize = buffer->GetSize();
+        // Written as two comparisons (instead of bufferOffset + size > bufferSize)
+        // so the sum cannot overflow uint64_t.
+        DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
+                        "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
+                        bufferOffset, size, buffer, bufferSize);
+
+        DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
+                        "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
+                        wgpu::BufferUsage::CopyDst);
+
+        return {};
+    }
+
+    // Returns true when the equal-length ranges [startA, startA + length) and
+    // [startB, startB + length) intersect. The comparison is done in uint64_t so
+    // that start + length cannot overflow 32-bit arithmetic.
+    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
+        uint32_t maxStart = std::max(startA, startB);
+        uint32_t minStart = std::min(startA, startB);
+        return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
+               static_cast<uint64_t>(maxStart);
+    }
+
+    // Widening 32x32 -> 64-bit multiply. The static_asserts pin both operands to
+    // exactly uint32_t, so the uint64_t product can never overflow.
+    template <typename A, typename B>
+    DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
+        static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
+        static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
+        return uint64_t(a) * uint64_t(b);
+    }
+
+    // Computes the total number of bytes a linear-data copy of `copySize` touches,
+    // given the texel block layout and the bytesPerRow/rowsPerImage strides.
+    // Returns a validation error if the result would overflow uint64_t.
+    // Callers must have already validated block alignment of copySize (asserted
+    // below) and that bytesInLastRow <= bytesPerRow / heightInBlocks <= rowsPerImage.
+    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                       const Extent3D& copySize,
+                                                       uint32_t bytesPerRow,
+                                                       uint32_t rowsPerImage) {
+        ASSERT(copySize.width % blockInfo.width == 0);
+        ASSERT(copySize.height % blockInfo.height == 0);
+        uint32_t widthInBlocks = copySize.width / blockInfo.width;
+        uint32_t heightInBlocks = copySize.height / blockInfo.height;
+        uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
+
+        // An empty copy (zero layers/depth) requires no bytes at all.
+        if (copySize.depthOrArrayLayers == 0) {
+            return 0;
+        }
+
+        // Check for potential overflows for the rest of the computations. We have the following
+        // inequalities:
+        //
+        //   bytesInLastRow <= bytesPerRow
+        //   heightInBlocks <= rowsPerImage
+        //
+        // So:
+        //
+        //   bytesInLastImage = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
+        //                   <= bytesPerRow * heightInBlocks
+        //                   <= bytesPerRow * rowsPerImage
+        //                   <= bytesPerImage
+        //
+        // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
+        // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
+        ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
+                                                    rowsPerImage != wgpu::kCopyStrideUndefined));
+        uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+        DAWN_INVALID_IF(
+            bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+            "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
+            bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+            copySize.depthOrArrayLayers);
+
+        // The last image is counted separately: it only spans up to its last row.
+        uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+        if (heightInBlocks > 0) {
+            ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
+            uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
+            requiredBytesInCopy += bytesInLastImage;
+        }
+        return requiredBytesInCopy;
+    }
+
+    // Validates that the byte range [offset, offset + size) lies entirely inside
+    // `buffer`. Uses two comparisons so offset + size cannot overflow uint64_t.
+    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                            uint64_t offset,
+                                            uint64_t size) {
+        uint64_t bufferSize = buffer->GetSize();
+        bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
+        DAWN_INVALID_IF(!fitsInBuffer,
+                        "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
+                        size, buffer.Get(), bufferSize);
+
+        return {};
+    }
+
+    // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
+    // it.
+    // bytesPerRow defaults to the tightly packed row size aligned to
+    // kTextureBytesPerRowAlignment, and rowsPerImage defaults to the copy height
+    // in blocks. The asserts encode that the defaults are only legal when the
+    // copy spans at most one row / one image (validated earlier).
+    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                              const TexelBlockInfo& blockInfo,
+                                              const Extent3D& copyExtent) {
+        ASSERT(layout != nullptr);
+        ASSERT(copyExtent.height % blockInfo.height == 0);
+        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+        if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
+            ASSERT(copyExtent.width % blockInfo.width == 0);
+            uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+            uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+            ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
+            layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
+        }
+        if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
+            ASSERT(copyExtent.depthOrArrayLayers <= 1);
+            layout->rowsPerImage = heightInBlocks;
+        }
+    }
+
+    // Validates a TextureDataLayout against a copy extent and the size of the
+    // linear data (`byteSize`): the strides must be specified when required
+    // (depth > 1 or more than one row), rows/images must fit in their strides,
+    // and the total required bytes (offset included) must fit in the data.
+    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                         uint64_t byteSize,
+                                         const TexelBlockInfo& blockInfo,
+                                         const Extent3D& copyExtent) {
+        ASSERT(copyExtent.height % blockInfo.height == 0);
+        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
+
+        // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
+        // validation message. Investigate ways to make it print as a more readable symbol.
+        DAWN_INVALID_IF(
+            copyExtent.depthOrArrayLayers > 1 &&
+                (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+                 layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+            "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
+            copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
+
+        DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
+                        "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
+                        heightInBlocks);
+
+        // Validation for other members in layout:
+        ASSERT(copyExtent.width % blockInfo.width == 0);
+        uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+        ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
+               std::numeric_limits<uint32_t>::max());
+        uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+        // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
+        // but they should get optimized out.
+        DAWN_INVALID_IF(
+            layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
+            "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
+            layout.bytesPerRow);
+
+        DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
+                            heightInBlocks > layout.rowsPerImage,
+                        "The height of each image in blocks (%u) is > rowsPerImage (%u).",
+                        heightInBlocks, layout.rowsPerImage);
+
+        // We compute required bytes in copy after validating texel block alignments
+        // because the divisibility conditions are necessary for the algorithm to be valid,
+        // also the bytesPerRow bound is necessary to avoid overflows.
+        uint64_t requiredBytesInCopy;
+        DAWN_TRY_ASSIGN(requiredBytesInCopy,
+                        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
+                                                   layout.rowsPerImage));
+
+        // Two comparisons so offset + requiredBytesInCopy cannot overflow uint64_t.
+        bool fitsInData =
+            layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+        DAWN_INVALID_IF(
+            !fitsInData,
+            "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
+            "offset (%u).",
+            requiredBytesInCopy, byteSize, layout.offset);
+
+        return {};
+    }
+
+    // Validates the buffer side of a buffer<->texture copy: the buffer is a live
+    // object on `device`, and when bytesPerRow is specified it is a multiple of
+    // kTextureBytesPerRowAlignment.
+    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                       const ImageCopyBuffer& imageCopyBuffer) {
+        DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
+        if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
+            DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
+                            "bytesPerRow (%u) is not a multiple of %u.",
+                            imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
+        }
+
+        return {};
+    }
+
+    // Validates the texture side of a copy: the texture is a live object on
+    // `device`, the mip level exists, the aspect selects something in the format,
+    // and (for multisampled or depth/stencil textures) the copy covers the whole
+    // subresource.
+    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                        const ImageCopyTexture& textureCopy,
+                                        const Extent3D& copySize) {
+        const TextureBase* texture = textureCopy.texture;
+        DAWN_TRY(device->ValidateObject(texture));
+
+        DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
+                        "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
+                        textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
+
+        DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
+        DAWN_INVALID_IF(
+            SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+            "%s format (%s) does not have the selected aspect (%s).", texture,
+            texture->GetFormat().format, textureCopy.aspect);
+
+        if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
+            Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+            ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+            DAWN_INVALID_IF(
+                textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
+                    subresourceSize.width != copySize.width ||
+                    subresourceSize.height != copySize.height,
+                "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
+                "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
+                "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
+                &textureCopy.origin, &copySize, &subresourceSize, texture,
+                texture->GetFormat().format, texture->GetSampleCount());
+        }
+
+        return {};
+    }
+
+    // Validates that the copy region stays inside the selected mip level of the
+    // texture, and (for compressed formats) that origin and size are aligned to
+    // the texel block dimensions.
+    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                        const ImageCopyTexture& textureCopy,
+                                        const Extent3D& copySize) {
+        const TextureBase* texture = textureCopy.texture;
+
+        // Validation for the copy being in-bounds:
+        Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+        // For 1D/2D textures, include the array layer as depth so it can be checked with other
+        // dimensions.
+        if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
+            mipSize.depthOrArrayLayers = texture->GetArrayLayers();
+        }
+        // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
+        // overflows.
+        DAWN_INVALID_IF(
+            static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+                    static_cast<uint64_t>(mipSize.width) ||
+                static_cast<uint64_t>(textureCopy.origin.y) +
+                        static_cast<uint64_t>(copySize.height) >
+                    static_cast<uint64_t>(mipSize.height) ||
+                static_cast<uint64_t>(textureCopy.origin.z) +
+                        static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+                    static_cast<uint64_t>(mipSize.depthOrArrayLayers),
+            "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
+            "size (%s).",
+            &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
+
+        // Validation for the texel block alignments:
+        const Format& format = textureCopy.texture->GetFormat();
+        if (format.isCompressed) {
+            const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
+            DAWN_INVALID_IF(
+                textureCopy.origin.x % blockInfo.width != 0,
+                "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
+                "width (%u).",
+                textureCopy.origin.x, blockInfo.width);
+            DAWN_INVALID_IF(
+                textureCopy.origin.y % blockInfo.height != 0,
+                "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
+                "height (%u).",
+                textureCopy.origin.y, blockInfo.height);
+            DAWN_INVALID_IF(
+                copySize.width % blockInfo.width != 0,
+                "copySize.width (%u) is not a multiple of compressed texture format block width "
+                "(%u).",
+                copySize.width, blockInfo.width);
+            DAWN_INVALID_IF(
+                copySize.height % blockInfo.height != 0,
+                "copySize.height (%u) is not a multiple of compressed texture format block "
+                "height (%u).",
+                copySize.height, blockInfo.height);
+        }
+
+        return {};
+    }
+
+    // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
+    // formats).
+    // For TextureAspect::All, the format must itself have exactly one aspect.
+    // Plane0Only/Plane1Only fall through to UNREACHABLE: callers must not reach
+    // here with those aspects.
+    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
+        const Format& format = view.texture->GetFormat();
+        switch (view.aspect) {
+            case wgpu::TextureAspect::All: {
+                DAWN_INVALID_IF(
+                    !HasOneBit(format.aspects),
+                    "More than a single aspect (%s) is selected for multi-planar format (%s) in "
+                    "%s <-> linear data copy.",
+                    view.aspect, format.format, view.texture);
+
+                Aspect single = format.aspects;
+                return single;
+            }
+            case wgpu::TextureAspect::DepthOnly:
+                ASSERT(format.aspects & Aspect::Depth);
+                return Aspect::Depth;
+            case wgpu::TextureAspect::StencilOnly:
+                ASSERT(format.aspects & Aspect::Stencil);
+                return Aspect::Stencil;
+            case wgpu::TextureAspect::Plane0Only:
+            case wgpu::TextureAspect::Plane1Only:
+                break;
+        }
+        UNREACHABLE();
+    }
+
+    // Validates a linear-data -> texture copy destination with a depth/stencil
+    // format: writing into the depth aspect is disallowed.
+    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
+        Aspect aspectUsed;
+        DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
+        DAWN_INVALID_IF(aspectUsed == Aspect::Depth, "Cannot copy into the depth aspect of %s.",
+                        dst.texture);
+
+        return {};
+    }
+
+    // Restrictions shared by all texture-to-texture copies: matching sample
+    // counts, aspects that select the whole format on both sides, and no
+    // overlapping regions when source and destination are the same texture.
+    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                              const ImageCopyTexture& dst,
+                                                              const Extent3D& copySize) {
+        const uint32_t srcSamples = src.texture->GetSampleCount();
+        const uint32_t dstSamples = dst.texture->GetSampleCount();
+
+        DAWN_INVALID_IF(
+            srcSamples != dstSamples,
+            "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
+            src.texture, srcSamples, dst.texture, dstSamples);
+
+        // Metal cannot select a single aspect for texture-to-texture copies.
+        const Format& format = src.texture->GetFormat();
+        DAWN_INVALID_IF(
+            SelectFormatAspects(format, src.aspect) != format.aspects,
+            "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
+            src.texture, src.aspect, format.format);
+
+        DAWN_INVALID_IF(
+            SelectFormatAspects(format, dst.aspect) != format.aspects,
+            "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
+            "(%s).",
+            dst.texture, dst.aspect, format.format);
+
+        // Same-texture copies: per dimension, reject any overlap.
+        if (src.texture == dst.texture) {
+            switch (src.texture->GetDimension()) {
+                case wgpu::TextureDimension::e1D:
+                    // 1D textures have a single mip level and layer, so any copy
+                    // onto itself necessarily overlaps.
+                    ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
+                    return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
+
+                case wgpu::TextureDimension::e2D:
+                    // Same mip level is allowed only when the layer ranges are disjoint.
+                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
+                                        IsRangeOverlapped(src.origin.z, dst.origin.z,
+                                                          copySize.depthOrArrayLayers),
+                                    "Copy source and destination are overlapping layer ranges "
+                                    "([%u, %u) and [%u, %u)) of %s mip level %u",
+                                    src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
+                                    dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
+                                    src.texture, src.mipLevel);
+                    break;
+
+                case wgpu::TextureDimension::e3D:
+                    // 3D textures have one "layer" per mip, so copying within the
+                    // same mip level is always rejected.
+                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
+                                    "Copy is from %s mip level %u to itself.", src.texture,
+                                    src.mipLevel);
+                    break;
+            }
+        }
+
+        return {};
+    }
+
+    // Full texture-to-texture copy validation: formats must be copy-compatible,
+    // then the common restrictions above are applied.
+    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                        const ImageCopyTexture& dst,
+                                                        const Extent3D& copySize) {
+        // Metal requires that texture-to-texture copies happen between texture formats that
+        // are equal to each other or differ only in srgb-ness.
+        DAWN_INVALID_IF(
+            !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+            "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
+            src.texture, src.texture->GetFormat().format, dst.texture,
+            dst.texture->GetFormat().format);
+
+        return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+    }
+
+    // Validates that `texture` has the single usage bit `usage`. In Internal
+    // mode the check is against the texture's internal usage instead of the
+    // API-visible usage.
+    MaybeError ValidateCanUseAs(const TextureBase* texture,
+                                wgpu::TextureUsage usage,
+                                UsageValidationMode mode) {
+        ASSERT(wgpu::HasZeroOrOneBits(usage));
+        switch (mode) {
+            case UsageValidationMode::Default:
+                DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
+                                texture, texture->GetUsage(), usage);
+                break;
+            case UsageValidationMode::Internal:
+                DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
+                                "%s internal usage (%s) doesn't include %s.", texture,
+                                texture->GetInternalUsage(), usage);
+                break;
+        }
+
+        return {};
+    }
+
+    // Validates that `buffer` has the single usage bit `usage`.
+    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+        ASSERT(wgpu::HasZeroOrOneBits(usage));
+        DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
+                        buffer->GetUsage(), usage);
+        return {};
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CommandValidation.h b/chromium/third_party/dawn/src/dawn/native/CommandValidation.h
new file mode 100644
index 00000000000..dbc13ececf5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CommandValidation.h
@@ -0,0 +1,88 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDVALIDATION_H_
+#define DAWNNATIVE_COMMANDVALIDATION_H_
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Texture.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+    // Forward declarations for types only used by reference here.
+    class QuerySetBase;
+    struct SyncScopeResourceUsage;
+    struct TexelBlockInfo;
+
+    // Validation helpers shared by command encoding. Implementations (and
+    // detailed contracts) live in CommandValidation.cpp.
+
+    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
+
+    MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex);
+
+    MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                                   const BufferBase* buffer,
+                                   uint64_t bufferOffset,
+                                   uint64_t size);
+
+    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                       const Extent3D& copySize,
+                                                       uint32_t bytesPerRow,
+                                                       uint32_t rowsPerImage);
+
+    // Helpers for linear data <-> texture copies.
+    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                              const TexelBlockInfo& blockInfo,
+                                              const Extent3D& copyExtent);
+    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                         uint64_t byteSize,
+                                         const TexelBlockInfo& blockInfo,
+                                         const Extent3D& copyExtent);
+    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                        const ImageCopyTexture& imageCopyTexture,
+                                        const Extent3D& copySize);
+    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
+
+    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                       const ImageCopyBuffer& imageCopyBuffer);
+    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                        const ImageCopyTexture& imageCopyTexture,
+                                        const Extent3D& copySize);
+
+    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                            uint64_t offset,
+                                            uint64_t size);
+
+    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+
+    // Texture-to-texture copy restrictions.
+    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                              const ImageCopyTexture& dst,
+                                                              const Extent3D& copySize);
+    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                        const ImageCopyTexture& dst,
+                                                        const Extent3D& copySize);
+
+    // Selects whether usage checks run against the API-visible usage (Default)
+    // or the internal usage (Internal); see ValidateCanUseAs.
+    enum class UsageValidationMode {
+        Default,
+        Internal,
+    };
+
+    MaybeError ValidateCanUseAs(const TextureBase* texture,
+                                wgpu::TextureUsage usage,
+                                UsageValidationMode mode);
+    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMANDVALIDATION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Commands.cpp b/chromium/third_party/dawn/src/dawn/native/Commands.cpp
new file mode 100644
index 00000000000..3337cbd4cf8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Commands.cpp
@@ -0,0 +1,365 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Commands.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+    // Walks every command in `commands`, explicitly runs each command struct's
+    // destructor (the commands were placement-constructed in the allocator), and
+    // consumes any variable-length trailing data (debug-marker strings, dynamic
+    // offsets, ExecuteBundles refs, WriteBuffer payloads). Finally marks the
+    // iterator's storage as destroyed via MakeEmptyAsDataWasDestroyed().
+    // Keep the switch in sync with SkipCommand and the Command enum.
+    void FreeCommands(CommandIterator* commands) {
+        commands->Reset();
+
+        Command type;
+        while (commands->NextCommandId(&type)) {
+            switch (type) {
+                case Command::BeginComputePass: {
+                    BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
+                    begin->~BeginComputePassCmd();
+                    break;
+                }
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
+                    begin->~BeginOcclusionQueryCmd();
+                    break;
+                }
+                case Command::BeginRenderPass: {
+                    BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
+                    begin->~BeginRenderPassCmd();
+                    break;
+                }
+                case Command::CopyBufferToBuffer: {
+                    CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
+                    copy->~CopyBufferToBufferCmd();
+                    break;
+                }
+                case Command::CopyBufferToTexture: {
+                    CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
+                    copy->~CopyBufferToTextureCmd();
+                    break;
+                }
+                case Command::CopyTextureToBuffer: {
+                    CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
+                    copy->~CopyTextureToBufferCmd();
+                    break;
+                }
+                case Command::CopyTextureToTexture: {
+                    CopyTextureToTextureCmd* copy =
+                        commands->NextCommand<CopyTextureToTextureCmd>();
+                    copy->~CopyTextureToTextureCmd();
+                    break;
+                }
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
+                    dispatch->~DispatchCmd();
+                    break;
+                }
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
+                    dispatch->~DispatchIndirectCmd();
+                    break;
+                }
+                case Command::Draw: {
+                    DrawCmd* draw = commands->NextCommand<DrawCmd>();
+                    draw->~DrawCmd();
+                    break;
+                }
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
+                    draw->~DrawIndexedCmd();
+                    break;
+                }
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
+                    draw->~DrawIndirectCmd();
+                    break;
+                }
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
+                    draw->~DrawIndexedIndirectCmd();
+                    break;
+                }
+                case Command::EndComputePass: {
+                    EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
+                    cmd->~EndComputePassCmd();
+                    break;
+                }
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
+                    cmd->~EndOcclusionQueryCmd();
+                    break;
+                }
+                case Command::EndRenderPass: {
+                    EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
+                    cmd->~EndRenderPassCmd();
+                    break;
+                }
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+                    // The bundle refs are stored as trailing data; release each one
+                    // explicitly before destroying the command itself.
+                    auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+                    for (size_t i = 0; i < cmd->count; ++i) {
+                        (&bundles[i])->~Ref<RenderBundleBase>();
+                    }
+                    cmd->~ExecuteBundlesCmd();
+                    break;
+                }
+                case Command::ClearBuffer: {
+                    ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
+                    cmd->~ClearBufferCmd();
+                    break;
+                }
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+                    // length + 1 presumably includes a trailing null terminator for
+                    // the marker string -- confirm against the encoder.
+                    commands->NextData<char>(cmd->length + 1);
+                    cmd->~InsertDebugMarkerCmd();
+                    break;
+                }
+                case Command::PopDebugGroup: {
+                    PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
+                    cmd->~PopDebugGroupCmd();
+                    break;
+                }
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+                    commands->NextData<char>(cmd->length + 1);
+                    cmd->~PushDebugGroupCmd();
+                    break;
+                }
+                case Command::ResolveQuerySet: {
+                    ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
+                    cmd->~ResolveQuerySetCmd();
+                    break;
+                }
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
+                    cmd->~SetComputePipelineCmd();
+                    break;
+                }
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
+                    cmd->~SetRenderPipelineCmd();
+                    break;
+                }
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
+                    cmd->~SetStencilReferenceCmd();
+                    break;
+                }
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
+                    cmd->~SetViewportCmd();
+                    break;
+                }
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
+                    cmd->~SetScissorRectCmd();
+                    break;
+                }
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
+                    cmd->~SetBlendConstantCmd();
+                    break;
+                }
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+                    // Dynamic offsets are uint32_t trailing data; consume them so the
+                    // iterator stays aligned with the next command.
+                    if (cmd->dynamicOffsetCount > 0) {
+                        commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    cmd->~SetBindGroupCmd();
+                    break;
+                }
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
+                    cmd->~SetIndexBufferCmd();
+                    break;
+                }
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
+                    cmd->~SetVertexBufferCmd();
+                    break;
+                }
+                case Command::WriteBuffer: {
+                    WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
+                    // The written bytes are stored inline after the command.
+                    commands->NextData<uint8_t>(write->size);
+                    write->~WriteBufferCmd();
+                    break;
+                }
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
+                    cmd->~WriteTimestampCmd();
+                    break;
+                }
+            }
+        }
+
+        commands->MakeEmptyAsDataWasDestroyed();
+    }
+
+    // Advances `commands` past one command whose id (`type`) has already been
+    // read with NextCommandId, consuming the command struct and any trailing
+    // data (strings, dynamic offsets, bundle refs). Unlike FreeCommands, this
+    // does NOT run destructors -- it only moves the iterator forward.
+    // Keep the switch in sync with FreeCommands and the Command enum.
+    void SkipCommand(CommandIterator* commands, Command type) {
+        switch (type) {
+            case Command::BeginComputePass:
+                commands->NextCommand<BeginComputePassCmd>();
+                break;
+
+            case Command::BeginOcclusionQuery:
+                commands->NextCommand<BeginOcclusionQueryCmd>();
+                break;
+
+            case Command::BeginRenderPass:
+                commands->NextCommand<BeginRenderPassCmd>();
+                break;
+
+            case Command::CopyBufferToBuffer:
+                commands->NextCommand<CopyBufferToBufferCmd>();
+                break;
+
+            case Command::CopyBufferToTexture:
+                commands->NextCommand<CopyBufferToTextureCmd>();
+                break;
+
+            case Command::CopyTextureToBuffer:
+                commands->NextCommand<CopyTextureToBufferCmd>();
+                break;
+
+            case Command::CopyTextureToTexture:
+                commands->NextCommand<CopyTextureToTextureCmd>();
+                break;
+
+            case Command::Dispatch:
+                commands->NextCommand<DispatchCmd>();
+                break;
+
+            case Command::DispatchIndirect:
+                commands->NextCommand<DispatchIndirectCmd>();
+                break;
+
+            case Command::Draw:
+                commands->NextCommand<DrawCmd>();
+                break;
+
+            case Command::DrawIndexed:
+                commands->NextCommand<DrawIndexedCmd>();
+                break;
+
+            case Command::DrawIndirect:
+                commands->NextCommand<DrawIndirectCmd>();
+                break;
+
+            case Command::DrawIndexedIndirect:
+                commands->NextCommand<DrawIndexedIndirectCmd>();
+                break;
+
+            case Command::EndComputePass:
+                commands->NextCommand<EndComputePassCmd>();
+                break;
+
+            case Command::EndOcclusionQuery:
+                commands->NextCommand<EndOcclusionQueryCmd>();
+                break;
+
+            case Command::EndRenderPass:
+                commands->NextCommand<EndRenderPassCmd>();
+                break;
+
+            case Command::ExecuteBundles: {
+                // Skip the trailing array of bundle refs as well.
+                auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+                commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+                break;
+            }
+
+            case Command::ClearBuffer:
+                commands->NextCommand<ClearBufferCmd>();
+                break;
+
+            case Command::InsertDebugMarker: {
+                // Skip the trailing marker string (length + 1 chars).
+                InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+                commands->NextData<char>(cmd->length + 1);
+                break;
+            }
+
+            case Command::PopDebugGroup:
+                commands->NextCommand<PopDebugGroupCmd>();
+                break;
+
+            case Command::PushDebugGroup: {
+                // Skip the trailing group label string (length + 1 chars).
+                PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+                commands->NextData<char>(cmd->length + 1);
+                break;
+            }
+
+            case Command::ResolveQuerySet: {
+                commands->NextCommand<ResolveQuerySetCmd>();
+                break;
+            }
+
+            case Command::SetComputePipeline:
+                commands->NextCommand<SetComputePipelineCmd>();
+                break;
+
+            case Command::SetRenderPipeline:
+                commands->NextCommand<SetRenderPipelineCmd>();
+                break;
+
+            case Command::SetStencilReference:
+                commands->NextCommand<SetStencilReferenceCmd>();
+                break;
+
+            case Command::SetViewport:
+                commands->NextCommand<SetViewportCmd>();
+                break;
+
+            case Command::SetScissorRect:
+                commands->NextCommand<SetScissorRectCmd>();
+                break;
+
+            case Command::SetBlendConstant:
+                commands->NextCommand<SetBlendConstantCmd>();
+                break;
+
+            case Command::SetBindGroup: {
+                // Skip the trailing dynamic offsets, if any.
+                SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+                if (cmd->dynamicOffsetCount > 0) {
+                    commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+                break;
+            }
+
+            case Command::SetIndexBuffer:
+                commands->NextCommand<SetIndexBufferCmd>();
+                break;
+
+            case Command::SetVertexBuffer: {
+                commands->NextCommand<SetVertexBufferCmd>();
+                break;
+            }
+
+            case Command::WriteBuffer:
+                commands->NextCommand<WriteBufferCmd>();
+                break;
+
+            case Command::WriteTimestamp: {
+                commands->NextCommand<WriteTimestampCmd>();
+                break;
+            }
+        }
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Commands.h b/chromium/third_party/dawn/src/dawn/native/Commands.h
new file mode 100644
index 00000000000..be1de068bd4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Commands.h
@@ -0,0 +1,290 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMMANDS_H_
+#define DAWNNATIVE_COMMANDS_H_
+
+#include "dawn/common/Constants.h"
+
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ // Definition of the commands that are present in the CommandIterator given by the
+    // CommandBufferBuilder. They are not defined in CommandBuffer.h to break some header
+ // dependencies: Ref<Object> needs Object to be defined.
+
+ enum class Command {
+ BeginComputePass,
+ BeginOcclusionQuery,
+ BeginRenderPass,
+ ClearBuffer,
+ CopyBufferToBuffer,
+ CopyBufferToTexture,
+ CopyTextureToBuffer,
+ CopyTextureToTexture,
+ Dispatch,
+ DispatchIndirect,
+ Draw,
+ DrawIndexed,
+ DrawIndirect,
+ DrawIndexedIndirect,
+ EndComputePass,
+ EndOcclusionQuery,
+ EndRenderPass,
+ ExecuteBundles,
+ InsertDebugMarker,
+ PopDebugGroup,
+ PushDebugGroup,
+ ResolveQuerySet,
+ SetComputePipeline,
+ SetRenderPipeline,
+ SetStencilReference,
+ SetViewport,
+ SetScissorRect,
+ SetBlendConstant,
+ SetBindGroup,
+ SetIndexBuffer,
+ SetVertexBuffer,
+ WriteBuffer,
+ WriteTimestamp,
+ };
+
+ struct BeginComputePassCmd {};
+
+ struct BeginOcclusionQueryCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+ };
+
+ struct RenderPassColorAttachmentInfo {
+ Ref<TextureViewBase> view;
+ Ref<TextureViewBase> resolveTarget;
+ wgpu::LoadOp loadOp;
+ wgpu::StoreOp storeOp;
+ dawn::native::Color clearColor;
+ };
+
+ struct RenderPassDepthStencilAttachmentInfo {
+ Ref<TextureViewBase> view;
+ wgpu::LoadOp depthLoadOp;
+ wgpu::StoreOp depthStoreOp;
+ wgpu::LoadOp stencilLoadOp;
+ wgpu::StoreOp stencilStoreOp;
+ float clearDepth;
+ uint32_t clearStencil;
+ bool depthReadOnly;
+ bool stencilReadOnly;
+ };
+
+ struct BeginRenderPassCmd {
+ Ref<AttachmentState> attachmentState;
+ ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
+ colorAttachments;
+ RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
+
+ // Cache the width and height of all attachments for convenience
+ uint32_t width;
+ uint32_t height;
+
+ Ref<QuerySetBase> occlusionQuerySet;
+ };
+
+ struct BufferCopy {
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint32_t bytesPerRow;
+ uint32_t rowsPerImage;
+ };
+
+ struct TextureCopy {
+ Ref<TextureBase> texture;
+ uint32_t mipLevel;
+ Origin3D origin; // Texels / array layer
+ Aspect aspect;
+ };
+
+ struct CopyBufferToBufferCmd {
+ Ref<BufferBase> source;
+ uint64_t sourceOffset;
+ Ref<BufferBase> destination;
+ uint64_t destinationOffset;
+ uint64_t size;
+ };
+
+ struct CopyBufferToTextureCmd {
+ BufferCopy source;
+ TextureCopy destination;
+ Extent3D copySize; // Texels
+ };
+
+ struct CopyTextureToBufferCmd {
+ TextureCopy source;
+ BufferCopy destination;
+ Extent3D copySize; // Texels
+ };
+
+ struct CopyTextureToTextureCmd {
+ TextureCopy source;
+ TextureCopy destination;
+ Extent3D copySize; // Texels
+ };
+
+ struct DispatchCmd {
+ uint32_t x;
+ uint32_t y;
+ uint32_t z;
+ };
+
+ struct DispatchIndirectCmd {
+ Ref<BufferBase> indirectBuffer;
+ uint64_t indirectOffset;
+ };
+
+ struct DrawCmd {
+ uint32_t vertexCount;
+ uint32_t instanceCount;
+ uint32_t firstVertex;
+ uint32_t firstInstance;
+ };
+
+ struct DrawIndexedCmd {
+ uint32_t indexCount;
+ uint32_t instanceCount;
+ uint32_t firstIndex;
+ int32_t baseVertex;
+ uint32_t firstInstance;
+ };
+
+ struct DrawIndirectCmd {
+ Ref<BufferBase> indirectBuffer;
+ uint64_t indirectOffset;
+ };
+
+ struct DrawIndexedIndirectCmd {
+ Ref<BufferBase> indirectBuffer;
+ uint64_t indirectOffset;
+ };
+
+ struct EndComputePassCmd {};
+
+ struct EndOcclusionQueryCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+ };
+
+ struct EndRenderPassCmd {};
+
+ struct ExecuteBundlesCmd {
+ uint32_t count;
+ };
+
+ struct ClearBufferCmd {
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct InsertDebugMarkerCmd {
+ uint32_t length;
+ };
+
+ struct PopDebugGroupCmd {};
+
+ struct PushDebugGroupCmd {
+ uint32_t length;
+ };
+
+ struct ResolveQuerySetCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t firstQuery;
+ uint32_t queryCount;
+ Ref<BufferBase> destination;
+ uint64_t destinationOffset;
+ };
+
+ struct SetComputePipelineCmd {
+ Ref<ComputePipelineBase> pipeline;
+ };
+
+ struct SetRenderPipelineCmd {
+ Ref<RenderPipelineBase> pipeline;
+ };
+
+ struct SetStencilReferenceCmd {
+ uint32_t reference;
+ };
+
+ struct SetViewportCmd {
+ float x, y, width, height, minDepth, maxDepth;
+ };
+
+ struct SetScissorRectCmd {
+ uint32_t x, y, width, height;
+ };
+
+ struct SetBlendConstantCmd {
+ Color color;
+ };
+
+ struct SetBindGroupCmd {
+ BindGroupIndex index;
+ Ref<BindGroupBase> group;
+ uint32_t dynamicOffsetCount;
+ };
+
+ struct SetIndexBufferCmd {
+ Ref<BufferBase> buffer;
+ wgpu::IndexFormat format;
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct SetVertexBufferCmd {
+ VertexBufferSlot slot;
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct WriteBufferCmd {
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct WriteTimestampCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+ };
+
+ // This needs to be called before the CommandIterator is freed so that the Ref<> present in
+ // the commands have a chance to run their destructor and remove internal references.
+ class CommandIterator;
+ void FreeCommands(CommandIterator* commands);
+
+ // Helper function to allow skipping over a command when it is unimplemented, while still
+ // consuming the correct amount of data from the command iterator.
+ void SkipCommand(CommandIterator* commands, Command type);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMMANDS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp
new file mode 100644
index 00000000000..47c3d0be329
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.cpp
@@ -0,0 +1,201 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CompilationMessages.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native {
+
+ namespace {
+
+ WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+ switch (severity) {
+ case tint::diag::Severity::Note:
+ return WGPUCompilationMessageType_Info;
+ case tint::diag::Severity::Warning:
+ return WGPUCompilationMessageType_Warning;
+ default:
+ return WGPUCompilationMessageType_Error;
+ }
+ }
+
+ } // anonymous namespace
+
+ OwnedCompilationMessages::OwnedCompilationMessages() {
+ mCompilationInfo.nextInChain = 0;
+ mCompilationInfo.messageCount = 0;
+ mCompilationInfo.messages = nullptr;
+ }
+
+ void OwnedCompilationMessages::AddMessageForTesting(std::string message,
+ wgpu::CompilationMessageType type,
+ uint64_t lineNum,
+ uint64_t linePos,
+ uint64_t offset,
+ uint64_t length) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ mMessageStrings.push_back(message);
+ mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
+ lineNum, linePos, offset, length});
+ }
+
+ void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ // Tint line and column values are 1-based.
+ uint64_t lineNum = diagnostic.source.range.begin.line;
+ uint64_t linePos = diagnostic.source.range.begin.column;
+ // The offset is 0-based.
+ uint64_t offset = 0;
+ uint64_t length = 0;
+
+ if (lineNum && linePos && diagnostic.source.file) {
+ const auto& lines = diagnostic.source.file->content.lines;
+ size_t i = 0;
+ // To find the offset of the message position, loop through each of the first lineNum-1
+            // lines and add its length (+1 to account for the line break) to the offset.
+ for (; i < lineNum - 1; ++i) {
+ offset += lines[i].length() + 1;
+ }
+
+ // If the end line is on a different line from the beginning line, add the length of the
+ // lines in between to the ending offset.
+ uint64_t endLineNum = diagnostic.source.range.end.line;
+ uint64_t endLinePos = diagnostic.source.range.end.column;
+
+            // If the range has a valid start but the end is not specified, clamp it to the start.
+ if (endLineNum == 0 || endLinePos == 0) {
+ endLineNum = lineNum;
+ endLinePos = linePos;
+ }
+
+ // Negative ranges aren't allowed
+ ASSERT(endLineNum >= lineNum);
+
+ uint64_t endOffset = offset;
+ for (; i < endLineNum - 1; ++i) {
+ endOffset += lines[i].length() + 1;
+ }
+
+ // Add the line positions to the offset and endOffset to get their final positions
+ // within the code string.
+ offset += linePos - 1;
+ endOffset += endLinePos - 1;
+
+ // Negative ranges aren't allowed
+ ASSERT(endOffset >= offset);
+
+ // The length of the message is the difference between the starting offset and the
+ // ending offset.
+ length = endOffset - offset;
+ }
+
+ if (diagnostic.code) {
+ mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
+ } else {
+ mMessageStrings.push_back(diagnostic.message);
+ }
+
+ mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
+ lineNum, linePos, offset, length});
+ }
+
+ void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ for (const auto& diag : diagnostics) {
+ AddMessage(diag);
+ }
+
+ AddFormattedTintMessages(diagnostics);
+ }
+
+ void OwnedCompilationMessages::ClearMessages() {
+ // Cannot clear messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ mMessageStrings.clear();
+ mMessages.clear();
+ }
+
+ const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+ mCompilationInfo.messageCount = mMessages.size();
+ mCompilationInfo.messages = mMessages.data();
+
+ // Ensure every message points at the correct message string. Cannot do this earlier, since
+ // vector reallocations may move the pointers around.
+ for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
+ WGPUCompilationMessage& message = mMessages[i];
+ std::string& messageString = mMessageStrings[i];
+ message.message = messageString.c_str();
+ }
+
+ return &mCompilationInfo;
+ }
+
+ const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
+ return mFormattedTintMessages;
+ }
+
+ void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
+ tint::diag::List messageList;
+ size_t warningCount = 0;
+ size_t errorCount = 0;
+ for (auto& diag : diagnostics) {
+ switch (diag.severity) {
+ case (tint::diag::Severity::Fatal):
+ case (tint::diag::Severity::Error):
+ case (tint::diag::Severity::InternalCompilerError): {
+ errorCount++;
+ messageList.add(tint::diag::Diagnostic(diag));
+ break;
+ }
+ case (tint::diag::Severity::Warning): {
+ warningCount++;
+ messageList.add(tint::diag::Diagnostic(diag));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ if (errorCount == 0 && warningCount == 0) {
+ return;
+ }
+ tint::diag::Formatter::Style style;
+ style.print_newline_at_end = false;
+ std::ostringstream t;
+ if (errorCount > 0) {
+ t << errorCount << " error(s) ";
+ if (warningCount > 0) {
+ t << "and ";
+ }
+ }
+ if (warningCount > 0) {
+ t << warningCount << " warning(s) ";
+ }
+ t << "generated while compiling the shader:" << std::endl
+ << tint::diag::Formatter{style}.format(messageList);
+ mFormattedTintMessages.push_back(t.str());
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h
new file mode 100644
index 00000000000..92e33465a17
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CompilationMessages.h
@@ -0,0 +1,62 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPILATIONMESSAGES_H_
+#define DAWNNATIVE_COMPILATIONMESSAGES_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/NonCopyable.h"
+
+#include <string>
+#include <vector>
+
+namespace tint::diag {
+ class Diagnostic;
+ class List;
+} // namespace tint::diag
+
+namespace dawn::native {
+
+ class OwnedCompilationMessages : public NonCopyable {
+ public:
+ OwnedCompilationMessages();
+ ~OwnedCompilationMessages() = default;
+
+ void AddMessageForTesting(
+ std::string message,
+ wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
+ uint64_t lineNum = 0,
+ uint64_t linePos = 0,
+ uint64_t offset = 0,
+ uint64_t length = 0);
+ void AddMessages(const tint::diag::List& diagnostics);
+ void ClearMessages();
+
+ const WGPUCompilationInfo* GetCompilationInfo();
+ const std::vector<std::string>& GetFormattedTintMessages();
+
+ private:
+ void AddMessage(const tint::diag::Diagnostic& diagnostic);
+ void AddFormattedTintMessages(const tint::diag::List& diagnostics);
+
+ WGPUCompilationInfo mCompilationInfo;
+ std::vector<std::string> mMessageStrings;
+ std::vector<WGPUCompilationMessage> mMessages;
+ std::vector<std::string> mFormattedTintMessages;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMPILATIONMESSAGES_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp
new file mode 100644
index 00000000000..b06f3af692b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.cpp
@@ -0,0 +1,467 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ComputePassEncoder.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+namespace dawn::native {
+
+ namespace {
+
+ ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
+ DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+ if (store->dispatchIndirectValidationPipeline != nullptr) {
+ return store->dispatchIndirectValidationPipeline.Get();
+ }
+
+ // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
+ // shader in various failure modes.
+ // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
+ Ref<ShaderModuleBase> shaderModule;
+ DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
+ struct UniformParams {
+ maxComputeWorkgroupsPerDimension: u32;
+ clientOffsetInU32: u32;
+ enableValidation: u32;
+ duplicateNumWorkgroups: u32;
+ };
+
+ struct IndirectParams {
+ data: array<u32>;
+ };
+
+ struct ValidatedParams {
+ data: array<u32>;
+ };
+
+ @group(0) @binding(0) var<uniform> uniformParams: UniformParams;
+ @group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
+ @group(0) @binding(2) var<storage, write> validatedParams: ValidatedParams;
+
+ @stage(compute) @workgroup_size(1, 1, 1)
+ fn main() {
+ for (var i = 0u; i < 3u; i = i + 1u) {
+ var numWorkgroups = clientParams.data[uniformParams.clientOffsetInU32 + i];
+ if (uniformParams.enableValidation > 0u &&
+ numWorkgroups > uniformParams.maxComputeWorkgroupsPerDimension) {
+ numWorkgroups = 0u;
+ }
+ validatedParams.data[i] = numWorkgroups;
+
+ if (uniformParams.duplicateNumWorkgroups > 0u) {
+ validatedParams.data[i + 3u] = numWorkgroups;
+ }
+ }
+ }
+ )"));
+
+ Ref<BindGroupLayoutBase> bindGroupLayout;
+ DAWN_TRY_ASSIGN(
+ bindGroupLayout,
+ utils::MakeBindGroupLayout(
+ device,
+ {
+ {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+ {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+ },
+ /* allowInternalBinding */ true));
+
+ Ref<PipelineLayoutBase> pipelineLayout;
+ DAWN_TRY_ASSIGN(pipelineLayout,
+ utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+ ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.layout = pipelineLayout.Get();
+ computePipelineDescriptor.compute.module = shaderModule.Get();
+ computePipelineDescriptor.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
+ device->CreateComputePipeline(&computePipelineDescriptor));
+
+ return store->dispatchIndirectValidationPipeline.Get();
+ }
+
+ } // namespace
+
+ ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext)
+ : ProgrammableEncoder(device, descriptor->label, encodingContext),
+ mCommandEncoder(commandEncoder) {
+ TrackInDevice();
+ }
+
+ ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
+ }
+
+ ComputePassEncoder* ComputePassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
+ }
+
+ void ComputePassEncoder::DestroyImpl() {
+ // Ensure that the pass has exited. This is done for passes only since validation requires
+ // they exit before destruction while bundles do not.
+ mEncodingContext->EnsurePassExited(this);
+ }
+
+ ObjectType ComputePassEncoder::GetType() const {
+ return ObjectType::ComputePassEncoder;
+ }
+
+ void ComputePassEncoder::APIEnd() {
+ if (mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+ }
+
+ allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+
+ return {};
+ },
+ "encoding %s.End().", this)) {
+ mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
+ }
+ }
+
+ void ComputePassEncoder::APIEndPass() {
+ GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+ APIEnd();
+ }
+
+ void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
+ uint32_t workgroupCountY,
+ uint32_t workgroupCountZ) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+ uint32_t workgroupsPerDimension =
+ GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+
+ DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
+ "Dispatch workgroup count X (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountX, workgroupsPerDimension);
+
+ DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
+ "Dispatch workgroup count Y (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountY, workgroupsPerDimension);
+
+ DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
+ "Dispatch workgroup count Z (%u) exceeds max compute "
+ "workgroups per dimension (%u).",
+ workgroupCountZ, workgroupsPerDimension);
+ }
+
+ // Record the synchronization scope for Dispatch, which is just the current
+ // bindgroups.
+ AddDispatchSyncScope();
+
+ DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+ dispatch->x = workgroupCountX;
+ dispatch->y = workgroupCountY;
+ dispatch->z = workgroupCountZ;
+
+ return {};
+ },
+ "encoding %s.Dispatch(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
+ workgroupCountZ);
+ }
+
+ ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
+ ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
+ uint64_t indirectOffset) {
+ DeviceBase* device = GetDevice();
+
+ const bool shouldDuplicateNumWorkgroups =
+ device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ mCommandBufferState.GetComputePipeline());
+ if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
+ return std::make_pair(indirectBuffer, indirectOffset);
+ }
+
+ // Save the previous command buffer state so it can be restored after the
+ // validation inserts additional commands.
+ CommandBufferStateTracker previousState = mCommandBufferState;
+
+ auto* const store = device->GetInternalPipelineStore();
+
+ Ref<ComputePipelineBase> validationPipeline;
+ DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
+
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
+
+ uint32_t storageBufferOffsetAlignment =
+ device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+ // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
+ const uint32_t clientOffsetFromAlignedBoundary =
+ indirectOffset % storageBufferOffsetAlignment;
+ const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
+ const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
+
+ // Let the size of the binding be the additional offset, plus the size.
+ const uint64_t clientIndirectBindingSize =
+ kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
+
+ // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
+ // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
+ // non-host-shareable'.
+ struct UniformParams {
+ uint32_t maxComputeWorkgroupsPerDimension;
+ uint32_t clientOffsetInU32;
+ uint32_t enableValidation;
+ uint32_t duplicateNumWorkgroups;
+ };
+
+ // Create a uniform buffer to hold parameters for the shader.
+ Ref<BufferBase> uniformBuffer;
+ {
+ UniformParams params;
+ params.maxComputeWorkgroupsPerDimension =
+ device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+ params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
+ params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
+ params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
+
+ DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
+ device, wgpu::BufferUsage::Uniform, {params}));
+ }
+
+ // Reserve space in the scratch buffer to hold the validated indirect params.
+ ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
+ const uint64_t scratchBufferSize =
+ shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
+ DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
+ Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
+
+ Ref<BindGroupBase> validationBindGroup;
+ ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
+ DAWN_TRY_ASSIGN(validationBindGroup,
+ utils::MakeBindGroup(device, layout,
+ {
+ {0, uniformBuffer},
+ {1, indirectBuffer, clientIndirectBindingOffset,
+ clientIndirectBindingSize},
+ {2, validatedIndirectBuffer, 0, scratchBufferSize},
+ }));
+
+ // Issue commands to validate the indirect buffer.
+ APISetPipeline(validationPipeline.Get());
+ APISetBindGroup(0, validationBindGroup.Get());
+ APIDispatch(1);
+
+ // Restore the state.
+ RestoreCommandBufferState(std::move(previousState));
+
+ // Return the new indirect buffer and indirect buffer offset.
+ return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+ }
+
+ void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ indirectOffset >= indirectBuffer->GetSize() ||
+ indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
+ "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
+ "size (%u).",
+ indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
+ }
+
+ SyncScopeUsageTracker scope;
+ scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBuffer);
+ // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
+ // is needed for correct usage validation even though it will only be bound for
+                // storage. This will unnecessarily transition the |indirectBuffer| in
+ // the backend.
+
+ Ref<BufferBase> indirectBufferRef = indirectBuffer;
+
+ // Get applied indirect buffer with necessary changes on the original indirect
+ // buffer. For example,
+ // - Validate each indirect dispatch with a single dispatch to copy the indirect
+ // buffer params into a scratch buffer if they're valid, and otherwise zero them
+ // out.
+ // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
+ // D3D12.
+ // - Directly return the original indirect dispatch buffer if we don't need any
+ // transformations on it.
+ // We could consider moving the validation earlier in the pass after the last
+                // point the indirect buffer was used with writable usage, as well as batch
+ // validation for multiple dispatches into one, but inserting commands at
+ // arbitrary points in the past is not possible right now.
+ DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
+ TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
+
+ // If we have created a new scratch dispatch indirect buffer in
+ // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
+ if (indirectBufferRef.Get() != indirectBuffer) {
+ // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
+ // synchronization scope.
+ scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
+ }
+
+ AddDispatchSyncScope(std::move(scope));
+
+ DispatchIndirectCmd* dispatch =
+ allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
+ dispatch->indirectBuffer = std::move(indirectBufferRef);
+ dispatch->indirectOffset = indirectOffset;
+ return {};
+ },
+ "encoding %s.DispatchIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+ }
+
+ void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+ }
+
+ mCommandBufferState.SetComputePipeline(pipeline);
+
+ SetComputePipelineCmd* cmd =
+ allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
+ cmd->pipeline = pipeline;
+
+ return {};
+ },
+ "encoding %s.SetPipeline(%s).", this, pipeline);
+ }
+
+ void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets));
+ }
+
+ mUsageTracker.AddResourcesReferencedByBindGroup(group);
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+
+ return {};
+ },
+ "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
+ dynamicOffsetCount);
+ }
+
+ void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ }
+
+ mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+ }
+
+ void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
+ PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
+ }
+ mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+ }
+
+ void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
+ // Encode commands for the backend to restore the pipeline and bind groups.
+ if (state.HasPipeline()) {
+ APISetPipeline(state.GetComputePipeline());
+ }
+ for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+ BindGroupBase* bg = state.GetBindGroup(i);
+ if (bg != nullptr) {
+ const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
+ if (offsets.empty()) {
+ APISetBindGroup(static_cast<uint32_t>(i), bg);
+ } else {
+ APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
+ }
+ }
+ }
+
+ // Restore the frontend state tracking information.
+ mCommandBufferState = std::move(state);
+ }
+
+ CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
+ return &mCommandBufferState;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h
new file mode 100644
index 00000000000..38a1fe148d0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePassEncoder.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPUTEPASSENCODER_H_
+#define DAWNNATIVE_COMPUTEPASSENCODER_H_
+
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/ProgrammableEncoder.h"
+
+namespace dawn::native {
+
+ class SyncScopeUsageTracker;
+
+ class ComputePassEncoder final : public ProgrammableEncoder {
+ public:
+ ComputePassEncoder(DeviceBase* device,
+ const ComputePassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
+
+ static ComputePassEncoder* MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
+
+ ObjectType GetType() const override;
+
+ void APIEnd();
+ void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
+
+ void APIDispatch(uint32_t workgroupCountX,
+ uint32_t workgroupCountY = 1,
+ uint32_t workgroupCountZ = 1);
+ void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APISetPipeline(ComputePipelineBase* pipeline);
+
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
+
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
+ void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
+ RestoreCommandBufferState(std::move(state));
+ }
+
+ protected:
+ ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
+
+ private:
+ void DestroyImpl() override;
+
+ ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
+ Ref<BufferBase> indirectBuffer,
+ uint64_t indirectOffset);
+
+ void RestoreCommandBufferState(CommandBufferStateTracker state);
+
+ CommandBufferStateTracker mCommandBufferState;
+
+ // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
+ // records it in mUsageTracker.
+ void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
+ ComputePassResourceUsageTracker mUsageTracker;
+
+ // For render and compute passes, the encoding context is borrowed from the command encoder.
+ // Keep a reference to the encoder to make sure the context isn't freed.
+ Ref<CommandEncoder> mCommandEncoder;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp
new file mode 100644
index 00000000000..2de7f32009a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.cpp
@@ -0,0 +1,96 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+
+namespace dawn::native {
+
+ MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
+ }
+
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+ }
+
+ return ValidateProgrammableStage(
+ device, descriptor->compute.module, descriptor->compute.entryPoint,
+ descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
+ SingleShaderStage::Compute);
+ }
+
+ // ComputePipelineBase
+
+ ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor)
+ : PipelineBase(device,
+ descriptor->layout,
+ descriptor->label,
+ {{SingleShaderStage::Compute, descriptor->compute.module,
+ descriptor->compute.entryPoint, descriptor->compute.constantCount,
+ descriptor->compute.constants}}) {
+ SetContentHash(ComputeContentHash());
+ TrackInDevice();
+ }
+
+ ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
+ TrackInDevice();
+ }
+
+ ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : PipelineBase(device, tag) {
+ }
+
+ ComputePipelineBase::~ComputePipelineBase() = default;
+
+ void ComputePipelineBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheComputePipeline(this);
+ }
+ }
+
+ // static
+ ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
+ class ErrorComputePipeline final : public ComputePipelineBase {
+ public:
+ ErrorComputePipeline(DeviceBase* device)
+ : ComputePipelineBase(device, ObjectBase::kError) {
+ }
+
+ MaybeError Initialize() override {
+ UNREACHABLE();
+ return {};
+ }
+ };
+
+ return new ErrorComputePipeline(device);
+ }
+
+ ObjectType ComputePipelineBase::GetType() const {
+ return ObjectType::ComputePipeline;
+ }
+
+ bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
+ const ComputePipelineBase* b) const {
+ return PipelineBase::EqualForCache(a, b);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h
new file mode 100644
index 00000000000..1bd97d1dcf4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ComputePipeline.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPUTEPIPELINE_H_
+#define DAWNNATIVE_COMPUTEPIPELINE_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/Pipeline.h"
+
+namespace dawn::native {
+
+ class DeviceBase;
+ struct EntryPointMetadata;
+
+ MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+ const ComputePipelineDescriptor* descriptor);
+
+ class ComputePipelineBase : public PipelineBase {
+ public:
+ ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
+ ~ComputePipelineBase() override;
+
+ static ComputePipelineBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
+ struct EqualityFunc {
+ bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
+ };
+
+ protected:
+ // Constructor used only for mocking and testing.
+ ComputePipelineBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ private:
+ ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COMPUTEPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp
new file mode 100644
index 00000000000..4a55372cc6d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.cpp
@@ -0,0 +1,601 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CopyTextureForBrowserHelper.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <unordered_set>
+
+namespace dawn::native {
+ namespace {
+
+ static const char sCopyTextureForBrowserShader[] = R"(
+ struct GammaTransferParams {
+ G: f32;
+ A: f32;
+ B: f32;
+ C: f32;
+ D: f32;
+ E: f32;
+ F: f32;
+ padding: u32;
+ };
+
+ struct Uniforms { // offset align size
+ scale: vec2<f32>; // 0 8 8
+ offset: vec2<f32>; // 8 8 8
+ steps_mask: u32; // 16 4 4
+ // implicit padding; // 20 12
+ conversion_matrix: mat3x3<f32>; // 32 16 48
+ gamma_decoding_params: GammaTransferParams; // 80 4 32
+ gamma_encoding_params: GammaTransferParams; // 112 4 32
+ gamma_decoding_for_dst_srgb_params: GammaTransferParams; // 144 4 32
+ };
+
+ @binding(0) @group(0) var<uniform> uniforms : Uniforms;
+
+ struct VertexOutputs {
+ @location(0) texcoords : vec2<f32>;
+ @builtin(position) position : vec4<f32>;
+ };
+
+ // Chromium uses unified equation to construct gamma decoding function
+ // and gamma encoding function.
+ // The logic is:
+ // if x < D
+ // linear = C * x + F
+ // nonlinear = pow(A * x + B, G) + E
+ // (https://source.chromium.org/chromium/chromium/src/+/main:ui/gfx/color_transform.cc;l=541)
+ // Expand the equation with sign() to make it handle all gamma conversions.
+ fn gamma_conversion(v: f32, params: GammaTransferParams) -> f32 {
+ // Linear part: C * x + F
+ if (abs(v) < params.D) {
+ return sign(v) * (params.C * abs(v) + params.F);
+ }
+
+ // Gamma part: pow(A * x + B, G) + E
+ return sign(v) * (pow(params.A * abs(v) + params.B, params.G) + params.E);
+ }
+
+ @stage(vertex)
+ fn vs_main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> VertexOutputs {
+ var texcoord = array<vec2<f32>, 3>(
+ vec2<f32>(-0.5, 0.0),
+ vec2<f32>( 1.5, 0.0),
+ vec2<f32>( 0.5, 2.0));
+
+ var output : VertexOutputs;
+ output.position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
+
+ // Y component of scale is calculated by the copySizeHeight / textureHeight. Only
+ // flipY case can get negative number.
+ var flipY = uniforms.scale.y < 0.0;
+
+ // Texture coordinate takes top-left as origin point. We need to map the
+ // texture to triangle carefully.
+ if (flipY) {
+ // We need to get the mirror positions(mirrored based on y = 0.5) on flip cases.
+ // Adopt transform to src texture and then mapping it to triangle coord which
+                // do a +1 shift on Y dimension will help us get that mirror position perfectly.
+ output.texcoords = (texcoord[VertexIndex] * uniforms.scale + uniforms.offset) *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
+ } else {
+ // For the normal case, we need to get the exact position.
+ // So mapping texture to triangle firstly then adopt the transform.
+ output.texcoords = (texcoord[VertexIndex] *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
+ uniforms.scale + uniforms.offset;
+ }
+
+ return output;
+ }
+
+ @binding(1) @group(0) var mySampler: sampler;
+ @binding(2) @group(0) var myTexture: texture_2d<f32>;
+
+ @stage(fragment)
+ fn fs_main(
+ @location(0) texcoord : vec2<f32>
+ ) -> @location(0) vec4<f32> {
+ // Clamp the texcoord and discard the out-of-bound pixels.
+ var clampedTexcoord =
+ clamp(texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
+ if (!all(clampedTexcoord == texcoord)) {
+ discard;
+ }
+
+ // Swizzling of texture formats when sampling / rendering is handled by the
+ // hardware so we don't need special logic in this shader. This is covered by tests.
+ var color = textureSample(myTexture, mySampler, texcoord);
+
+ let kUnpremultiplyStep = 0x01u;
+ let kDecodeToLinearStep = 0x02u;
+ let kConvertToDstGamutStep = 0x04u;
+ let kEncodeToGammaStep = 0x08u;
+ let kPremultiplyStep = 0x10u;
+ let kDecodeForSrgbDstFormat = 0x20u;
+
+            // Unpremultiply step. Applying color space conversion op on premultiplied source texture
+ // also needs to unpremultiply first.
+ if (bool(uniforms.steps_mask & kUnpremultiplyStep)) {
+ if (color.a != 0.0) {
+ color = vec4<f32>(color.rgb / color.a, color.a);
+ }
+ }
+
+ // Linearize the source color using the source color space’s
+ // transfer function if it is non-linear.
+ if (bool(uniforms.steps_mask & kDecodeToLinearStep)) {
+ color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_decoding_params),
+ gamma_conversion(color.g, uniforms.gamma_decoding_params),
+ gamma_conversion(color.b, uniforms.gamma_decoding_params),
+ color.a);
+ }
+
+ // Convert unpremultiplied, linear source colors to the destination gamut by
+ // multiplying by a 3x3 matrix. Calculate transformFromXYZD50 * transformToXYZD50
+ // in CPU side and upload the final result in uniforms.
+ if (bool(uniforms.steps_mask & kConvertToDstGamutStep)) {
+ color = vec4<f32>(uniforms.conversion_matrix * color.rgb, color.a);
+ }
+
+ // Encode that color using the inverse of the destination color
+ // space’s transfer function if it is non-linear.
+ if (bool(uniforms.steps_mask & kEncodeToGammaStep)) {
+ color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_encoding_params),
+ gamma_conversion(color.g, uniforms.gamma_encoding_params),
+ gamma_conversion(color.b, uniforms.gamma_encoding_params),
+ color.a);
+ }
+
+ // Premultiply step.
+ if (bool(uniforms.steps_mask & kPremultiplyStep)) {
+ color = vec4<f32>(color.rgb * color.a, color.a);
+ }
+
+ // Decode for copying from non-srgb formats to srgb formats
+ if (bool(uniforms.steps_mask & kDecodeForSrgbDstFormat)) {
+ color = vec4<f32>(gamma_conversion(color.r, uniforms.gamma_decoding_for_dst_srgb_params),
+ gamma_conversion(color.g, uniforms.gamma_decoding_for_dst_srgb_params),
+ gamma_conversion(color.b, uniforms.gamma_decoding_for_dst_srgb_params),
+ color.a);
+ }
+
+ return color;
+ }
+ )";
+
+ // Follow the same order of skcms_TransferFunction
+ // https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
+ struct GammaTransferParams {
+ float G = 0.0;
+ float A = 0.0;
+ float B = 0.0;
+ float C = 0.0;
+ float D = 0.0;
+ float E = 0.0;
+ float F = 0.0;
+ uint32_t padding = 0;
+ };
+
+ struct Uniform {
+ float scaleX;
+ float scaleY;
+ float offsetX;
+ float offsetY;
+ uint32_t stepsMask = 0;
+ const std::array<uint32_t, 3> padding = {}; // 12 bytes padding
+ std::array<float, 12> conversionMatrix = {};
+ GammaTransferParams gammaDecodingParams = {};
+ GammaTransferParams gammaEncodingParams = {};
+ GammaTransferParams gammaDecodingForDstSrgbParams = {};
+ };
+ static_assert(sizeof(Uniform) == 176);
+
+ // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
+ // non-depth, non-stencil, non-compressed texture format pair copy. Now this API
+ // supports CopyImageBitmapToTexture normal format pairs.
+ MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
+ const wgpu::TextureFormat dstFormat) {
+ switch (srcFormat) {
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Source texture format (%s) is not supported.", srcFormat);
+ }
+
+ switch (dstFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Destination texture format (%s) is not supported.", dstFormat);
+ }
+
+ return {};
+ }
+
+ RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
+ wgpu::TextureFormat dstFormat) {
+ auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
+ if (pipeline != store->copyTextureForBrowserPipelines.end()) {
+ return pipeline->second.Get();
+ }
+ return nullptr;
+ }
+
+ ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
+ DeviceBase* device,
+ wgpu::TextureFormat dstFormat) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+ if (GetCachedPipeline(store, dstFormat) == nullptr) {
+ // Create vertex shader module if not cached before.
+ if (store->copyTextureForBrowser == nullptr) {
+ DAWN_TRY_ASSIGN(
+ store->copyTextureForBrowser,
+ utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
+ }
+
+ ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
+
+ // Prepare vertex stage.
+ VertexState vertex = {};
+ vertex.module = shaderModule;
+ vertex.entryPoint = "vs_main";
+
+            // Prepare fragment stage.
+ FragmentState fragment = {};
+ fragment.module = shaderModule;
+ fragment.entryPoint = "fs_main";
+
+ // Prepare color state.
+ ColorTargetState target = {};
+ target.format = dstFormat;
+
+ // Create RenderPipeline.
+ RenderPipelineDescriptor renderPipelineDesc = {};
+
+ // Generate the layout based on shader modules.
+ renderPipelineDesc.layout = nullptr;
+
+ renderPipelineDesc.vertex = vertex;
+ renderPipelineDesc.fragment = &fragment;
+
+ renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+
+ fragment.targetCount = 1;
+ fragment.targets = &target;
+
+ Ref<RenderPipelineBase> pipeline;
+ DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
+ store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
+ }
+
+ return GetCachedPipeline(store, dstFormat);
+ }
+ } // anonymous namespace
+
+ MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+ DAWN_TRY(device->ValidateObject(source->texture));
+ DAWN_TRY(device->ValidateObject(destination->texture));
+
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
+ "validating the ImageCopyTexture for the source");
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
+ "validating the ImageCopyTexture for the destination");
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
+ "validating that the copy fits in the source");
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
+ "validating that the copy fits in the destination");
+
+ DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
+
+ DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
+ source->origin.z);
+ DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
+ "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
+
+ DAWN_INVALID_IF(
+ source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
+ "The source texture sample count (%u) or the destination texture sample count (%u) is "
+ "not 1.",
+ source->texture->GetSampleCount(), destination->texture->GetSampleCount());
+
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+ UsageValidationMode::Default));
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
+ UsageValidationMode::Default));
+
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+ UsageValidationMode::Default));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
+ UsageValidationMode::Default));
+
+ DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
+ destination->texture->GetFormat().format));
+
+ DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
+ DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
+
+ if (options->needsColorSpaceConversion) {
+ DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
+ "srcTransferFunctionParameters is nullptr when doing color conversion");
+ DAWN_INVALID_IF(options->conversionMatrix == nullptr,
+ "conversionMatrix is nullptr when doing color conversion");
+ DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
+ "dstTransferFunctionParameters is nullptr when doing color conversion");
+ }
+ return {};
+ }
+
+ // Whether the format of dst texture of CopyTextureForBrowser() is srgb or non-srgb.
+ bool IsSrgbDstFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
+ // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
+ // copy to each other. This can be a potential fast path.
+
+ // Noop copy
+ if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
+ return {};
+ }
+
+ bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
+ RenderPipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
+ device, destination->texture->GetFormat().format));
+
+ // Prepare bind group layout.
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ Extent3D srcTextureSize = source->texture->GetSize();
+
+ // Prepare binding 0 resource: uniform buffer.
+ Uniform uniformData = {
+ copySize->width / static_cast<float>(srcTextureSize.width),
+ copySize->height / static_cast<float>(srcTextureSize.height), // scale
+ source->origin.x / static_cast<float>(srcTextureSize.width),
+ source->origin.y / static_cast<float>(srcTextureSize.height) // offset
+ };
+
+ // Handle flipY. FlipY here means we flip the source texture firstly and then
+ // do copy. This helps on the case which source texture is flipped and the copy
+ // need to unpack the flip.
+ if (options->flipY) {
+ uniformData.scaleY *= -1.0;
+ uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
+ }
+
+ uint32_t stepsMask = 0u;
+
+ // Steps to do color space conversion
+ // From https://skia.org/docs/user/color/
+ // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
+ // management, and we need to divide it out if it’s multiplied in.
+ // - linearize the source color using the source color space’s transfer function
+ // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
+ // a 3x3 matrix.
+ // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
+ // - encode that color using the inverse of the destination color space’s transfer function.
+ // - premultiply by alpha if the destination is premultiplied.
+ // The reason to choose XYZ D50 as intermediate color space:
+ // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
+ // "Since the Lab TIFF specification, the ICC profile specification and
+ // Adobe Photoshop all use a D50"
+ constexpr uint32_t kUnpremultiplyStep = 0x01;
+ constexpr uint32_t kDecodeToLinearStep = 0x02;
+ constexpr uint32_t kConvertToDstGamutStep = 0x04;
+ constexpr uint32_t kEncodeToGammaStep = 0x08;
+ constexpr uint32_t kPremultiplyStep = 0x10;
+ constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
+
+ if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
+ if (options->needsColorSpaceConversion ||
+ options->srcAlphaMode != options->dstAlphaMode) {
+ stepsMask |= kUnpremultiplyStep;
+ }
+ }
+
+ if (options->needsColorSpaceConversion) {
+ stepsMask |= kDecodeToLinearStep;
+ const float* decodingParams = options->srcTransferFunctionParameters;
+
+ uniformData.gammaDecodingParams = {
+ decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
+ decodingParams[4], decodingParams[5], decodingParams[6]};
+
+ stepsMask |= kConvertToDstGamutStep;
+ const float* matrix = options->conversionMatrix;
+ uniformData.conversionMatrix = {{
+ matrix[0],
+ matrix[1],
+ matrix[2],
+ 0.0,
+ matrix[3],
+ matrix[4],
+ matrix[5],
+ 0.0,
+ matrix[6],
+ matrix[7],
+ matrix[8],
+ 0.0,
+ }};
+
+ stepsMask |= kEncodeToGammaStep;
+ const float* encodingParams = options->dstTransferFunctionParameters;
+
+ uniformData.gammaEncodingParams = {
+ encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
+ encodingParams[4], encodingParams[5], encodingParams[6]};
+ }
+
+ if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
+ if (options->needsColorSpaceConversion ||
+ options->srcAlphaMode != options->dstAlphaMode) {
+ stepsMask |= kPremultiplyStep;
+ }
+ }
+
+ // Copy to *-srgb texture should keep the bytes exactly the same as copy
+ // to non-srgb texture. Add an extra decode-to-linear step so that after the
+ // sampler of *-srgb format texture applying encoding, the bytes keeps the same
+ // as non-srgb format texture.
+ // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format texture as
+ // source input. But above operation also valid for *-srgb format texture input and
+ // non-srgb format dst texture.
+ // TODO(crbug.com/dawn/1195): Reinterpret to non-srgb texture view on *-srgb texture
+ // and use it as render attachment when possible.
+ // TODO(crbug.com/dawn/1195): Opt the condition for this extra step. It is possible to
+ // bypass this extra step in some cases.
+ if (isSrgbDstFormat) {
+ stepsMask |= kDecodeForSrgbDstFormat;
+ // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+            // mathematics. Order: {G, A, B, C, D, E, F}
+ uniformData.gammaDecodingForDstSrgbParams = {
+ 2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
+ }
+
+ uniformData.stepsMask = stepsMask;
+
+ Ref<BufferBase> uniformBuffer;
+ DAWN_TRY_ASSIGN(
+ uniformBuffer,
+ utils::CreateBufferFromData(
+ device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
+
+ // Prepare binding 1 resource: sampler
+ // Use default configuration, filterMode set to Nearest for min and mag.
+ SamplerDescriptor samplerDesc = {};
+ Ref<SamplerBase> sampler;
+ DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
+
+ // Prepare binding 2 resource: sampled texture
+ TextureViewDescriptor srcTextureViewDesc = {};
+ srcTextureViewDesc.baseMipLevel = source->mipLevel;
+ srcTextureViewDesc.mipLevelCount = 1;
+ srcTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> srcTextureView;
+ DAWN_TRY_ASSIGN(srcTextureView,
+ device->CreateTextureView(source->texture, &srcTextureViewDesc));
+
+ // Create bind group after all binding entries are set.
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
+ device, layout,
+ {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+
+ // Create command encoder.
+ CommandEncoderDescriptor encoderDesc = {};
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<CommandEncoder> encoder = AcquireRef(device->APICreateCommandEncoder(&encoderDesc));
+
+ // Prepare dst texture view as color Attachment.
+ TextureViewDescriptor dstTextureViewDesc;
+ dstTextureViewDesc.baseMipLevel = destination->mipLevel;
+ dstTextureViewDesc.mipLevelCount = 1;
+ dstTextureViewDesc.baseArrayLayer = destination->origin.z;
+ dstTextureViewDesc.arrayLayerCount = 1;
+ Ref<TextureViewBase> dstView;
+
+ DAWN_TRY_ASSIGN(dstView,
+ device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+ // Prepare render pass color attachment descriptor.
+ RenderPassColorAttachment colorAttachmentDesc;
+
+ colorAttachmentDesc.view = dstView.Get();
+ colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
+ colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
+ colorAttachmentDesc.clearColor = {0.0, 0.0, 0.0, 1.0};
+
+ // Create render pass.
+ RenderPassDescriptor renderPassDesc;
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &colorAttachmentDesc;
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<RenderPassEncoder> passEncoder =
+ AcquireRef(encoder->APIBeginRenderPass(&renderPassDesc));
+
+ // Start pipeline and encode commands to complete
+ // the copy from src texture to dst texture with transformation.
+ passEncoder->APISetPipeline(pipeline);
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
+ copySize->height, 0.0, 1.0);
+ passEncoder->APIDraw(3);
+ passEncoder->APIEnd();
+
+        // Finish encoding.
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<CommandBufferBase> commandBuffer = AcquireRef(encoder->APIFinish());
+ CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
+
+ // Submit command buffer.
+ device->GetQueue()->APISubmit(1, &submitCommandBuffer);
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h
new file mode 100644
index 00000000000..de82f5f979b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CopyTextureForBrowserHelper.h
@@ -0,0 +1,41 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
+#define DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+
+namespace dawn::native {
+ class DeviceBase;
+ struct Extent3D;
+ struct ImageCopyTexture;
+ struct CopyTextureForBrowserOptions;
+
+ MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
+
+ MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp
new file mode 100644
index 00000000000..ed7764e9c16
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.cpp
@@ -0,0 +1,202 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+
+#include "dawn/native/AsyncTask.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native {
+
+ CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
+ std::string errorMessage,
+ void* userdata)
+ : mErrorMessage(errorMessage), mUserData(userdata) {
+ }
+
+ CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
+ Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata)
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
+ mCreateComputePipelineAsyncCallback(callback) {
+ }
+
+ void CreateComputePipelineAsyncCallbackTask::Finish() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ if (mPipeline.Get() != nullptr) {
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+ ToAPI(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
+ }
+ }
+
+ void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+ }
+
+ void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
+ }
+
+ CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
+ Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata)
+ : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
+ mCreateRenderPipelineAsyncCallback(callback) {
+ }
+
+ void CreateRenderPipelineAsyncCallbackTask::Finish() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ if (mPipeline.Get() != nullptr) {
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+ ToAPI(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
+ }
+ }
+
+ void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+ }
+
+ void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
+ }
+
+ CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
+ Ref<ComputePipelineBase> nonInitializedComputePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata)
+ : mComputePipeline(std::move(nonInitializedComputePipeline)),
+ mCallback(callback),
+ mUserdata(userdata) {
+ ASSERT(mComputePipeline != nullptr);
+ }
+
+ void CreateComputePipelineAsyncTask::Run() {
+ const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
+ TRACE_EVENT_FLOW_END1(mComputePipeline->GetDevice()->GetPlatform(), General,
+ "CreateComputePipelineAsyncTask::RunAsync", this, "label",
+ eventLabel);
+ TRACE_EVENT1(mComputePipeline->GetDevice()->GetPlatform(), General,
+ "CreateComputePipelineAsyncTask::Run", "label", eventLabel);
+
+ MaybeError maybeError = mComputePipeline->Initialize();
+ std::string errorMessage;
+ if (maybeError.IsError()) {
+ mComputePipeline = nullptr;
+ errorMessage = maybeError.AcquireError()->GetMessage();
+ }
+
+ mComputePipeline->GetDevice()->AddComputePipelineAsyncCallbackTask(
+ mComputePipeline, errorMessage, mCallback, mUserdata);
+ }
+
+ void CreateComputePipelineAsyncTask::RunAsync(
+ std::unique_ptr<CreateComputePipelineAsyncTask> task) {
+ DeviceBase* device = task->mComputePipeline->GetDevice();
+
+ const char* eventLabel =
+ utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+
+ // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+ // since C++14:
+ // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+ auto asyncTask = [taskPtr = task.release()] {
+ std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
+ innnerTaskPtr->Run();
+ };
+
+ TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+ "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
+ eventLabel);
+ device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+ }
+
+ CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+ Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata)
+ : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
+ mCallback(callback),
+ mUserdata(userdata) {
+ ASSERT(mRenderPipeline != nullptr);
+ }
+
+ void CreateRenderPipelineAsyncTask::Run() {
+ const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
+ TRACE_EVENT_FLOW_END1(mRenderPipeline->GetDevice()->GetPlatform(), General,
+ "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
+ TRACE_EVENT1(mRenderPipeline->GetDevice()->GetPlatform(), General,
+ "CreateRenderPipelineAsyncTask::Run", "label", eventLabel);
+
+ MaybeError maybeError = mRenderPipeline->Initialize();
+ std::string errorMessage;
+ if (maybeError.IsError()) {
+ mRenderPipeline = nullptr;
+ errorMessage = maybeError.AcquireError()->GetMessage();
+ }
+
+ mRenderPipeline->GetDevice()->AddRenderPipelineAsyncCallbackTask(
+ mRenderPipeline, errorMessage, mCallback, mUserdata);
+ }
+
+ void CreateRenderPipelineAsyncTask::RunAsync(
+ std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+ DeviceBase* device = task->mRenderPipeline->GetDevice();
+
+ const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
+
+ // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+ // since C++14:
+ // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+ auto asyncTask = [taskPtr = task.release()] {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
+ innerTaskPtr->Run();
+ };
+
+ TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+ "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
+ eventLabel);
+ device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h
new file mode 100644
index 00000000000..4b936cf6741
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/CreatePipelineAsyncTask.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+#define DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/CallbackTaskManager.h"
+#include "dawn/native/Error.h"
+#include "dawn/webgpu.h"
+
+namespace dawn::native {
+
+ class ComputePipelineBase;
+ class DeviceBase;
+ class PipelineLayoutBase;
+ class RenderPipelineBase;
+ class ShaderModuleBase;
+ struct FlatComputePipelineDescriptor;
+
+ struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
+ CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
+
+ protected:
+ std::string mErrorMessage;
+ void* mUserData;
+ };
+
+ struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+ CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ void Finish() override;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ protected:
+ Ref<ComputePipelineBase> mPipeline;
+ WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
+ };
+
+ struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+ CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void Finish() override;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
+
+ protected:
+ Ref<RenderPipelineBase> mPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
+ };
+
+ // CreateComputePipelineAsyncTask defines all the inputs and outputs of
+ // CreateComputePipelineAsync() tasks, which are the same among all the backends.
+ class CreateComputePipelineAsyncTask {
+ public:
+ CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ void Run();
+
+ static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
+
+ private:
+ Ref<ComputePipelineBase> mComputePipeline;
+ WGPUCreateComputePipelineAsyncCallback mCallback;
+ void* mUserdata;
+ };
+
+ // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
+ // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
+ class CreateRenderPipelineAsyncTask {
+ public:
+ CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void Run();
+
+ static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
+
+ private:
+ Ref<RenderPipelineBase> mRenderPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCallback;
+ void* mUserdata;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp
new file mode 100644
index 00000000000..ca46df8db3a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/DawnNative.cpp
@@ -0,0 +1,312 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/DawnNative.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Texture.h"
+#include "dawn/platform/DawnPlatform.h"
+
+// Contains the entry-points into dawn_native
+
+namespace dawn::native {
+
+ namespace {
+ struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
+ ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
+ dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
+ "WGPUDeviceDescriptor instead.";
+
+ DeviceDescriptor* desc = this;
+
+ if (deviceDescriptor != nullptr) {
+ desc->nextInChain = &mTogglesDesc;
+ mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
+ mTogglesDesc.forceEnabledTogglesCount =
+ deviceDescriptor->forceEnabledToggles.size();
+ mTogglesDesc.forceDisabledToggles =
+ deviceDescriptor->forceDisabledToggles.data();
+ mTogglesDesc.forceDisabledTogglesCount =
+ deviceDescriptor->forceDisabledToggles.size();
+
+ desc->requiredLimits =
+ reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
+
+ FeaturesInfo featuresInfo;
+ for (const char* featureStr : deviceDescriptor->requiredFeatures) {
+ mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
+ }
+ desc->requiredFeatures = mRequiredFeatures.data();
+ desc->requiredFeaturesCount = mRequiredFeatures.size();
+ }
+ }
+
+ DawnTogglesDeviceDescriptor mTogglesDesc = {};
+ std::vector<wgpu::FeatureName> mRequiredFeatures = {};
+ };
+ } // namespace
+
+ const DawnProcTable& GetProcsAutogen();
+
+ const DawnProcTable& GetProcs() {
+ return GetProcsAutogen();
+ }
+
+ std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
+ return FromAPI(device)->GetTogglesUsed();
+ }
+
+ // Adapter
+
+ Adapter::Adapter() = default;
+
+ Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
+ if (mImpl != nullptr) {
+ mImpl->Reference();
+ }
+ }
+
+ Adapter::~Adapter() {
+ if (mImpl != nullptr) {
+ mImpl->Release();
+ }
+ mImpl = nullptr;
+ }
+
+ Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
+ }
+
+ Adapter& Adapter::operator=(const Adapter& other) {
+ if (this != &other) {
+ if (mImpl) {
+ mImpl->Release();
+ }
+ mImpl = other.mImpl;
+ if (mImpl) {
+ mImpl->Reference();
+ }
+ }
+ return *this;
+ }
+
+ void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
+ GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
+ }
+
+ void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+ mImpl->APIGetProperties(FromAPI(properties));
+ }
+
+ WGPUAdapter Adapter::Get() const {
+ return ToAPI(mImpl);
+ }
+
+ std::vector<const char*> Adapter::GetSupportedFeatures() const {
+ FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
+ return supportedFeaturesSet.GetEnabledFeatureNames();
+ }
+
+ WGPUDeviceProperties Adapter::GetAdapterProperties() const {
+ return mImpl->GetAdapterProperties();
+ }
+
+ bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+ return mImpl->GetLimits(FromAPI(limits));
+ }
+
+ void Adapter::SetUseTieredLimits(bool useTieredLimits) {
+ mImpl->SetUseTieredLimits(useTieredLimits);
+ }
+
+ bool Adapter::SupportsExternalImages() const {
+ return mImpl->SupportsExternalImages();
+ }
+
+ Adapter::operator bool() const {
+ return mImpl != nullptr;
+ }
+
+ WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
+ ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
+ return ToAPI(mImpl->APICreateDevice(&desc));
+ }
+
+ WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
+ return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
+ }
+
+ WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
+ return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
+ }
+
+ void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
+ mImpl->APIRequestDevice(&desc, callback, userdata);
+ }
+
+ void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+ userdata);
+ }
+
+ void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+ userdata);
+ }
+
+ void Adapter::ResetInternalDeviceForTesting() {
+ mImpl->ResetInternalDeviceForTesting();
+ }
+
+ // AdapterDiscoverOptionsBase
+
+ AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
+ : backendType(type) {
+ }
+
+ // Instance
+
+ Instance::Instance(const WGPUInstanceDescriptor* desc)
+ : mImpl(InstanceBase::Create(reinterpret_cast<const InstanceDescriptor*>(desc))) {
+ }
+
+ Instance::~Instance() {
+ if (mImpl != nullptr) {
+ mImpl->Release();
+ mImpl = nullptr;
+ }
+ }
+
+ void Instance::DiscoverDefaultAdapters() {
+ mImpl->DiscoverDefaultAdapters();
+ }
+
+ bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+ return mImpl->DiscoverAdapters(options);
+ }
+
+ std::vector<Adapter> Instance::GetAdapters() const {
+ // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
+ std::vector<Adapter> adapters;
+ for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
+ adapters.push_back({adapter.Get()});
+ }
+ return adapters;
+ }
+
+ const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
+ return mImpl->GetToggleInfo(toggleName);
+ }
+
+ const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
+ return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
+ }
+
+ void Instance::EnableBackendValidation(bool enableBackendValidation) {
+ if (enableBackendValidation) {
+ mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
+ }
+ }
+
+ void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
+ mImpl->SetBackendValidationLevel(level);
+ }
+
+ void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+ mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
+ }
+
+ void Instance::SetPlatform(dawn::platform::Platform* platform) {
+ mImpl->SetPlatform(platform);
+ }
+
+ WGPUInstance Instance::Get() const {
+ return ToAPI(mImpl);
+ }
+
+ size_t GetLazyClearCountForTesting(WGPUDevice device) {
+ return FromAPI(device)->GetLazyClearCountForTesting();
+ }
+
+ size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
+ return FromAPI(device)->GetDeprecationWarningCountForTesting();
+ }
+
+ bool IsTextureSubresourceInitialized(WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ WGPUTextureAspect cAspect) {
+ TextureBase* textureBase = FromAPI(texture);
+
+ Aspect aspect =
+ ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
+ SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
+ return textureBase->IsSubresourceContentInitialized(range);
+ }
+
+ std::vector<const char*> GetProcMapNamesForTestingInternal();
+
+ std::vector<const char*> GetProcMapNamesForTesting() {
+ return GetProcMapNamesForTestingInternal();
+ }
+
+ DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
+ return FromAPI(device)->APITick();
+ }
+
+ // ExternalImageDescriptor
+
+ ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
+ }
+
+ ExternalImageType ExternalImageDescriptor::GetType() const {
+ return mType;
+ }
+
+ // ExternalImageExportInfo
+
+ ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
+ }
+
+ ExternalImageType ExternalImageExportInfo::GetType() const {
+ return mType;
+ }
+
+ const char* GetObjectLabelForTesting(void* objectHandle) {
+ ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
+ return object->GetLabel().c_str();
+ }
+
+ uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
+ return FromAPI(buffer)->GetAllocatedSize();
+ }
+
+ bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
+ bool excludePipelineCompatibiltyToken = true;
+ return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Device.cpp b/chromium/third_party/dawn/src/dawn/native/Device.cpp
new file mode 100644
index 00000000000..477decb2d74
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Device.cpp
@@ -0,0 +1,1758 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Device.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/AsyncTask.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/ErrorInjector.h"
+#include "dawn/native/ErrorScope.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PersistentCache.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderBundleEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/SwapChain.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <array>
+#include <mutex>
+#include <unordered_set>
+
+namespace dawn::native {
+
+ // DeviceBase sub-structures
+
+ // The caches are unordered_sets of pointers with special hash and compare functions
+ // to compare the value of the objects, instead of the pointers.
+ template <typename Object>
+ using ContentLessObjectCache =
+ std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
+
+ struct DeviceBase::Caches {
+ ~Caches() {
+ ASSERT(attachmentStates.empty());
+ ASSERT(bindGroupLayouts.empty());
+ ASSERT(computePipelines.empty());
+ ASSERT(pipelineLayouts.empty());
+ ASSERT(renderPipelines.empty());
+ ASSERT(samplers.empty());
+ ASSERT(shaderModules.empty());
+ }
+
+ ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
+ ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
+ ContentLessObjectCache<ComputePipelineBase> computePipelines;
+ ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
+ ContentLessObjectCache<RenderPipelineBase> renderPipelines;
+ ContentLessObjectCache<SamplerBase> samplers;
+ ContentLessObjectCache<ShaderModuleBase> shaderModules;
+ };
+
+ struct DeviceBase::DeprecationWarnings {
+ std::unordered_set<std::string> emitted;
+ size_t count = 0;
+ };
+
+ namespace {
+ struct LoggingCallbackTask : CallbackTask {
+ public:
+ LoggingCallbackTask() = delete;
+ LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
+ WGPULoggingType loggingType,
+ const char* message,
+ void* userdata)
+ : mCallback(loggingCallback),
+ mLoggingType(loggingType),
+ mMessage(message),
+ mUserdata(userdata) {
+ // Since the Finish() will be called in uncertain future in which time the message
+ // may already disposed, we must keep a local copy in the CallbackTask.
+ }
+
+ void Finish() override {
+ mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+ }
+
+ void HandleShutDown() override {
+ // Do the logging anyway
+ mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+ }
+
+ void HandleDeviceLoss() override {
+ mCallback(mLoggingType, mMessage.c_str(), mUserdata);
+ }
+
+ private:
+ // As all deferred callback tasks will be triggered before modifying the registered
+ // callback or shutting down, we are ensured that callback function and userdata pointer
+ // stored in tasks is valid when triggered.
+ wgpu::LoggingCallback mCallback;
+ WGPULoggingType mLoggingType;
+ std::string mMessage;
+ void* mUserdata;
+ };
+
+ ResultOrError<Ref<PipelineLayoutBase>>
+ ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const ComputePipelineDescriptor& descriptor,
+ ComputePipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (outDescriptor->layout == nullptr) {
+ DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+ device, {{
+ SingleShaderStage::Compute,
+ outDescriptor->compute.module,
+ outDescriptor->compute.entryPoint,
+ outDescriptor->compute.constantCount,
+ outDescriptor->compute.constants,
+ }}));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>>
+ ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const RenderPipelineDescriptor& descriptor,
+ RenderPipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (descriptor.layout == nullptr) {
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ DAWN_TRY_ASSIGN(layoutRef,
+ PipelineLayoutBase::CreateDefault(
+ device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+ }
+
+ } // anonymous namespace
+
+ // DeviceBase
+
+ DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
+ : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
+ ASSERT(descriptor != nullptr);
+
+ const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &togglesDesc);
+ if (togglesDesc != nullptr) {
+ ApplyToggleOverrides(togglesDesc);
+ }
+ ApplyFeatures(descriptor);
+
+ if (descriptor->requiredLimits != nullptr) {
+ mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
+ } else {
+ GetDefaultLimits(&mLimits.v1);
+ }
+
+ mFormatTable = BuildFormatTable(this);
+ SetDefaultToggles();
+ }
+
+ DeviceBase::DeviceBase() : mState(State::Alive) {
+ mCaches = std::make_unique<DeviceBase::Caches>();
+ }
+
+ DeviceBase::~DeviceBase() {
+ // We need to explicitly release the Queue before we complete the destructor so that the
+ // Queue does not get destroyed after the Device.
+ mQueue = nullptr;
+ }
+
+ MaybeError DeviceBase::Initialize(QueueBase* defaultQueue) {
+ mQueue = AcquireRef(defaultQueue);
+
+#if defined(DAWN_ENABLE_ASSERTS)
+ mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+ "probably not intended. If you really want to ignore errors "
+ "and suppress this message, set the callback to null.";
+ }
+ };
+
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+ "intended. If you really want to ignore device lost "
+ "and suppress this message, set the callback to null.";
+ }
+ };
+#endif // DAWN_ENABLE_ASSERTS
+
+ mCaches = std::make_unique<DeviceBase::Caches>();
+ mErrorScopeStack = std::make_unique<ErrorScopeStack>();
+ mDynamicUploader = std::make_unique<DynamicUploader>(this);
+ mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
+ mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
+ mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
+ mPersistentCache = std::make_unique<PersistentCache>(this);
+
+ ASSERT(GetPlatform() != nullptr);
+ mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
+ mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
+
+ // Starting from now the backend can start doing reentrant calls so the device is marked as
+ // alive.
+ mState = State::Alive;
+
+ DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
+
+ // If dummy fragment shader module is needed, initialize it
+ if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+ // The empty fragment shader, used as a work around for vertex-only render pipeline
+ constexpr char kEmptyFragmentShader[] = R"(
+ @stage(fragment) fn fs_empty_main() {}
+ )";
+ ShaderModuleDescriptor descriptor;
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = kEmptyFragmentShader;
+ descriptor.nextInChain = &wgslDesc;
+
+ DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
+ CreateShaderModule(&descriptor));
+ }
+
+ return {};
+ }
+
+    // Eagerly destroys every API object the device is tracking, walking object
+    // types in reverse dependency order so an object is never destroyed before
+    // another object that still holds a reference to it.
+    void DeviceBase::DestroyObjects() {
+        // List of object types in reverse "dependency" order so we can iterate and delete the
+        // objects safely starting at leaf objects. We define dependent here such that if B has
+        // a ref to A, then B depends on A. We therefore try to destroy B before destroying A. Note
+        // that this only considers the immediate frontend dependencies, while backend objects could
+        // add complications and extra dependencies.
+        //
+        // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
+        // since AttachmentStates are cached by the device, objects that hold references to
+        // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
+        // can destroy the frontend cache.
+
+        // clang-format off
+        static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
+            ObjectType::ComputePassEncoder,
+            ObjectType::RenderPassEncoder,
+            ObjectType::RenderBundleEncoder,
+            ObjectType::RenderBundle,
+            ObjectType::CommandEncoder,
+            ObjectType::CommandBuffer,
+            ObjectType::RenderPipeline,
+            ObjectType::ComputePipeline,
+            ObjectType::PipelineLayout,
+            ObjectType::SwapChain,
+            ObjectType::BindGroup,
+            ObjectType::BindGroupLayout,
+            ObjectType::ShaderModule,
+            ObjectType::ExternalTexture,
+            ObjectType::TextureView,
+            ObjectType::Texture,
+            ObjectType::QuerySet,
+            ObjectType::Sampler,
+            ObjectType::Buffer,
+        };
+        // clang-format on
+
+        // We first move all objects out from the tracking list into a separate list so that we can
+        // avoid locking the same mutex twice. We can then iterate across the separate list to call
+        // the actual destroy function.
+        LinkedList<ApiObjectBase> objects;
+        for (ObjectType type : kObjectTypeDependencyOrder) {
+            ApiObjectList& objList = mObjectLists[type];
+            // Hold the per-type lock only while detaching; Destroy() below runs unlocked.
+            const std::lock_guard<std::mutex> lock(objList.mutex);
+            objList.objects.MoveInto(&objects);
+        }
+        for (LinkNode<ApiObjectBase>* node : objects) {
+            node->value()->Destroy();
+        }
+    }
+
+    // Tears the device down: fires the device-lost callback, drains pending async
+    // and callback tasks, waits for (or assumes) GPU completion depending on the
+    // current state, destroys all tracked objects, then destroys the backend device.
+    // Idempotent: returns immediately if already in State::Destroyed.
+    void DeviceBase::Destroy() {
+        // Skip if we are already destroyed.
+        if (mState == State::Destroyed) {
+            return;
+        }
+
+        // Skip handling device facilities if they haven't even been created (or failed doing so)
+        if (mState != State::BeingCreated) {
+            // The device is being destroyed so it will be lost, call the application callback.
+            if (mDeviceLostCallback != nullptr) {
+                mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
+                                    mDeviceLostUserdata);
+                // Clear the callback so it cannot fire a second time (e.g. from HandleError).
+                mDeviceLostCallback = nullptr;
+            }
+
+            // Call all the callbacks immediately as the device is about to shut down.
+            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+            mAsyncTaskManager->WaitAllPendingTasks();
+            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+                callbackTask->HandleShutDown();
+            }
+        }
+
+        // Disconnect the device, depending on which state we are currently in.
+        switch (mState) {
+            case State::BeingCreated:
+                // The GPU timeline was never started so we don't have to wait.
+                break;
+
+            case State::Alive:
+                // Alive is the only state which can have GPU work happening. Wait for all of it to
+                // complete before proceeding with destruction.
+                // Ignore errors so that we can continue with destruction
+                IgnoreErrors(WaitForIdleForDestruction());
+                AssumeCommandsComplete();
+                break;
+
+            case State::BeingDisconnected:
+                // Getting disconnected is a transient state happening in a single API call so there
+                // is always an external reference keeping the Device alive, which means the
+                // destructor cannot run while BeingDisconnected.
+                UNREACHABLE();
+                break;
+
+            case State::Disconnected:
+                break;
+
+            case State::Destroyed:
+                // If we are already destroyed we should've skipped this work entirely.
+                UNREACHABLE();
+                break;
+        }
+        // All GPU work must be accounted for before we continue tearing down.
+        ASSERT(mCompletedSerial == mLastSubmittedSerial);
+        ASSERT(mFutureSerial <= mCompletedSerial);
+
+        if (mState != State::BeingCreated) {
+            // The GPU timeline is finished.
+            // Finish destroying all objects owned by the device and tick the queue-related tasks
+            // since they should be complete. This must be done before DestroyImpl() it may
+            // relinquish resources that will be freed by backends in the DestroyImpl() call.
+            DestroyObjects();
+            mQueue->Tick(GetCompletedCommandSerial());
+            // Call TickImpl once last time to clean up resources
+            // Ignore errors so that we can continue with destruction
+            IgnoreErrors(TickImpl());
+        }
+
+        // At this point GPU operations are always finished, so we are in the disconnected state.
+        // Note that currently this state change is required because some of the backend
+        // implementations of DestroyImpl checks that we are disconnected before doing work.
+        mState = State::Disconnected;
+
+        // Release frontend facilities before the backend device goes away.
+        mDynamicUploader = nullptr;
+        mCallbackTaskManager = nullptr;
+        mAsyncTaskManager = nullptr;
+        mPersistentCache = nullptr;
+        mEmptyBindGroupLayout = nullptr;
+        mInternalPipelineStore = nullptr;
+
+        AssumeCommandsComplete();
+
+        // Now that the GPU timeline is empty, destroy the backend device.
+        DestroyImpl();
+
+        mCaches = nullptr;
+        mState = State::Destroyed;
+    }
+
+    // Public entry point for device.destroy(). Currently gated behind the
+    // DisallowUnsafeAPIs toggle: when that toggle is on, the call is rejected
+    // with a validation error instead of destroying the device.
+    void DeviceBase::APIDestroy() {
+        // TODO(crbug.com/dawn/628) Re-enable once CTS testing is in place and passing.
+        if (IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+            ConsumedError(DAWN_VALIDATION_ERROR(
+                "Explicit device.destroy() is disallowed because it is not fully implemented"));
+            return;
+        }
+        Destroy();
+    }
+
+    // Central error sink. DeviceLost and Internal errors disconnect the device
+    // (Internal is escalated to DeviceLost after draining GPU work); all error
+    // types are then routed to the device-lost callback, the error scope stack,
+    // or the uncaptured error callback as appropriate.
+    void DeviceBase::HandleError(InternalErrorType type, const char* message) {
+        if (type == InternalErrorType::DeviceLost) {
+            mState = State::Disconnected;
+
+            // If the ErrorInjector is enabled, then the device loss might be fake and the device
+            // still be executing commands. Force a wait for idle in this case, with State being
+            // Disconnected so we can detect this case in WaitForIdleForDestruction.
+            if (ErrorInjectorEnabled()) {
+                IgnoreErrors(WaitForIdleForDestruction());
+            }
+
+            // A real device lost happened. Set the state to disconnected as the device cannot be
+            // used. Also tags all commands as completed since the device stopped running.
+            AssumeCommandsComplete();
+        } else if (type == InternalErrorType::Internal) {
+            // If we receive an internal error, assume the backend can't recover and proceed with
+            // device destruction. We first wait for all previous commands to be completed so that
+            // backend objects can be freed immediately, before handling the loss.
+
+            // Move away from the Alive state so that the application cannot use this device
+            // anymore.
+            // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
+            // threads in a multithreaded scenario?
+            mState = State::BeingDisconnected;
+
+            // Ignore errors so that we can continue with destruction
+            // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+            IgnoreErrors(WaitForIdleForDestruction());
+            IgnoreErrors(TickImpl());
+            AssumeCommandsComplete();
+            ASSERT(mFutureSerial <= mCompletedSerial);
+            mState = State::Disconnected;
+
+            // Now everything is as if the device was lost.
+            type = InternalErrorType::DeviceLost;
+        }
+
+        if (type == InternalErrorType::DeviceLost) {
+            // The device was lost, call the application callback.
+            if (mDeviceLostCallback != nullptr) {
+                mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
+                // Null the callback so the loss is only reported once.
+                mDeviceLostCallback = nullptr;
+            }
+
+            mQueue->HandleDeviceLoss();
+
+            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+            mAsyncTaskManager->WaitAllPendingTasks();
+            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+                callbackTask->HandleDeviceLoss();
+            }
+
+            // Still forward device loss errors to the error scopes so they all reject.
+            mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+        } else {
+            // Pass the error to the error scope stack and call the uncaptured error callback
+            // if it isn't handled. DeviceLost is not handled here because it should be
+            // handled by the lost callback.
+            bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+            if (!captured && mUncapturedErrorCallback != nullptr) {
+                mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
+                                         mUncapturedErrorUserdata);
+            }
+        }
+    }
+
+    // Takes ownership of an ErrorData and forwards its type and formatted
+    // message to HandleError. |error| must be non-null.
+    void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
+        ASSERT(error != nullptr);
+        HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+    }
+
+    // Replaces the logging callback used by EmitLog. No-op once the device is
+    // lost. Flushes pending callback tasks first so the previous callback (and
+    // its userdata) can never be invoked after this call returns.
+    void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mLoggingCallback = callback;
+        mLoggingUserdata = userdata;
+    }
+
+    // Replaces the callback invoked by HandleError for errors not captured by an
+    // error scope. No-op once the device is lost; flushes pending callback tasks
+    // first (same rationale as APISetLoggingCallback).
+    void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mUncapturedErrorCallback = callback;
+        mUncapturedErrorUserdata = userdata;
+    }
+
+    // Replaces the callback invoked when the device is lost or destroyed. No-op
+    // once the device is lost; flushes pending callback tasks first (same
+    // rationale as APISetLoggingCallback).
+    void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+        // The registered callback function and userdata pointer are stored and used by deferred
+        // callback tasks, and after setting a different callback (especially in the case of
+        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+        // callback tasks to guarantee we are never going to use the previous callback after
+        // this call.
+        if (IsLost()) {
+            return;
+        }
+        FlushCallbackTaskQueue();
+        mDeviceLostCallback = callback;
+        mDeviceLostUserdata = userdata;
+    }
+
+    // Pushes a new error scope with the given filter; rejected (and the error
+    // consumed) if the filter fails validation.
+    void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
+        if (ConsumedError(ValidateErrorFilter(filter))) {
+            return;
+        }
+        mErrorScopeStack->Push(filter);
+    }
+
+    // Pops the innermost error scope and reports its result through |callback|.
+    // Returns false (without calling the callback) when the stack is empty.
+    bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
+        if (mErrorScopeStack->Empty()) {
+            return false;
+        }
+        ErrorScope scope = mErrorScopeStack->Pop();
+        if (callback != nullptr) {
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
+                     userdata);
+        }
+
+        return true;
+    }
+
+    // Returns the device-owned persistent cache; valid until Destroy() releases it.
+    PersistentCache* DeviceBase::GetPersistentCache() {
+        ASSERT(mPersistentCache.get() != nullptr);
+        return mPersistentCache.get();
+    }
+
+    // Validates that |object| belongs to this device and is not an error object.
+    // |object| must be non-null.
+    MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
+        ASSERT(object != nullptr);
+        DAWN_INVALID_IF(object->GetDevice() != this,
+                        "%s is associated with %s, and cannot be used with %s.", object,
+                        object->GetDevice(), this);
+
+        // TODO(dawn:563): Preserve labels for error objects.
+        DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
+
+        return {};
+    }
+
+    // Produces a validation error unless the device is in State::Alive.
+    MaybeError DeviceBase::ValidateIsAlive() const {
+        DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
+        return {};
+    }
+
+    // Testing hook: simulates a device loss by injecting an Internal error,
+    // which HandleError escalates to a full device-lost. No-op unless Alive.
+    void DeviceBase::APILoseForTesting() {
+        if (mState != State::Alive) {
+            return;
+        }
+
+        HandleError(InternalErrorType::Internal, "Device lost for testing");
+    }
+
+    // Returns the current lifecycle state of the device.
+    DeviceBase::State DeviceBase::GetState() const {
+        return mState;
+    }
+
+    // True once the device has left the Alive state (disconnected or destroyed).
+    // Must not be called while the device is still being created.
+    bool DeviceBase::IsLost() const {
+        ASSERT(mState != State::BeingCreated);
+        return mState != State::Alive;
+    }
+
+    // Registers |object| in the per-type tracking list so DestroyObjects() can
+    // find and destroy it; insertion is guarded by the per-type mutex.
+    void DeviceBase::TrackObject(ApiObjectBase* object) {
+        ApiObjectList& objectList = mObjectLists[object->GetType()];
+        std::lock_guard<std::mutex> lock(objectList.mutex);
+        object->InsertBefore(objectList.objects.head());
+    }
+
+    // Exposes the mutex protecting the tracking list for |type|, for callers
+    // that need to remove themselves from the list.
+    std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
+        return &mObjectLists[type].mutex;
+    }
+
+    // Returns the adapter this device was created from (non-owning).
+    AdapterBase* DeviceBase::GetAdapter() const {
+        return mAdapter;
+    }
+
+    // Returns the platform object, reached through the adapter's instance.
+    dawn::platform::Platform* DeviceBase::GetPlatform() const {
+        return GetAdapter()->GetInstance()->GetPlatform();
+    }
+
+    // Serial of the most recent command batch known to have finished on the GPU.
+    ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
+        return mCompletedSerial;
+    }
+
+    // Serial of the most recent command batch submitted to the GPU.
+    ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
+        return mLastSubmittedSerial;
+    }
+
+    // Highest serial requested via AddFutureSerial (presumably by trackers that
+    // need the device ticked up to that point).
+    ExecutionSerial DeviceBase::GetFutureSerial() const {
+        return mFutureSerial;
+    }
+
+    // Store for pipelines the device creates for internal use.
+    InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
+        return mInternalPipelineStore.get();
+    }
+
+    // Advances the submitted-serial counter; called once per submission.
+    void DeviceBase::IncrementLastSubmittedCommandSerial() {
+        mLastSubmittedSerial++;
+    }
+
+    // Marks all submitted and future work as complete by jumping both the
+    // submitted and completed serials past every outstanding serial. Used when
+    // the GPU timeline is known (or assumed) finished, e.g. on loss/destruction.
+    void DeviceBase::AssumeCommandsComplete() {
+        ExecutionSerial maxSerial =
+            ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
+        mLastSubmittedSerial = maxSerial;
+        mCompletedSerial = maxSerial;
+    }
+
+    // True when no async tasks are pending and the completed serial has caught
+    // up with both the submitted and future serials.
+    bool DeviceBase::IsDeviceIdle() {
+        if (mAsyncTaskManager->HasPendingTasks()) {
+            return false;
+        }
+
+        ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
+        if (mCompletedSerial == maxSerial) {
+            return true;
+        }
+        return false;
+    }
+
+    // Serial that the next submission will receive.
+    ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
+        return mLastSubmittedSerial + ExecutionSerial(1);
+    }
+
+    // Raises the future serial to |serial| if it is larger; never lowers it.
+    void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
+        if (serial > mFutureSerial) {
+            mFutureSerial = serial;
+        }
+    }
+
+    // Queries the backend for the latest completed serial and advances
+    // mCompletedSerial monotonically. A backend result of 0 means there were no
+    // fences to check and is ignored.
+    MaybeError DeviceBase::CheckPassedSerials() {
+        ExecutionSerial completedSerial;
+        DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
+
+        ASSERT(completedSerial <= mLastSubmittedSerial);
+        // completedSerial should not be less than mCompletedSerial unless it is 0.
+        // It can be 0 when there's no fences to check.
+        ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
+
+        if (completedSerial > mCompletedSerial) {
+            mCompletedSerial = completedSerial;
+        }
+
+        return {};
+    }
+
+    // Looks up the internal Format for |format|, returning a validation error
+    // for unknown or unsupported formats.
+    ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
+        size_t index = ComputeFormatIndex(format);
+        DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
+
+        const Format* internalFormat = &mFormatTable[index];
+        DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
+
+        return internalFormat;
+    }
+
+    // Non-validating variant of GetInternalFormat: |format| must be known and
+    // supported (asserted), e.g. because it was validated earlier.
+    const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
+        size_t index = ComputeFormatIndex(format);
+        ASSERT(index < mFormatTable.size());
+        ASSERT(mFormatTable[index].isSupported);
+        return mFormatTable[index];
+    }
+
+    // Returns a cached bind group layout matching |descriptor| (compared via a
+    // content hash of an untracked blueprint object), creating and caching one
+    // through the backend if no match exists.
+    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
+                                      ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<BindGroupLayoutBase> result;
+        auto iter = mCaches->bindGroupLayouts.find(&blueprint);
+        if (iter != mCaches->bindGroupLayouts.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result,
+                            CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+            // Mark as cached so the object un-caches itself on destruction, and
+            // reuse the blueprint's hash to avoid recomputing it.
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->bindGroupLayouts.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    // Removes a cached bind group layout; |obj| must be a cached reference and
+    // must be present in the cache (asserted).
+    void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Private function used at initialization
+    // Creates the shared zero-entry bind group layout via the regular cache path.
+    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
+        BindGroupLayoutDescriptor desc = {};
+        desc.entryCount = 0;
+        desc.entries = nullptr;
+
+        return GetOrCreateBindGroupLayout(&desc);
+    }
+
+    // Returns the empty bind group layout created during device initialization.
+    BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
+        ASSERT(mEmptyBindGroupLayout != nullptr);
+        return mEmptyBindGroupLayout.Get();
+    }
+
+    // Returns the cached compute pipeline equivalent to
+    // |uninitializedComputePipeline|, or a null Ref on cache miss.
+    Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
+        ComputePipelineBase* uninitializedComputePipeline) {
+        Ref<ComputePipelineBase> cachedPipeline;
+        auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
+        if (iter != mCaches->computePipelines.end()) {
+            cachedPipeline = *iter;
+        }
+
+        return cachedPipeline;
+    }
+
+    // Returns the cached render pipeline equivalent to
+    // |uninitializedRenderPipeline|, or a null Ref on cache miss.
+    Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
+        RenderPipelineBase* uninitializedRenderPipeline) {
+        Ref<RenderPipelineBase> cachedPipeline;
+        auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
+        if (iter != mCaches->renderPipelines.end()) {
+            cachedPipeline = *iter;
+        }
+        return cachedPipeline;
+    }
+
+    // Inserts |computePipeline| into the cache, or returns the pipeline already
+    // cached as equivalent if the insertion loses the race.
+    Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
+        Ref<ComputePipelineBase> computePipeline) {
+        auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
+        if (inserted) {
+            computePipeline->SetIsCachedReference();
+            return computePipeline;
+        } else {
+            return *cachedPipeline;
+        }
+    }
+
+    // Inserts |renderPipeline| into the cache, or returns the pipeline already
+    // cached as equivalent if the insertion loses the race.
+    Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
+        Ref<RenderPipelineBase> renderPipeline) {
+        auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
+        if (inserted) {
+            renderPipeline->SetIsCachedReference();
+            return renderPipeline;
+        } else {
+            return *cachedPipeline;
+        }
+    }
+
+    // Removes a cached compute pipeline; |obj| must be a cached reference and
+    // present in the cache (asserted).
+    void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->computePipelines.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Returns a cached pipeline layout matching |descriptor| (content-hash
+    // lookup via an untracked blueprint), creating and caching one otherwise.
+    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor) {
+        PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<PipelineLayoutBase> result;
+        auto iter = mCaches->pipelineLayouts.find(&blueprint);
+        if (iter != mCaches->pipelineLayouts.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->pipelineLayouts.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    // Removes a cached pipeline layout; |obj| must be a cached reference and
+    // present in the cache (asserted).
+    void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->pipelineLayouts.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Removes a cached render pipeline; |obj| must be a cached reference and
+    // present in the cache (asserted).
+    void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->renderPipelines.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Returns a cached sampler matching |descriptor| (content-hash lookup via an
+    // untracked blueprint), creating and caching one otherwise.
+    ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
+        const SamplerDescriptor* descriptor) {
+        SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<SamplerBase> result;
+        auto iter = mCaches->samplers.find(&blueprint);
+        if (iter != mCaches->samplers.end()) {
+            result = *iter;
+        } else {
+            DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->samplers.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    // Removes a cached sampler; |obj| must be a cached reference and present in
+    // the cache (asserted).
+    void DeviceBase::UncacheSampler(SamplerBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->samplers.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Returns a cached shader module matching |descriptor|, creating and caching
+    // one otherwise. On a cache miss the descriptor is (re)parsed if
+    // |parseResult| does not already hold a parsed shader, which can happen when
+    // validation was skipped before the cache lookup.
+    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult,
+        OwnedCompilationMessages* compilationMessages) {
+        ASSERT(parseResult != nullptr);
+
+        ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+        const size_t blueprintHash = blueprint.ComputeContentHash();
+        blueprint.SetContentHash(blueprintHash);
+
+        Ref<ShaderModuleBase> result;
+        auto iter = mCaches->shaderModules.find(&blueprint);
+        if (iter != mCaches->shaderModules.end()) {
+            result = *iter;
+        } else {
+            if (!parseResult->HasParsedShader()) {
+                // We skip the parse on creation if validation isn't enabled which let's us quickly
+                // lookup in the cache without validating and parsing. We need the parsed module
+                // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
+                // we can consider splitting it if additional validation is added.
+                ASSERT(!IsValidationEnabled());
+                DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
+                                                        compilationMessages));
+            }
+            DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
+            result->SetIsCachedReference();
+            result->SetContentHash(blueprintHash);
+            mCaches->shaderModules.insert(result.Get());
+        }
+
+        return std::move(result);
+    }
+
+    // Removes a cached shader module; |obj| must be a cached reference and
+    // present in the cache (asserted).
+    void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->shaderModules.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Returns the cached AttachmentState matching |blueprint|, creating and
+    // caching one otherwise. AttachmentStates are not ApiObjects, so caching is
+    // their only device-level tracking (see note in DestroyObjects).
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        AttachmentStateBlueprint* blueprint) {
+        auto iter = mCaches->attachmentStates.find(blueprint);
+        if (iter != mCaches->attachmentStates.end()) {
+            return static_cast<AttachmentState*>(*iter);
+        }
+
+        Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
+        attachmentState->SetIsCachedReference();
+        attachmentState->SetContentHash(attachmentState->ComputeContentHash());
+        mCaches->attachmentStates.insert(attachmentState.Get());
+        return attachmentState;
+    }
+
+    // Convenience overload: builds the blueprint from a render bundle encoder
+    // descriptor.
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderBundleEncoderDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    // Convenience overload: builds the blueprint from a render pipeline
+    // descriptor.
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderPipelineDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    // Convenience overload: builds the blueprint from a render pass descriptor.
+    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+        const RenderPassDescriptor* descriptor) {
+        AttachmentStateBlueprint blueprint(descriptor);
+        return GetOrCreateAttachmentState(&blueprint);
+    }
+
+    // Removes a cached attachment state; |obj| must be a cached reference and
+    // present in the cache (asserted).
+    void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+        ASSERT(obj->IsCachedReference());
+        size_t removedCount = mCaches->attachmentStates.erase(obj);
+        ASSERT(removedCount == 1);
+    }
+
+    // Object creation API methods
+    //
+    // Each APICreate* method forwards to the frontend Create* path; if an error
+    // is produced it is consumed (routed through HandleError) and an error
+    // object is returned instead, so callers always receive a non-null handle.
+
+    BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
+        Ref<BindGroupBase> result;
+        if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
+                          this, descriptor)) {
+            return BindGroupBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor) {
+        Ref<BindGroupLayoutBase> result;
+        if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
+                          "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
+            return BindGroupLayoutBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
+        Ref<BufferBase> result = nullptr;
+        if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
+                          descriptor)) {
+            ASSERT(result == nullptr);
+            return BufferBase::MakeError(this, descriptor);
+        }
+        return result.Detach();
+    }
+    CommandEncoder* DeviceBase::APICreateCommandEncoder(
+        const CommandEncoderDescriptor* descriptor) {
+        // A null descriptor is allowed; substitute a default-initialized one.
+        const CommandEncoderDescriptor defaultDescriptor = {};
+        if (descriptor == nullptr) {
+            descriptor = &defaultDescriptor;
+        }
+
+        Ref<CommandEncoder> result;
+        if (ConsumedError(CreateCommandEncoder(descriptor), &result,
+                          "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
+            return CommandEncoder::MakeError(this);
+        }
+        return result.Detach();
+    }
+    ComputePipelineBase* DeviceBase::APICreateComputePipeline(
+        const ComputePipelineDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<ComputePipelineBase> result;
+        if (ConsumedError(CreateComputePipeline(descriptor), &result,
+                          "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
+            return ComputePipelineBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                                   WGPUCreateComputePipelineAsyncCallback callback,
+                                                   void* userdata) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
+
+        // Call the callback directly when a validation error has been found in the front-end
+        // validations. If there is no error, then CreateComputePipelineAsync will call the
+        // callback.
+        if (maybeResult.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                     userdata);
+        }
+    }
+    PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor) {
+        Ref<PipelineLayoutBase> result;
+        if (ConsumedError(CreatePipelineLayout(descriptor), &result,
+                          "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
+            return PipelineLayoutBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
+        Ref<QuerySetBase> result;
+        if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
+                          this, descriptor)) {
+            return QuerySetBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
+        Ref<SamplerBase> result;
+        if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
+                          descriptor)) {
+            return SamplerBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                                  WGPUCreateRenderPipelineAsyncCallback callback,
+                                                  void* userdata) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+        // TODO(dawn:563): Add validation error context.
+        MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
+
+        // Call the callback directly when a validation error has been found in the front-end
+        // validations. If there is no error, then CreateRenderPipelineAsync will call the
+        // callback.
+        if (maybeResult.IsError()) {
+            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                     userdata);
+        }
+    }
+    RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
+        const RenderBundleEncoderDescriptor* descriptor) {
+        Ref<RenderBundleEncoder> result;
+        if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
+                          "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
+            return RenderBundleEncoder::MakeError(this);
+        }
+        return result.Detach();
+    }
+    RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
+        const RenderPipelineDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<RenderPipelineBase> result;
+        if (ConsumedError(CreateRenderPipeline(descriptor), &result,
+                          "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
+            return RenderPipelineBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
+        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
+                     utils::GetLabelForTrace(descriptor->label));
+
+        Ref<ShaderModuleBase> result;
+        std::unique_ptr<OwnedCompilationMessages> compilationMessages(
+            std::make_unique<OwnedCompilationMessages>());
+        if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
+                          "calling %s.CreateShaderModule(%s).", this, descriptor)) {
+            DAWN_ASSERT(result == nullptr);
+            // Unlike the other creators, the error module still receives the
+            // compilation messages gathered above.
+            result = ShaderModuleBase::MakeError(this);
+        }
+        // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
+        // after all other operations are finished successfully.
+        result->InjectCompilationMessages(std::move(compilationMessages));
+
+        return result.Detach();
+    }
+    SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
+                                                  const SwapChainDescriptor* descriptor) {
+        Ref<SwapChainBase> result;
+        if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
+                          "calling %s.CreateSwapChain(%s).", this, descriptor)) {
+            return SwapChainBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+    TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
+        Ref<TextureBase> result;
+        if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
+                          descriptor)) {
+            return TextureBase::MakeError(this);
+        }
+        return result.Detach();
+    }
+
+    // For Dawn Wire
+
+    // Returns an error buffer with a default descriptor; used by the wire to
+    // materialize client-side error objects.
+    BufferBase* DeviceBase::APICreateErrorBuffer() {
+        BufferDescriptor desc = {};
+        return BufferBase::MakeError(this, &desc);
+    }
+
+    // Other Device API methods
+
+    // Returns true if future ticking is needed.
+    bool DeviceBase::APITick() {
+        // A lost device, or an error during Tick(), means no more ticking is useful.
+        if (IsLost() || ConsumedError(Tick())) {
+            return false;
+        }
+        return !IsDeviceIdle();
+    }
+
+    // Advances the device: updates completed serials, ticks the backend, frees
+    // dynamic-uploader allocations and queue tasks up to the completed serial,
+    // then flushes deferred callback tasks. Fails if the device is not alive.
+    MaybeError DeviceBase::Tick() {
+        DAWN_TRY(ValidateIsAlive());
+
+        // to avoid overly ticking, we only want to tick when:
+        // 1. the last submitted serial has moved beyond the completed serial
+        // 2. or the completed serial has not reached the future serial set by the trackers
+        if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
+            DAWN_TRY(CheckPassedSerials());
+            DAWN_TRY(TickImpl());
+
+            // There is no GPU work in flight, we need to move the serials forward so that
+            // so that CPU operations waiting on GPU completion can know they don't have to wait.
+            // AssumeCommandsComplete will assign the max serial we must tick to in order to
+            // fire the awaiting callbacks.
+            if (mCompletedSerial == mLastSubmittedSerial) {
+                AssumeCommandsComplete();
+            }
+
+            // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
+            // tick the dynamic uploader before the backend resource allocators. This would allow
+            // reclaiming resources one tick earlier.
+            mDynamicUploader->Deallocate(mCompletedSerial);
+            mQueue->Tick(mCompletedSerial);
+        }
+
+        // We have to check callback tasks in every Tick because it is not related to any global
+        // serials.
+        FlushCallbackTaskQueue();
+
+        return {};
+    }
+
+    // Returns the device's primary queue with an extra reference added for the
+    // caller (WebGPU API handles are externally ref-counted).
+    QueueBase* DeviceBase::APIGetQueue() {
+        // Backends gave the primary queue during initialization.
+        ASSERT(mQueue != nullptr);
+
+        // Returns a new reference to the queue.
+        mQueue->Reference();
+        return mQueue.Get();
+    }
+
+    // Creates an external texture, or an error object if creation fails (the
+    // error having been consumed through the usual error path).
+    ExternalTextureBase* DeviceBase::APICreateExternalTexture(
+        const ExternalTextureDescriptor* descriptor) {
+        Ref<ExternalTextureBase> result = nullptr;
+        if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
+                          "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
+            return ExternalTextureBase::MakeError(this);
+        }
+
+        return result.Detach();
+    }
+
+ void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
+ ASSERT(deviceDescriptor);
+ ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
+ {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
+
+ for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
+ mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
+ }
+ }
+
+ bool DeviceBase::IsFeatureEnabled(Feature feature) const {
+ return mEnabledFeatures.IsEnabled(feature);
+ }
+
+ bool DeviceBase::IsValidationEnabled() const {
+ return !IsToggleEnabled(Toggle::SkipValidation);
+ }
+
+ bool DeviceBase::IsRobustnessEnabled() const {
+ return !IsToggleEnabled(Toggle::DisableRobustness);
+ }
+
+ size_t DeviceBase::GetLazyClearCountForTesting() {
+ return mLazyClearCountForTesting;
+ }
+
+ void DeviceBase::IncrementLazyClearCountForTesting() {
+ ++mLazyClearCountForTesting;
+ }
+
+ size_t DeviceBase::GetDeprecationWarningCountForTesting() {
+ return mDeprecationWarnings->count;
+ }
+
+ void DeviceBase::EmitDeprecationWarning(const char* warning) {
+ mDeprecationWarnings->count++;
+ if (mDeprecationWarnings->emitted.insert(warning).second) {
+ dawn::WarningLog() << warning;
+ }
+ }
+
+ void DeviceBase::EmitLog(const char* message) {
+ this->EmitLog(WGPULoggingType_Info, message);
+ }
+
+ void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
+ if (mLoggingCallback != nullptr) {
+ // Use the thread-safe CallbackTaskManager routine
+ std::unique_ptr<LoggingCallbackTask> callbackTask =
+ std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
+ mLoggingUserdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+ }
+ }
+
+ bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ limits->limits = mLimits.v1;
+ return true;
+ }
+
+ bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
+ return mEnabledFeatures.IsEnabled(feature);
+ }
+
+ size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+ return mEnabledFeatures.EnumerateFeatures(features);
+ }
+
+ void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
+ if (ConsumedError(ValidateErrorType(type))) {
+ return;
+ }
+
+ // This method should only be used to make error scope reject. For DeviceLost there is the
+ // LoseForTesting function that can be used instead.
+ if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
+ HandleError(InternalErrorType::Validation,
+ "Invalid injected error, must be Validation or OutOfMemory");
+ return;
+ }
+
+ HandleError(FromWGPUErrorType(type), message);
+ }
+
+ QueueBase* DeviceBase::GetQueue() const {
+ return mQueue.Get();
+ }
+
+ // Implementation details of object creation
+
+ ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
+ const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
+ "validating %s against %s", descriptor, descriptor->layout);
+ }
+ return CreateBindGroupImpl(descriptor);
+ }
+
+ ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(
+ ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+ "validating %s", descriptor);
+ }
+ return GetOrCreateBindGroupLayout(descriptor);
+ }
+
+ ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
+ descriptor);
+ }
+
+ Ref<BufferBase> buffer;
+ DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreation());
+ }
+
+ return std::move(buffer);
+ }
+
+ ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<ComputePipelineBase> uninitializedComputePipeline =
+ CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+ Ref<ComputePipelineBase> cachedComputePipeline =
+ GetCachedComputePipeline(uninitializedComputePipeline.Get());
+ if (cachedComputePipeline.Get() != nullptr) {
+ return cachedComputePipeline;
+ }
+
+ DAWN_TRY(uninitializedComputePipeline->Initialize());
+ return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+ }
+
+ ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
+ const CommandEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
+ }
+ return CommandEncoder::Create(this, descriptor);
+ }
+
+ MaybeError DeviceBase::CreateComputePipelineAsync(
+ const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+ }
+
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<ComputePipelineBase> uninitializedComputePipeline =
+ CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+
+ // Call the callback directly when we can get a cached compute pipeline object.
+ Ref<ComputePipelineBase> cachedComputePipeline =
+ GetCachedComputePipeline(uninitializedComputePipeline.Get());
+ if (cachedComputePipeline.Get() != nullptr) {
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
+ "", userdata);
+ } else {
+ // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
+ // where the pipeline object may be initialized asynchronously and the result will be
+ // saved to mCreatePipelineAsyncTracker.
+ InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
+ userdata);
+ }
+
+ return {};
+ }
+
+ // This function is overwritten with the async version on the backends that supports
+ // initializing compute pipelines asynchronously.
+ void DeviceBase::InitializeComputePipelineAsyncImpl(
+ Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<ComputePipelineBase> result;
+ std::string errorMessage;
+
+ MaybeError maybeError = computePipeline->Initialize();
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedComputePipeline(std::move(computePipeline));
+ }
+
+ std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateComputePipelineAsyncCallbackTask>(
+ std::move(result), errorMessage, callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+ }
+
+ // This function is overwritten with the async version on the backends
+ // that supports initializing render pipeline asynchronously
+ void DeviceBase::InitializeRenderPipelineAsyncImpl(
+ Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<RenderPipelineBase> result;
+ std::string errorMessage;
+
+ MaybeError maybeError = renderPipeline->Initialize();
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
+ }
+
+ std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
+ callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+ }
+ return GetOrCreatePipelineLayout(descriptor);
+ }
+
+ ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
+ const ExternalTextureDescriptor* descriptor) {
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
+ descriptor);
+ }
+
+ return ExternalTextureBase::Create(this, descriptor);
+ }
+
+ ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
+ const QuerySetDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
+ descriptor);
+ }
+ return CreateQuerySetImpl(descriptor);
+ }
+
+ ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
+ }
+ return RenderBundleEncoder::Create(this, descriptor);
+ }
+
+ ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
+ const RenderPipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ return cachedRenderPipeline;
+ }
+
+ DAWN_TRY(uninitializedRenderPipeline->Initialize());
+ return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+ }
+
+ MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
+
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+ // Call the callback directly when we can get a cached render pipeline object.
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
+ "", userdata);
+ } else {
+ // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
+ // where the pipeline object may be initialized asynchronously and the result will be
+ // saved to mCreatePipelineAsyncTracker.
+ InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
+ userdata);
+ }
+
+ return {};
+ }
+
+ ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
+ const SamplerDescriptor defaultDescriptor = {};
+ DAWN_TRY(ValidateIsAlive());
+ descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
+ descriptor);
+ }
+ return GetOrCreateSampler(descriptor);
+ }
+
+ ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ OwnedCompilationMessages* compilationMessages) {
+ DAWN_TRY(ValidateIsAlive());
+
+ // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
+ // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
+ // long as dawn_native don't use the compilationMessages of these internal shader modules.
+ ShaderModuleParseResult parseResult;
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(
+ ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
+ "validating %s", descriptor);
+ }
+
+ return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+ }
+
+ ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
+ Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
+ "validating %s", descriptor);
+ }
+
+ // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
+ if (surface == nullptr) {
+ return CreateSwapChainImpl(descriptor);
+ } else {
+ ASSERT(descriptor->implementation == 0);
+
+ NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
+ ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
+ CreateSwapChainImpl(surface, previousSwapChain, descriptor);
+
+ if (previousSwapChain != nullptr) {
+ previousSwapChain->DetachFromSurface();
+ }
+
+ Ref<NewSwapChainBase> newSwapChain;
+ DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
+
+ newSwapChain->SetIsAttached();
+ surface->SetAttachedSwapChain(newSwapChain.Get());
+ return newSwapChain;
+ }
+ }
+
+ ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
+ descriptor);
+ }
+ return CreateTextureImpl(descriptor);
+ }
+
+ ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
+ DAWN_TRY(ValidateObject(texture));
+ TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
+ if (IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
+ "validating %s against %s.", &desc, texture);
+ }
+ return CreateTextureViewImpl(texture, &desc);
+ }
+
+ // Other implementation details
+
+ DynamicUploader* DeviceBase::GetDynamicUploader() const {
+ return mDynamicUploader.get();
+ }
+
+ // The Toggle device facility
+
+ std::vector<const char*> DeviceBase::GetTogglesUsed() const {
+ return mEnabledToggles.GetContainedToggleNames();
+ }
+
+ bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
+ return mEnabledToggles.Has(toggle);
+ }
+
+ void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
+ if (!mOverridenToggles.Has(toggle)) {
+ mEnabledToggles.Set(toggle, isEnabled);
+ }
+ }
+
+ void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
+ if (!mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
+ dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
+ << isEnabled << " when it was overriden to be " << !isEnabled;
+ }
+ mEnabledToggles.Set(toggle, isEnabled);
+ }
+
+ void DeviceBase::SetDefaultToggles() {
+ SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
+ SetToggle(Toggle::DisallowUnsafeAPIs, true);
+ }
+
+ void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
+ ASSERT(togglesDescriptor != nullptr);
+
+ for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
+ Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+ togglesDescriptor->forceEnabledToggles[i]);
+ if (toggle != Toggle::InvalidEnum) {
+ mEnabledToggles.Set(toggle, true);
+ mOverridenToggles.Set(toggle, true);
+ }
+ }
+ for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
+ Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+ togglesDescriptor->forceDisabledToggles[i]);
+ if (toggle != Toggle::InvalidEnum) {
+ mEnabledToggles.Set(toggle, false);
+ mOverridenToggles.Set(toggle, true);
+ }
+ }
+ }
+
+ void DeviceBase::FlushCallbackTaskQueue() {
+ if (!mCallbackTaskManager->IsEmpty()) {
+ // If a user calls Queue::Submit inside the callback, then the device will be ticked,
+ // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
+ // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
+ // update mCallbackTaskManager, then call all the callbacks.
+ auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+ for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+ callbackTask->Finish();
+ }
+ }
+ }
+
+ const CombinedLimits& DeviceBase::GetLimits() const {
+ return mLimits;
+ }
+
+ AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
+ return mAsyncTaskManager.get();
+ }
+
+ CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
+ return mCallbackTaskManager.get();
+ }
+
+ dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
+ return mWorkerTaskPool.get();
+ }
+
+ void DeviceBase::AddComputePipelineAsyncCallbackTask(
+ Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
+ // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
+ struct CreateComputePipelineAsyncWaitableCallbackTask final
+ : CreateComputePipelineAsyncCallbackTask {
+ using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
+ void Finish() final {
+ // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
+ // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+ // thread-safe.
+ if (mPipeline.Get() != nullptr) {
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
+ }
+
+ CreateComputePipelineAsyncCallbackTask::Finish();
+ }
+ };
+
+ mCallbackTaskManager->AddCallbackTask(
+ std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
+ std::move(pipeline), errorMessage, callback, userdata));
+ }
+
+ void DeviceBase::AddRenderPipelineAsyncCallbackTask(
+ Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
+ // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
+ struct CreateRenderPipelineAsyncWaitableCallbackTask final
+ : CreateRenderPipelineAsyncCallbackTask {
+ using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
+
+ void Finish() final {
+ // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
+ // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+ // thread-safe.
+ if (mPipeline.Get() != nullptr) {
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
+ }
+
+ CreateRenderPipelineAsyncCallbackTask::Finish();
+ }
+ };
+
+ mCallbackTaskManager->AddCallbackTask(
+ std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
+ std::move(pipeline), errorMessage, callback, userdata));
+ }
+
+ PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
+ return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
+ }
+
+ const std::string& DeviceBase::GetLabel() const {
+ return mLabel;
+ }
+
+ void DeviceBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+ }
+
+ void DeviceBase::SetLabelImpl() {
+ }
+
+ bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const {
+ return false;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Device.h b/chromium/third_party/dawn/src/dawn/native/Device.h
new file mode 100644
index 00000000000..b957dc0b393
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Device.h
@@ -0,0 +1,547 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DEVICE_H_
+#define DAWNNATIVE_DEVICE_H_
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/Toggles.h"
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <mutex>
+#include <utility>
+
+namespace dawn::platform {
+ class WorkerTaskPool;
+} // namespace dawn::platform
+
+namespace dawn::native {
+ class AdapterBase;
+ class AsyncTaskManager;
+ class AttachmentState;
+ class AttachmentStateBlueprint;
+ class BindGroupLayoutBase;
+ class CallbackTaskManager;
+ class DynamicUploader;
+ class ErrorScopeStack;
+ class ExternalTextureBase;
+ class OwnedCompilationMessages;
+ class PersistentCache;
+ class StagingBufferBase;
+ struct CallbackTask;
+ struct InternalPipelineStore;
+ struct ShaderModuleParseResult;
+
+ class DeviceBase : public RefCounted {
+ public:
+ DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
+ virtual ~DeviceBase();
+
+ void HandleError(InternalErrorType type, const char* message);
+
+ bool ConsumedError(MaybeError maybeError) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ ConsumeError(maybeError.AcquireError());
+ return true;
+ }
+ return false;
+ }
+
+ template <typename T>
+ bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ ConsumeError(resultOrError.AcquireError());
+ return true;
+ }
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
+ template <typename... Args>
+ bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ } else {
+ error->AppendContext(
+ absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
+ }
+ }
+ ConsumeError(std::move(error));
+ return true;
+ }
+ return false;
+ }
+
+ template <typename T, typename... Args>
+ bool ConsumedError(ResultOrError<T> resultOrError,
+ T* result,
+ const char* formatStr,
+ const Args&... args) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ } else {
+ error->AppendContext(
+ absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
+ }
+ }
+ ConsumeError(std::move(error));
+ return true;
+ }
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
+ MaybeError ValidateObject(const ApiObjectBase* object) const;
+
+ AdapterBase* GetAdapter() const;
+ dawn::platform::Platform* GetPlatform() const;
+
+ // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
+ // isn't a valid wgpu::TextureFormat or isn't supported by this device.
+ // The pointer returned has the same lifetime as the device.
+ ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
+
+ // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
+ // valid and supported.
+ // The reference returned has the same lifetime as the device.
+ const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
+
+ virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) = 0;
+
+ ExecutionSerial GetCompletedCommandSerial() const;
+ ExecutionSerial GetLastSubmittedCommandSerial() const;
+ ExecutionSerial GetFutureSerial() const;
+ ExecutionSerial GetPendingCommandSerial() const;
+
+ // Many Dawn objects are completely immutable once created which means that if two
+ // creations are given the same arguments, they can return the same object. Reusing
+ // objects will help make comparisons between objects by a single pointer comparison.
+ //
+ // Technically no object is immutable as they have a reference count, and an
+ // application with reference-counting issues could "see" that objects are reused.
+ // This is solved by automatic-reference counting, and also the fact that when using
+ // the client-server wire every creation will get a different proxy object, with a
+ // different reference count.
+ //
+ // When trying to create an object, we give both the descriptor and an example of what
+ // the created object will be, the "blueprint". The blueprint is just a FooBase object
+ // instead of a backend Foo object. If the blueprint doesn't match an object in the
+ // cache, then the descriptor is used to make a new object.
+ ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+ void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
+
+ BindGroupLayoutBase* GetEmptyBindGroupLayout();
+
+ void UncacheComputePipeline(ComputePipelineBase* obj);
+
+ ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor);
+ void UncachePipelineLayout(PipelineLayoutBase* obj);
+
+ void UncacheRenderPipeline(RenderPipelineBase* obj);
+
+ ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+ void UncacheSampler(SamplerBase* obj);
+
+ ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* compilationMessages);
+ void UncacheShaderModule(ShaderModuleBase* obj);
+
+ Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
+ Ref<AttachmentState> GetOrCreateAttachmentState(
+ const RenderBundleEncoderDescriptor* descriptor);
+ Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
+ Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
+ void UncacheAttachmentState(AttachmentState* obj);
+
+ // Object creation methods that be used in a reentrant manner.
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
+ const BindGroupLayoutDescriptor* descriptor,
+ bool allowInternalBinding = false);
+ ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
+ ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
+ const CommandEncoderDescriptor* descriptor);
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
+ const ComputePipelineDescriptor* descriptor);
+ MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
+ const PipelineLayoutDescriptor* descriptor);
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
+ ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
+ const RenderPipelineDescriptor* descriptor);
+ MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor);
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
+ const ShaderModuleDescriptor* descriptor,
+ OwnedCompilationMessages* compilationMessages = nullptr);
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor);
+ ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+ ResultOrError<Ref<TextureViewBase>> CreateTextureView(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+ // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
+ BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
+ BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
+ BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
+ CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+ ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
+ PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+ QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
+ void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ RenderBundleEncoder* APICreateRenderBundleEncoder(
+ const RenderBundleEncoderDescriptor* descriptor);
+ RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
+ ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
+ SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
+ ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
+ SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
+ TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
+
+ InternalPipelineStore* GetInternalPipelineStore();
+
+ // For Dawn Wire
+ BufferBase* APICreateErrorBuffer();
+
+ QueueBase* APIGetQueue();
+
+ bool APIGetLimits(SupportedLimits* limits) const;
+ bool APIHasFeature(wgpu::FeatureName feature) const;
+ size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+ void APIInjectError(wgpu::ErrorType type, const char* message);
+ bool APITick();
+
+ void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
+ void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+ void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
+ void APIPushErrorScope(wgpu::ErrorFilter filter);
+ bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+
+ MaybeError ValidateIsAlive() const;
+
+ PersistentCache* GetPersistentCache();
+
+ virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
+ size_t size) = 0;
+ virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) = 0;
+ virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) = 0;
+
+ DynamicUploader* GetDynamicUploader() const;
+
+ // The device state which is a combination of creation state and loss state.
+ //
+ // - BeingCreated: the device didn't finish creation yet and the frontend cannot be used
+ // (both for the application calling WebGPU, or re-entrant calls). No work exists on
+ // the GPU timeline.
+ // - Alive: the device is usable and might have work happening on the GPU timeline.
+ // - BeingDisconnected: the device is no longer usable because we are waiting for all
+ // work on the GPU timeline to finish. (this is to make validation prevent the
+ // application from adding more work during the transition from Available to
+ // Disconnected)
+ // - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
+ // structures can be safely destroyed without additional synchronization.
+ // - Destroyed: the device is disconnected and resources have been reclaimed.
+ enum class State {
+ BeingCreated,
+ Alive,
+ BeingDisconnected,
+ Disconnected,
+ Destroyed,
+ };
+ State GetState() const;
+ bool IsLost() const;
+ void TrackObject(ApiObjectBase* object);
+ std::mutex* GetObjectListMutex(ObjectType type);
+
+ std::vector<const char*> GetTogglesUsed() const;
+ bool IsFeatureEnabled(Feature feature) const;
+ bool IsToggleEnabled(Toggle toggle) const;
+ bool IsValidationEnabled() const;
+ bool IsRobustnessEnabled() const;
+ size_t GetLazyClearCountForTesting();
+ void IncrementLazyClearCountForTesting();
+ size_t GetDeprecationWarningCountForTesting();
+ void EmitDeprecationWarning(const char* warning);
+ void EmitLog(const char* message);
+ void EmitLog(WGPULoggingType loggingType, const char* message);
+ void APILoseForTesting();
+ QueueBase* GetQueue() const;
+
+ // AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
+ // ticked in order to clean up all pending callback work or to execute asynchronous resource
+ // writes. It should be given the serial that a callback is tracked with, so that once that
+ // serial is completed, it can be resolved and cleaned up. This is so that when there is no
+ // gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
+ // still check if we have pending work to take care of, rather than hanging and never
+ // reaching the serial the work will be executed on.
+ void AddFutureSerial(ExecutionSerial serial);
+ // Check for passed fences and set the new completed serial
+ MaybeError CheckPassedSerials();
+
+ MaybeError Tick();
+
+ // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
+ // BackendMetadata that we can query from the device.
+ virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
+ virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
+
+ virtual float GetTimestampPeriodInNS() const = 0;
+
+ virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const;
+
+ const CombinedLimits& GetLimits() const;
+
+ AsyncTaskManager* GetAsyncTaskManager() const;
+ CallbackTaskManager* GetCallbackTaskManager() const;
+ dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
+
+ void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
+
+ const std::string& GetLabel() const;
+ void APISetLabel(const char* label);
+ void APIDestroy();
+
+ protected:
+ // Constructor used only for mocking and testing.
+ DeviceBase();
+
+ void SetToggle(Toggle toggle, bool isEnabled);
+ void ForceSetToggle(Toggle toggle, bool isEnabled);
+
+ MaybeError Initialize(QueueBase* defaultQueue);
+ void DestroyObjects();
+ void Destroy();
+
+        // Increment mLastSubmittedSerial when we submit the next serial
+ void IncrementLastSubmittedCommandSerial();
+
+ private:
+ virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
+ virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
+ const ExternalTextureDescriptor* descriptor);
+ virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) = 0;
+ virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) = 0;
+ // Note that previousSwapChain may be nullptr, or come from a different backend.
+ virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) = 0;
+ virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) = 0;
+ virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) = 0;
+ virtual void SetLabelImpl();
+
+ virtual MaybeError TickImpl() = 0;
+ void FlushCallbackTaskQueue();
+
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
+
+ Ref<ComputePipelineBase> GetCachedComputePipeline(
+ ComputePipelineBase* uninitializedComputePipeline);
+ Ref<RenderPipelineBase> GetCachedRenderPipeline(
+ RenderPipelineBase* uninitializedRenderPipeline);
+ Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
+ Ref<ComputePipelineBase> computePipeline);
+ Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
+ Ref<RenderPipelineBase> renderPipeline);
+ virtual void InitializeComputePipelineAsyncImpl(
+ Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ virtual void InitializeRenderPipelineAsyncImpl(
+ Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
+ void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
+
+ void SetDefaultToggles();
+
+ void ConsumeError(std::unique_ptr<ErrorData> error);
+
+ // Each backend should implement to check their passed fences if there are any and return a
+ // completed serial. Return 0 should indicate no fences to check.
+ virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
+ // During shut down of device, some operations might have been started since the last submit
+ // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
+ // make all commands look completed.
+ void AssumeCommandsComplete();
+ bool IsDeviceIdle();
+
+ // mCompletedSerial tracks the last completed command serial that the fence has returned.
+ // mLastSubmittedSerial tracks the last submitted command serial.
+ // During device removal, the serials could be artificially incremented
+        // to make it appear as if commands have been completed. They can also be artificially
+ // incremented when no work is being done in the GPU so CPU operations don't have to wait on
+ // stale serials.
+ // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
+ // callbacks to fire
+ ExecutionSerial mCompletedSerial = ExecutionSerial(0);
+ ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
+ ExecutionSerial mFutureSerial = ExecutionSerial(0);
+
+ // DestroyImpl is used to clean up and release resources used by device, does not wait for
+ // GPU or check errors.
+ virtual void DestroyImpl() = 0;
+
+ // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
+ // destruction. This is only used when properly destructing the device. For a real
+ // device loss, this function doesn't need to be called since the driver already closed all
+ // resources.
+ virtual MaybeError WaitForIdleForDestruction() = 0;
+
+ wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
+ void* mUncapturedErrorUserdata = nullptr;
+
+ wgpu::LoggingCallback mLoggingCallback = nullptr;
+ void* mLoggingUserdata = nullptr;
+
+ wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
+ void* mDeviceLostUserdata = nullptr;
+
+ std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
+
+ // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
+ // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
+ // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
+ // Instance.
+ Ref<InstanceBase> mInstance;
+ AdapterBase* mAdapter = nullptr;
+
+ // The object caches aren't exposed in the header as they would require a lot of
+ // additional includes.
+ struct Caches;
+ std::unique_ptr<Caches> mCaches;
+
+ Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
+
+ std::unique_ptr<DynamicUploader> mDynamicUploader;
+ std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
+ Ref<QueueBase> mQueue;
+
+ struct DeprecationWarnings;
+ std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
+
+ State mState = State::BeingCreated;
+
+ // Encompasses the mutex and the actual list that contains all live objects "owned" by the
+ // device.
+ struct ApiObjectList {
+ std::mutex mutex;
+ LinkedList<ApiObjectBase> objects;
+ };
+ PerObjectType<ApiObjectList> mObjectLists;
+
+ FormatTable mFormatTable;
+
+ TogglesSet mEnabledToggles;
+ TogglesSet mOverridenToggles;
+ size_t mLazyClearCountForTesting = 0;
+ std::atomic_uint64_t mNextPipelineCompatibilityToken;
+
+ CombinedLimits mLimits;
+ FeaturesSet mEnabledFeatures;
+
+ std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
+
+ std::unique_ptr<PersistentCache> mPersistentCache;
+
+ std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
+ std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
+ std::string mLabel;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_DEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp
new file mode 100644
index 00000000000..262c07d7185
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.cpp
@@ -0,0 +1,129 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    // The uploader starts with a single ring buffer whose staging memory is
+    // created lazily on first allocation (see AllocateInternal). Note:
+    // std::make_unique cannot aggregate-initialize RingBuffer before C++20,
+    // hence the explicit new.
+    DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
+        mRingBuffers.emplace_back(
+            std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
+    }
+
+    // Defers destruction of |stagingBuffer| until the pending command serial
+    // completes, so in-flight GPU commands may still read from it.
+    void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
+        mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
+                                        mDevice->GetPendingCommandSerial());
+    }
+
+    // Sub-allocates |allocationSize| bytes from one of the ring buffers, or falls
+    // back to a dedicated staging buffer when the request exceeds kRingBufferSize.
+    // The returned handle stays valid until work tracked with |serial| completes.
+    ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
+                                                                  ExecutionSerial serial) {
+        // Disable further sub-allocation should the request be too large.
+        if (allocationSize > kRingBufferSize) {
+            std::unique_ptr<StagingBufferBase> stagingBuffer;
+            DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+
+            UploadHandle uploadHandle;
+            uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
+            uploadHandle.stagingBuffer = stagingBuffer.get();
+
+            // The dedicated buffer is reclaimed once the pending serial completes.
+            ReleaseStagingBuffer(std::move(stagingBuffer));
+            return uploadHandle;
+        }
+
+        // Note: Validation ensures size is already aligned.
+        // First-fit: find next smallest buffer large enough to satisfy the allocation request.
+        RingBuffer* targetRingBuffer = mRingBuffers.back().get();
+        for (auto& ringBuffer : mRingBuffers) {
+            const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
+            // Prevent overflow.
+            ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
+            const uint64_t remainingSize =
+                ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
+            if (allocationSize <= remainingSize) {
+                targetRingBuffer = ringBuffer.get();
+                break;
+            }
+        }
+
+        uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
+        if (targetRingBuffer != nullptr) {
+            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+        }
+
+        // Upon failure, append a newly created ring buffer to fulfill the
+        // request.
+        if (startOffset == RingBufferAllocator::kInvalidOffset) {
+            mRingBuffers.emplace_back(
+                std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
+
+            targetRingBuffer = mRingBuffers.back().get();
+            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+        }
+
+        // A fresh ring buffer of kRingBufferSize must satisfy any request that
+        // reached this point (allocationSize <= kRingBufferSize).
+        ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+
+        // Allocate the staging buffer backing the ringbuffer.
+        // Note: the first ringbuffer will be lazily created.
+        if (targetRingBuffer->mStagingBuffer == nullptr) {
+            std::unique_ptr<StagingBufferBase> stagingBuffer;
+            DAWN_TRY_ASSIGN(stagingBuffer,
+                            mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
+            targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
+        }
+
+        ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
+
+        UploadHandle uploadHandle;
+        uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
+        uploadHandle.mappedBuffer =
+            static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
+        uploadHandle.startOffset = startOffset;
+
+        return uploadHandle;
+    }
+
+    void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
+        // Reclaim memory within the ring buffers by ticking (or removing requests no longer
+        // in-flight).
+        for (size_t i = 0; i < mRingBuffers.size();) {
+            mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
+
+            // Never erase the last buffer as to prevent re-creating smaller buffers
+            // again. The last buffer is the largest.
+            if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
+                mRingBuffers.erase(mRingBuffers.begin() + i);
+                // Do not advance |i| here: the element after the erased one has
+                // shifted into slot |i| and still needs to be ticked. (The previous
+                // code incremented unconditionally and skipped that buffer.)
+            } else {
+                ++i;
+            }
+        }
+        mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
+    }
+
+    // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
+    // when it's not necessary.
+    // Over-allocates by (offsetAlignment - 1) bytes, then rounds the returned
+    // offset (and mapped pointer) up to the requested alignment.
+    ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
+                                                          ExecutionSerial serial,
+                                                          uint64_t offsetAlignment) {
+        ASSERT(offsetAlignment > 0);
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+        uint64_t additionalOffset =
+            Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
+        uploadHandle.mappedBuffer =
+            static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+        uploadHandle.startOffset += additionalOffset;
+        return uploadHandle;
+    }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h
new file mode 100644
index 00000000000..fa3f80a1ccf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/DynamicUploader.h
@@ -0,0 +1,66 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DYNAMICUPLOADER_H_
+#define DAWNNATIVE_DYNAMICUPLOADER_H_
+
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/StagingBuffer.h"
+
+// DynamicUploader is the front-end implementation used to manage multiple ring buffers for upload
+// usage.
+namespace dawn::native {
+
+    // Result of an upload allocation: a CPU-visible pointer into |stagingBuffer|
+    // at |startOffset|, already adjusted for any requested alignment.
+    struct UploadHandle {
+        uint8_t* mappedBuffer = nullptr;
+        uint64_t startOffset = 0;
+        StagingBufferBase* stagingBuffer = nullptr;
+    };
+
+    class DynamicUploader {
+      public:
+        DynamicUploader(DeviceBase* device);
+        ~DynamicUploader() = default;
+
+        // We add functions to Release StagingBuffers to the DynamicUploader as there's
+        // currently no place to track the allocated staging buffers such that they're freed after
+        // pending commands are finished. This should be changed when better resource allocation is
+        // implemented.
+        void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
+
+        // Returns a handle to |allocationSize| bytes of mapped staging memory whose
+        // start offset is a multiple of |offsetAlignment| (must be > 0). The memory
+        // is reserved until work tracked with |serial| completes.
+        ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
+                                             ExecutionSerial serial,
+                                             uint64_t offsetAlignment);
+        // Reclaims ring-buffer space and released staging buffers whose serials
+        // have completed.
+        void Deallocate(ExecutionSerial lastCompletedSerial);
+
+      private:
+        // Capacity of each ring buffer (4 MiB); larger requests bypass the rings.
+        static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
+
+        // A staging buffer (lazily created) paired with the allocator that
+        // sub-allocates ranges of it.
+        struct RingBuffer {
+            std::unique_ptr<StagingBufferBase> mStagingBuffer;
+            RingBufferAllocator mAllocator;
+        };
+
+        ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
+                                                     ExecutionSerial serial);
+
+        // Kept sorted by creation order; the last ring buffer is the largest.
+        std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
+        // Buffers waiting for their serial to complete before destruction.
+        SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
+        DeviceBase* mDevice;
+    };
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_DYNAMICUPLOADER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp
new file mode 100644
index 00000000000..b9ba5298636
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/EncodingContext.cpp
@@ -0,0 +1,217 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/EncodingContext.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+#include "dawn/native/RenderBundleEncoder.h"
+
+namespace dawn::native {
+
+    EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
+        : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
+    }
+
+    EncodingContext::~EncodingContext() {
+        Destroy();
+    }
+
+    // Idempotent: frees any un-acquired commands and marks the context so that
+    // further encoding and Finish() produce validation errors.
+    void EncodingContext::Destroy() {
+        if (mDestroyed) {
+            return;
+        }
+        if (!mWereCommandsAcquired) {
+            FreeCommands(GetIterator());
+        }
+        // If we weren't already finished, then we want to handle an error here so that any calls
+        // to Finish after Destroy will return a meaningful error.
+        if (!IsFinished()) {
+            HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
+        }
+        mDestroyed = true;
+        mCurrentEncoder = nullptr;
+    }
+
+    // Transfers ownership of all encoded commands to the caller. May only be
+    // called once (asserted).
+    CommandIterator EncodingContext::AcquireCommands() {
+        MoveToIterator();
+        ASSERT(!mWereCommandsAcquired);
+        mWereCommandsAcquired = true;
+        return std::move(mIterator);
+    }
+
+    CommandIterator* EncodingContext::GetIterator() {
+        MoveToIterator();
+        ASSERT(!mWereCommandsAcquired);
+        return &mIterator;
+    }
+
+    // Commits any pending commands and concatenates all committed allocators
+    // into mIterator. Safe to call repeatedly; the move happens only once.
+    void EncodingContext::MoveToIterator() {
+        CommitCommands(std::move(mPendingCommands));
+        if (!mWasMovedToIterator) {
+            mIterator.AcquireCommandBlocks(std::move(mAllocators));
+            mWasMovedToIterator = true;
+        }
+    }
+
+    // Before Finish(): stores only the first error, to be returned by Finish().
+    // After Finish(): forwards the error directly to the device.
+    void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
+        // Append in reverse so that the most recently set debug group is printed first, like a
+        // call stack.
+        for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
+            error->AppendDebugGroup(*iter);
+        }
+
+        if (!IsFinished()) {
+            // Encoding should only generate validation errors.
+            ASSERT(error->GetType() == InternalErrorType::Validation);
+            // If the encoding context is not finished, errors are deferred until
+            // Finish() is called.
+            if (mError == nullptr) {
+                mError = std::move(error);
+            }
+        } else {
+            mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+        }
+    }
+
+    void EncodingContext::WillBeginRenderPass() {
+        ASSERT(mCurrentEncoder == mTopLevelEncoder);
+        if (mDevice->IsValidationEnabled()) {
+            // When validation is enabled, we are going to want to capture all commands encoded
+            // between and including BeginRenderPassCmd and EndRenderPassCmd, and defer their
+            // sequencing until after we have a chance to insert any necessary validation
+            // commands. To support this we commit any current commands now, so that the
+            // impending BeginRenderPassCmd starts in a fresh CommandAllocator.
+            CommitCommands(std::move(mPendingCommands));
+        }
+    }
+
+    void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
+        // Assert we're at the top level.
+        ASSERT(mCurrentEncoder == mTopLevelEncoder);
+        ASSERT(passEncoder != nullptr);
+
+        mCurrentEncoder = passEncoder;
+    }
+
+    // Returns control to the top-level encoder and, with validation enabled,
+    // splices indirect-draw validation commands in front of the pass commands.
+    MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
+                                               RenderPassResourceUsageTracker usageTracker,
+                                               CommandEncoder* commandEncoder,
+                                               IndirectDrawMetadata indirectDrawMetadata) {
+        ASSERT(mCurrentEncoder != mTopLevelEncoder);
+        ASSERT(mCurrentEncoder == passEncoder);
+
+        mCurrentEncoder = mTopLevelEncoder;
+
+        if (mDevice->IsValidationEnabled()) {
+            // With validation enabled, commands were committed just before BeginRenderPassCmd was
+            // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
+            // mPendingCommands contains only the commands from BeginRenderPassCmd to
+            // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
+            // the validation encoder a chance to insert its commands first.
+            CommandAllocator renderCommands = std::move(mPendingCommands);
+            DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
+                                                          &indirectDrawMetadata));
+            // Commit order matters: validation commands first, then the pass.
+            CommitCommands(std::move(mPendingCommands));
+            CommitCommands(std::move(renderCommands));
+        }
+
+        mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
+        return {};
+    }
+
+    void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
+                                          ComputePassResourceUsage usages) {
+        ASSERT(mCurrentEncoder != mTopLevelEncoder);
+        ASSERT(mCurrentEncoder == passEncoder);
+
+        mCurrentEncoder = mTopLevelEncoder;
+        mComputePassUsages.push_back(std::move(usages));
+    }
+
+    void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
+        if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
+            // The current pass encoder is being deleted. Implicitly end the pass with an error.
+            mCurrentEncoder = mTopLevelEncoder;
+            HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                "Command buffer recording ended before %s was ended.", passEncoder));
+        }
+    }
+
+    const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
+        ASSERT(!mWereRenderPassUsagesAcquired);
+        return mRenderPassUsages;
+    }
+
+    RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
+        ASSERT(!mWereRenderPassUsagesAcquired);
+        mWereRenderPassUsagesAcquired = true;
+        return std::move(mRenderPassUsages);
+    }
+
+    const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
+        ASSERT(!mWereComputePassUsagesAcquired);
+        return mComputePassUsages;
+    }
+
+    ComputePassUsages EncodingContext::AcquireComputePassUsages() {
+        ASSERT(!mWereComputePassUsagesAcquired);
+        mWereComputePassUsagesAcquired = true;
+        return std::move(mComputePassUsages);
+    }
+
+    void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
+        mDebugGroupLabels.emplace_back(groupLabel);
+    }
+
+    void EncodingContext::PopDebugGroupLabel() {
+        mDebugGroupLabels.pop_back();
+    }
+
+    // Returns the first deferred encoding error, or a validation error if a pass
+    // was left open. Either way, the context can no longer encode afterwards.
+    MaybeError EncodingContext::Finish() {
+        DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
+
+        const ApiObjectBase* currentEncoder = mCurrentEncoder;
+        const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
+
+        // Even if finish validation fails, it is now invalid to call any encoding commands,
+        // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
+        // if Finish() has been called.
+        mCurrentEncoder = nullptr;
+        mTopLevelEncoder = nullptr;
+        CommitCommands(std::move(mPendingCommands));
+
+        if (mError != nullptr) {
+            return std::move(mError);
+        }
+        DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
+                        "Command buffer recording ended before %s was ended.", currentEncoder);
+        return {};
+    }
+
+    // Empty allocators are dropped so mAllocators only holds blocks with content.
+    void EncodingContext::CommitCommands(CommandAllocator allocator) {
+        if (!allocator.IsEmpty()) {
+            mAllocators.push_back(std::move(allocator));
+        }
+    }
+
+    bool EncodingContext::IsFinished() const {
+        // mTopLevelEncoder is nulled out by Finish(); see the note there.
+        return mTopLevelEncoder == nullptr;
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/EncodingContext.h b/chromium/third_party/dawn/src/dawn/native/EncodingContext.h
new file mode 100644
index 00000000000..659a9a72b4b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/EncodingContext.h
@@ -0,0 +1,182 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENCODINGCONTEXT_H_
+#define DAWNNATIVE_ENCODINGCONTEXT_H_
+
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+
+namespace dawn::native {
+
+    class CommandEncoder;
+    class DeviceBase;
+    class ApiObjectBase;
+
+    // Base class for allocating/iterating commands.
+    // It performs error tracking as well as encoding state for render/compute passes.
+    class EncodingContext {
+      public:
+        EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
+        ~EncodingContext();
+
+        // Marks the encoding context as destroyed so that any future encodes will fail, and all
+        // encoded commands are released.
+        void Destroy();
+
+        // AcquireCommands transfers ownership (single use); GetIterator only borrows.
+        CommandIterator AcquireCommands();
+        CommandIterator* GetIterator();
+
+        // Functions to handle encoder errors
+        void HandleError(std::unique_ptr<ErrorData> error);
+
+        // Records |maybeError| on the context if it is an error; returns true in
+        // that case so callers can early-out.
+        inline bool ConsumedError(MaybeError maybeError) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                HandleError(maybeError.AcquireError());
+                return true;
+            }
+            return false;
+        }
+
+        // Same as above, but appends a formatted context string (absl-style) to
+        // validation errors before recording them.
+        template <typename... Args>
+        inline bool ConsumedError(MaybeError maybeError,
+                                  const char* formatStr,
+                                  const Args&... args) {
+            if (DAWN_UNLIKELY(maybeError.IsError())) {
+                std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+                if (error->GetType() == InternalErrorType::Validation) {
+                    std::string out;
+                    absl::UntypedFormatSpec format(formatStr);
+                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                        error->AppendContext(std::move(out));
+                    } else {
+                        error->AppendContext(absl::StrFormat(
+                            "[Failed to format error message: \"%s\"].", formatStr));
+                    }
+                }
+                HandleError(std::move(error));
+                return true;
+            }
+            return false;
+        }
+
+        // Validates that |encoder| is the one currently allowed to record commands,
+        // recording a validation error (with a cause-specific message) if not.
+        inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
+            if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
+                if (mDestroyed) {
+                    HandleError(
+                        DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
+                } else if (mCurrentEncoder != mTopLevelEncoder) {
+                    // The top level encoder was used when a pass encoder was current.
+                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                        "Command cannot be recorded while %s is active.", mCurrentEncoder));
+                } else {
+                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                        "Recording in an error or already ended %s.", encoder));
+                }
+                return false;
+            }
+            return true;
+        }
+
+        // Runs |encodeFunction| against the pending command allocator if |encoder|
+        // is current; returns true on success (no validation error was consumed).
+        template <typename EncodeFunction>
+        inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
+            if (!CheckCurrentEncoder(encoder)) {
+                return false;
+            }
+            ASSERT(!mWasMovedToIterator);
+            return !ConsumedError(encodeFunction(&mPendingCommands));
+        }
+
+        // As above, with an error-context format string appended on failure.
+        template <typename EncodeFunction, typename... Args>
+        inline bool TryEncode(const ApiObjectBase* encoder,
+                              EncodeFunction&& encodeFunction,
+                              const char* formatStr,
+                              const Args&... args) {
+            if (!CheckCurrentEncoder(encoder)) {
+                return false;
+            }
+            ASSERT(!mWasMovedToIterator);
+            return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+        }
+
+        // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
+        // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
+        // failed validation before the BeginRenderPassCmd could be encoded.
+        void WillBeginRenderPass();
+
+        // Functions to set current encoder state
+        void EnterPass(const ApiObjectBase* passEncoder);
+        MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
+                                  RenderPassResourceUsageTracker usageTracker,
+                                  CommandEncoder* commandEncoder,
+                                  IndirectDrawMetadata indirectDrawMetadata);
+        void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
+        MaybeError Finish();
+
+        // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
+        // mCurrentEncoder.
+        void EnsurePassExited(const ApiObjectBase* passEncoder);
+
+        // Get* borrows the usages; Acquire* moves them out (single use, asserted).
+        const RenderPassUsages& GetRenderPassUsages() const;
+        const ComputePassUsages& GetComputePassUsages() const;
+        RenderPassUsages AcquireRenderPassUsages();
+        ComputePassUsages AcquireComputePassUsages();
+
+        void PushDebugGroupLabel(const char* groupLabel);
+        void PopDebugGroupLabel();
+
+      private:
+        void CommitCommands(CommandAllocator allocator);
+
+        bool IsFinished() const;
+        void MoveToIterator();
+
+        DeviceBase* mDevice;
+
+        // There can only be two levels of encoders. Top-level and render/compute pass.
+        // The top level encoder is the encoder the EncodingContext is created with.
+        // It doubles as flag to check if encoding has been Finished.
+        const ApiObjectBase* mTopLevelEncoder;
+        // The current encoder must be the same as the encoder provided to TryEncode,
+        // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
+        // The current encoder changes with Enter/ExitPass which should be called by
+        // CommandEncoder::Begin/EndPass.
+        const ApiObjectBase* mCurrentEncoder;
+
+        // Per-pass resource usages, appended when each pass exits.
+        RenderPassUsages mRenderPassUsages;
+        bool mWereRenderPassUsagesAcquired = false;
+        ComputePassUsages mComputePassUsages;
+        bool mWereComputePassUsagesAcquired = false;
+
+        // Commands encoded since the last CommitCommands call.
+        CommandAllocator mPendingCommands;
+
+        // Committed allocators, in sequencing order; concatenated by MoveToIterator.
+        std::vector<CommandAllocator> mAllocators;
+        CommandIterator mIterator;
+        bool mWasMovedToIterator = false;
+        bool mWereCommandsAcquired = false;
+        bool mDestroyed = false;
+
+        // First error recorded before Finish(); returned by Finish().
+        std::unique_ptr<ErrorData> mError;
+        // Active debug group stack, appended to errors innermost-first.
+        std::vector<std::string> mDebugGroupLabels;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ENCODINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h b/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h
new file mode 100644
index 00000000000..671db233256
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/EnumClassBitmasks.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMCLASSBITMASK_H_
+#define DAWNNATIVE_ENUMCLASSBITMASK_H_
+
+#include "dawn/EnumClassBitmasks.h"
+
+namespace dawn::native {
+
+    // EnumClassBitmasks is a helper in the dawn:: namespace.
+    // Re-export it in the dawn::native namespace.
+ DAWN_IMPORT_BITMASK_OPERATORS
+
+ // Specify this for usage with EnumMaskIterator
+ template <typename T>
+ struct EnumBitmaskSize {
+ static constexpr unsigned value = 0;
+ };
+
+ template <typename T>
+ constexpr bool HasOneBit(T value) {
+ return HasZeroOrOneBits(value) && value != T(0);
+ }
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ENUMCLASSBITMASK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h b/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h
new file mode 100644
index 00000000000..6653ef41953
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/EnumMaskIterator.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMMASKITERATOR_H_
+#define DAWNNATIVE_ENUMMASKITERATOR_H_
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/EnumClassBitmasks.h"
+
+namespace dawn::native {
+
+ template <typename T>
+ class EnumMaskIterator final {
+ static constexpr size_t N = EnumBitmaskSize<T>::value;
+ static_assert(N > 0);
+
+ using U = std::underlying_type_t<T>;
+
+ public:
+ EnumMaskIterator(const T& mask) : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
+ // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
+ ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
+ }
+
+ class Iterator final {
+ public:
+ Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
+ }
+
+ Iterator& operator++() {
+ ++mIter;
+ return *this;
+ }
+
+ bool operator==(const Iterator& other) const {
+ return mIter == other.mIter;
+ }
+
+ bool operator!=(const Iterator& other) const {
+ return mIter != other.mIter;
+ }
+
+ T operator*() const {
+ U value = *mIter;
+ return static_cast<T>(U(1) << value);
+ }
+
+ private:
+ typename BitSetIterator<N, U>::Iterator mIter;
+ };
+
+ Iterator begin() const {
+ return Iterator(mBitSetIterator.begin());
+ }
+
+ Iterator end() const {
+ return Iterator(mBitSetIterator.end());
+ }
+
+ private:
+ BitSetIterator<N, U> mBitSetIterator;
+ };
+
+ template <typename T>
+ EnumMaskIterator<T> IterateEnumMask(const T& mask) {
+ return EnumMaskIterator<T>(mask);
+ }
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ENUMMASKITERATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Error.cpp b/chromium/third_party/dawn/src/dawn/native/Error.cpp
new file mode 100644
index 00000000000..d524a327660
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Error.cpp
@@ -0,0 +1,64 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Error.h"
+
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ void IgnoreErrors(MaybeError maybeError) {
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
+ // During shutdown and destruction, device lost errors can be ignored.
+ // We can also ignore other unexpected internal errors on shut down and treat it as
+ // device lost so that we can continue with destruction.
+ ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
+ errorData->GetType() == InternalErrorType::Internal);
+ }
+ }
+
+ wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
+ switch (type) {
+ case InternalErrorType::Validation:
+ return wgpu::ErrorType::Validation;
+ case InternalErrorType::OutOfMemory:
+ return wgpu::ErrorType::OutOfMemory;
+
+ // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
+ // the device at the API level to be lost, so treat it like a DeviceLost error.
+ case InternalErrorType::Internal:
+ case InternalErrorType::DeviceLost:
+ return wgpu::ErrorType::DeviceLost;
+
+ default:
+ return wgpu::ErrorType::Unknown;
+ }
+ }
+
+ InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
+ switch (type) {
+ case wgpu::ErrorType::Validation:
+ return InternalErrorType::Validation;
+ case wgpu::ErrorType::OutOfMemory:
+ return InternalErrorType::OutOfMemory;
+ case wgpu::ErrorType::DeviceLost:
+ return InternalErrorType::DeviceLost;
+ default:
+ return InternalErrorType::Internal;
+ }
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Error.h b/chromium/third_party/dawn/src/dawn/native/Error.h
new file mode 100644
index 00000000000..9e9e591eff7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Error.h
@@ -0,0 +1,192 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERROR_H_
+#define DAWNNATIVE_ERROR_H_
+
+#include "absl/strings/str_format.h"
+#include "dawn/common/Result.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/webgpu_absl_format.h"
+
+#include <string>
+
+namespace dawn::native {
+
+ enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
+
+ // MaybeError and ResultOrError are meant to be used as return value for function that are not
+ // expected to, but might fail. The handling of error is potentially much slower than successes.
+ using MaybeError = Result<void, ErrorData>;
+
+ template <typename T>
+ using ResultOrError = Result<T, ErrorData>;
+
+ // Returning a success is done like so:
+ // return {}; // for Error
+ // return SomethingOfTypeT; // for ResultOrError<T>
+ //
+ // Returning an error is done via:
+ // return DAWN_MAKE_ERROR(errorType, "My error message");
+ //
+ // but shorthand version for specific error types are preferred:
+ // return DAWN_VALIDATION_ERROR("My error message");
+ //
+ // There are different types of errors that should be used for different purpose:
+ //
+ // - Validation: these are errors that show the user did something bad, which causes the
+ // whole call to be a no-op. It's most commonly found in the frontend but there can be some
+ // backend specific validation in non-conformant backends too.
+ //
+ // - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
+ // This is similar to validation errors in that the call becomes a no-op and returns an
+    //   error object, but is reported separately from validation to the user.
+ //
+ // - Device loss: the backend driver reported that the GPU has been lost, which means all
+ // previous commands magically disappeared and the only thing left to do is clean up.
+ // Note: Device loss should be used rarely and in most case you want to use Internal
+ // instead.
+ //
+ // - Internal: something happened that the backend didn't expect, and it doesn't know
+ // how to recover from that situation. This causes the device to be lost, but is separate
+ // from device loss, because the GPU execution is still happening so we need to clean up
+ // more gracefully.
+ //
+ // - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
+ // more clarity.
+
+#define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
+ ::dawn::native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
+
+#define DAWN_VALIDATION_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Validation, MESSAGE)
+
+// TODO(dawn:563): Rename to DAWN_VALIDATION_ERROR once all message format strings have been
+// converted to constexpr.
+#define DAWN_FORMAT_VALIDATION_ERROR(...) \
+ DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__))
+
+#define DAWN_INVALID_IF(EXPR, ...) \
+ if (DAWN_UNLIKELY(EXPR)) { \
+ return DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__)); \
+ } \
+ for (;;) \
+ break
+
+// DAWN_DEVICE_LOST_ERROR means that there was a real unrecoverable native device lost error.
+// We can't even do a graceful shutdown because the Device is gone.
+#define DAWN_DEVICE_LOST_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::DeviceLost, MESSAGE)
+
+// DAWN_INTERNAL_ERROR means Dawn hit an unexpected error in the backend and should try to
+// gracefully shut down.
+#define DAWN_INTERNAL_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Internal, MESSAGE)
+
+#define DAWN_FORMAT_INTERNAL_ERROR(...) \
+ DAWN_MAKE_ERROR(InternalErrorType::Internal, absl::StrFormat(__VA_ARGS__))
+
+#define DAWN_UNIMPLEMENTED_ERROR(MESSAGE) \
+ DAWN_MAKE_ERROR(InternalErrorType::Internal, std::string("Unimplemented: ") + MESSAGE)
+
+// DAWN_OUT_OF_MEMORY_ERROR means we ran out of memory. It may be used as a signal internally in
+// Dawn to free up unused resources. Or, it may bubble up to the application to signal an allocation
+// was too large or they should free some existing resources.
+#define DAWN_OUT_OF_MEMORY_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::OutOfMemory, MESSAGE)
+
+#define DAWN_CONCAT1(x, y) x##y
+#define DAWN_CONCAT2(x, y) DAWN_CONCAT1(x, y)
+#define DAWN_LOCAL_VAR DAWN_CONCAT2(_localVar, __LINE__)
+
+ // When Errors aren't handled explicitly, calls to functions returning errors should be
+ // wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
+ // the current function.
+#define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
+
+#define DAWN_TRY_CONTEXT(EXPR, ...) \
+ DAWN_TRY_WITH_CLEANUP(EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
+
+#define DAWN_TRY_WITH_CLEANUP(EXPR, BODY) \
+ { \
+ auto DAWN_LOCAL_VAR = EXPR; \
+ if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
+ std::unique_ptr<::dawn::native::ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+ {BODY} /* comment to force the formatter to insert a newline */ \
+ error->AppendBacktrace(__FILE__, __func__, __LINE__); \
+ return {std::move(error)}; \
+ } \
+ } \
+ for (;;) \
+ break
+
+ // DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
+ // any, to VAR.
+#define DAWN_TRY_ASSIGN(VAR, EXPR) DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {})
+
+ // Argument helpers are used to determine which macro implementations should be called when
+ // overloading with different number of variables.
+#define DAWN_ERROR_UNIMPLEMENTED_MACRO_(...) UNREACHABLE()
+#define DAWN_ERROR_GET_5TH_ARG_HELPER_(_1, _2, _3, _4, NAME, ...) NAME
+#define DAWN_ERROR_GET_5TH_ARG_(args) DAWN_ERROR_GET_5TH_ARG_HELPER_ args
+
+ // DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 version so that users can override the
+ // return value of the macro when necessary. This is particularly useful if the function
+ // calling the macro may want to return void instead of the error, i.e. in a test where we may
+ // just want to assert and fail if the assign cannot go through. In both the cleanup and return
+ // clauses, users can use the `error` variable to access the pointer to the acquired error.
+ //
+ // Example usages:
+ // 3 Argument Case:
+ // Result res;
+ // DAWN_TRY_ASSIGN_WITH_CLEANUP(
+ // res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
+ // );
+ //
+ // 4 Argument Case:
+ // bool FunctionThatReturnsBool() {
+ // DAWN_TRY_ASSIGN_WITH_CLEANUP(
+ // res, GetResultOrErrorFunction(),
+ // { AddAdditionalErrorInformation(error.get()); },
+ // false
+ // );
+ // }
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP(...) \
+ DAWN_ERROR_GET_5TH_ARG_((__VA_ARGS__, DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_, \
+ DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_, \
+ DAWN_ERROR_UNIMPLEMENTED_MACRO_)) \
+ (__VA_ARGS__)
+
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_(VAR, EXPR, BODY) \
+ DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, std::move(error))
+
+#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, RET) \
+ { \
+ auto DAWN_LOCAL_VAR = EXPR; \
+ if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
+ std::unique_ptr<ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+ {BODY} /* comment to force the formatter to insert a newline */ \
+ error->AppendBacktrace(__FILE__, __func__, __LINE__); \
+ return (RET); \
+ } \
+ VAR = DAWN_LOCAL_VAR.AcquireSuccess(); \
+ } \
+ for (;;) \
+ break
+
+ // Assert that errors are device loss so that we can continue with destruction
+ void IgnoreErrors(MaybeError maybeError);
+
+ wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
+ InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp
new file mode 100644
index 00000000000..863d20ffc4d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorData.cpp
@@ -0,0 +1,103 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorData.h"
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
+ std::string message,
+ const char* file,
+ const char* function,
+ int line) {
+ std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
+ error->AppendBacktrace(file, function, line);
+ return error;
+ }
+
+ ErrorData::ErrorData(InternalErrorType type, std::string message)
+ : mType(type), mMessage(std::move(message)) {
+ }
+
+ void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
+ BacktraceRecord record;
+ record.file = file;
+ record.function = function;
+ record.line = line;
+
+ mBacktrace.push_back(std::move(record));
+ }
+
+ void ErrorData::AppendContext(std::string context) {
+ mContexts.push_back(std::move(context));
+ }
+
+ void ErrorData::AppendDebugGroup(std::string label) {
+ mDebugGroups.push_back(std::move(label));
+ }
+
+ InternalErrorType ErrorData::GetType() const {
+ return mType;
+ }
+
+ const std::string& ErrorData::GetMessage() const {
+ return mMessage;
+ }
+
+ const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
+ return mBacktrace;
+ }
+
+ const std::vector<std::string>& ErrorData::GetContexts() const {
+ return mContexts;
+ }
+
+ const std::vector<std::string>& ErrorData::GetDebugGroups() const {
+ return mDebugGroups;
+ }
+
+ std::string ErrorData::GetFormattedMessage() const {
+ std::ostringstream ss;
+ ss << mMessage << "\n";
+
+ if (!mContexts.empty()) {
+ for (auto context : mContexts) {
+ ss << " - While " << context << "\n";
+ }
+ }
+
+        // For non-validation errors, or errors that lack a context, include the
+ // stack trace for debugging purposes.
+ if (mContexts.empty() || mType != InternalErrorType::Validation) {
+ for (const auto& callsite : mBacktrace) {
+ ss << " at " << callsite.function << " (" << callsite.file << ":"
+ << callsite.line << ")\n";
+ }
+ }
+
+ if (!mDebugGroups.empty()) {
+ ss << "\nDebug group stack:\n";
+ for (auto label : mDebugGroups) {
+ ss << " > \"" << label << "\"\n";
+ }
+ }
+
+ return ss.str();
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorData.h b/chromium/third_party/dawn/src/dawn/native/ErrorData.h
new file mode 100644
index 00000000000..901c54f1dc7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorData.h
@@ -0,0 +1,70 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORDATA_H_
+#define DAWNNATIVE_ERRORDATA_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace wgpu {
+ enum class ErrorType : uint32_t;
+}
+
+namespace dawn {
+ using ErrorType = wgpu::ErrorType;
+}
+
+namespace dawn::native {
+ enum class InternalErrorType : uint32_t;
+
+ class [[nodiscard]] ErrorData {
+ public:
+ [[nodiscard]] static std::unique_ptr<ErrorData> Create(
+ InternalErrorType type, std::string message, const char* file, const char* function,
+ int line);
+ ErrorData(InternalErrorType type, std::string message);
+
+ struct BacktraceRecord {
+ const char* file;
+ const char* function;
+ int line;
+ };
+ void AppendBacktrace(const char* file, const char* function, int line);
+ void AppendContext(std::string context);
+ void AppendDebugGroup(std::string label);
+
+ InternalErrorType GetType() const;
+ const std::string& GetMessage() const;
+ const std::vector<BacktraceRecord>& GetBacktrace() const;
+ const std::vector<std::string>& GetContexts() const;
+ const std::vector<std::string>& GetDebugGroups() const;
+
+ std::string GetFormattedMessage() const;
+
+ private:
+ InternalErrorType mType;
+ std::string mMessage;
+ std::vector<BacktraceRecord> mBacktrace;
+ std::vector<std::string> mContexts;
+ std::vector<std::string> mDebugGroups;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ERRORDATA_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp
new file mode 100644
index 00000000000..af87498e371
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.cpp
@@ -0,0 +1,70 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorInjector.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/DawnNative.h"
+
+namespace dawn::native {
+
+ namespace {
+
+ bool sIsEnabled = false;
+ uint64_t sNextIndex = 0;
+ uint64_t sInjectedFailureIndex = 0;
+ bool sHasPendingInjectedError = false;
+
+ } // anonymous namespace
+
+ void EnableErrorInjector() {
+ sIsEnabled = true;
+ }
+
+ void DisableErrorInjector() {
+ sIsEnabled = false;
+ }
+
+ void ClearErrorInjector() {
+ sNextIndex = 0;
+ sHasPendingInjectedError = false;
+ }
+
+ bool ErrorInjectorEnabled() {
+ return sIsEnabled;
+ }
+
+ uint64_t AcquireErrorInjectorCallCount() {
+ uint64_t count = sNextIndex;
+ ClearErrorInjector();
+ return count;
+ }
+
+ bool ShouldInjectError() {
+ uint64_t index = sNextIndex++;
+ if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
+ sHasPendingInjectedError = false;
+ return true;
+ }
+ return false;
+ }
+
+ void InjectErrorAt(uint64_t index) {
+ // Only one error can be injected at a time.
+ ASSERT(!sHasPendingInjectedError);
+ sInjectedFailureIndex = index;
+ sHasPendingInjectedError = true;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h
new file mode 100644
index 00000000000..ab418865292
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorInjector.h
@@ -0,0 +1,68 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORINJECTOR_H_
+#define DAWNNATIVE_ERRORINJECTOR_H_
+
+#include <stdint.h>
+#include <type_traits>
+
+namespace dawn::native {
+
+ template <typename ErrorType>
+ struct InjectedErrorResult {
+ ErrorType error;
+ bool injected;
+ };
+
+ bool ErrorInjectorEnabled();
+
+ bool ShouldInjectError();
+
+ template <typename ErrorType>
+ InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
+ return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+ }
+
+ template <typename ErrorType, typename... ErrorTypes>
+ InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
+ if (ShouldInjectError()) {
+ return InjectedErrorResult<ErrorType>{errorType, true};
+ }
+ return MaybeInjectError(errorTypes...);
+ }
+
+} // namespace dawn::native
+
+#if defined(DAWN_ENABLE_ERROR_INJECTION)
+
+# define INJECT_ERROR_OR_RUN(stmt, ...) \
+ [&]() { \
+ if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) { \
+ /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
+ auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__); \
+ if (injectedError.injected) { \
+ return injectedError.error; \
+ } \
+ } \
+ return (stmt); \
+ }()
+
+#else
+
+# define INJECT_ERROR_OR_RUN(stmt, ...) stmt
+
+#endif
+
+#endif // DAWNNATIVE_ERRORINJECTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp
new file mode 100644
index 00000000000..06b7a95472b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorScope.cpp
@@ -0,0 +1,92 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ErrorScope.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+ namespace {
+
+ wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
+ switch (filter) {
+ case wgpu::ErrorFilter::Validation:
+ return wgpu::ErrorType::Validation;
+ case wgpu::ErrorFilter::OutOfMemory:
+ return wgpu::ErrorType::OutOfMemory;
+ }
+ UNREACHABLE();
+ }
+
+ } // namespace
+
+ ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+ : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
+ }
+
+ wgpu::ErrorType ErrorScope::GetErrorType() const {
+ return mCapturedError;
+ }
+
+ const char* ErrorScope::GetErrorMessage() const {
+ return mErrorMessage.c_str();
+ }
+
+ void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
+ mScopes.push_back(ErrorScope(filter));
+ }
+
+ ErrorScope ErrorScopeStack::Pop() {
+ ASSERT(!mScopes.empty());
+ ErrorScope scope = std::move(mScopes.back());
+ mScopes.pop_back();
+ return scope;
+ }
+
+ bool ErrorScopeStack::Empty() const {
+ return mScopes.empty();
+ }
+
+ bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
+ for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
+ if (it->mMatchedErrorType != type) {
+ // Error filter does not match. Move on to the next scope.
+ continue;
+ }
+
+ // Filter matches.
+ // Record the error if the scope doesn't have one yet.
+ if (it->mCapturedError == wgpu::ErrorType::NoError) {
+ it->mCapturedError = type;
+ it->mErrorMessage = message;
+ }
+
+ if (type == wgpu::ErrorType::DeviceLost) {
+ if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
+ // DeviceLost overrides any other error that is not a DeviceLost.
+ it->mCapturedError = type;
+ it->mErrorMessage = message;
+ }
+ } else {
+ // Errors that are not device lost are captured and stop propogating.
+ return true;
+ }
+ }
+
+ // The error was not captured.
+ return false;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ErrorScope.h b/chromium/third_party/dawn/src/dawn/native/ErrorScope.h
new file mode 100644
index 00000000000..766a81eca08
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ErrorScope.h
@@ -0,0 +1,57 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORSCOPE_H_
+#define DAWNNATIVE_ERRORSCOPE_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include <string>
+#include <vector>
+
+namespace dawn::native {
+
+ class ErrorScope {
+ public:
+ wgpu::ErrorType GetErrorType() const;
+ const char* GetErrorMessage() const;
+
+ private:
+ friend class ErrorScopeStack;
+ explicit ErrorScope(wgpu::ErrorFilter errorFilter);
+
+ wgpu::ErrorType mMatchedErrorType;
+ wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
+ std::string mErrorMessage = "";
+ };
+
+ class ErrorScopeStack {
+ public:
+ void Push(wgpu::ErrorFilter errorFilter);
+ ErrorScope Pop();
+
+ bool Empty() const;
+
+ // Pass an error to the scopes in the stack. Returns true if one of the scopes
+ // captured the error. Returns false if the error should be forwarded to the
+ // uncaptured error callback.
+ bool HandleError(wgpu::ErrorType type, const char* message);
+
+ private:
+ std::vector<ErrorScope> mScopes;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_ERRORSCOPE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp
new file mode 100644
index 00000000000..fe06ccc176a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.cpp
@@ -0,0 +1,230 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ExternalTexture.h"
+
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
+ DAWN_INVALID_IF(
+ (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
+ "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
+ textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
+
+ DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
+ "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
+ textureView->GetDimension());
+
+ DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
+ "The external texture plane (%s) mip level count (%u) is not 1.",
+ textureView, textureView->GetLevelCount());
+
+ DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
+ "The external texture plane (%s) sample count (%u) is not one.",
+ textureView, textureView->GetTexture()->GetSampleCount());
+
+ return {};
+ }
+
+ MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ ASSERT(descriptor);
+ ASSERT(descriptor->plane0);
+
+ DAWN_TRY(device->ValidateObject(descriptor->plane0));
+
+ wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
+
+ if (descriptor->plane1) {
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Bi-planar external textures are disabled until the implementation is completed.");
+
+ DAWN_INVALID_IF(descriptor->colorSpace != wgpu::PredefinedColorSpace::Srgb,
+ "The specified color space (%s) is not %s.", descriptor->colorSpace,
+ wgpu::PredefinedColorSpace::Srgb);
+
+ DAWN_TRY(device->ValidateObject(descriptor->plane1));
+ wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
+
+ DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
+ "The bi-planar external texture plane (%s) format (%s) is not %s.",
+ descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
+ DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
+ "The bi-planar external texture plane (%s) format (%s) is not %s.",
+ descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
+
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
+ } else {
+ switch (plane0Format) {
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+ break;
+ default:
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The external texture plane (%s) format (%s) is not a supported format "
+ "(%s, %s, %s).",
+ descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
+ }
+ }
+
+ return {};
+ }
+
+ // static
+ ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ Ref<ExternalTextureBase> externalTexture =
+ AcquireRef(new ExternalTextureBase(device, descriptor));
+ DAWN_TRY(externalTexture->Initialize(device, descriptor));
+ return std::move(externalTexture);
+ }
+
+ ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor)
+ : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
+ TrackInDevice();
+ }
+
+ ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
+ TrackInDevice();
+ }
+
+ ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ ExternalTextureBase::~ExternalTextureBase() = default;
+
+ MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ // Store any passed in TextureViews associated with individual planes.
+ mTextureViews[0] = descriptor->plane0;
+
+ if (descriptor->plane1) {
+ mTextureViews[1] = descriptor->plane1;
+ } else {
+ TextureDescriptor textureDesc;
+ textureDesc.dimension = wgpu::TextureDimension::e2D;
+ textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+ textureDesc.label = "Dawn_External_Texture_Dummy_Texture";
+ textureDesc.size = {1, 1, 1};
+ textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+ DAWN_TRY_ASSIGN(mDummyTexture, device->CreateTexture(&textureDesc));
+
+ TextureViewDescriptor textureViewDesc;
+ textureViewDesc.arrayLayerCount = 1;
+ textureViewDesc.aspect = wgpu::TextureAspect::All;
+ textureViewDesc.baseArrayLayer = 0;
+ textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+ textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+ textureViewDesc.label = "Dawn_External_Texture_Dummy_Texture_View";
+ textureViewDesc.mipLevelCount = 1;
+
+ DAWN_TRY_ASSIGN(mTextureViews[1],
+ device->CreateTextureView(mDummyTexture.Get(), &textureViewDesc));
+ }
+
+ // We must create a buffer to store parameters needed by a shader that operates on this
+ // external texture.
+ BufferDescriptor bufferDesc;
+ bufferDesc.size = sizeof(ExternalTextureParams);
+ bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+ bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
+
+ DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
+
+ // Dawn & Tint's YUV to RGB conversion implementation was inspired by the conversions found
+        // in libYUV. If this implementation needs to be expanded to support more colorspaces, this file
+ // is an excellent reference: chromium/src/third_party/libyuv/source/row_common.cc.
+ //
+ // The conversion from YUV to RGB looks like this:
+ // r = Y * 1.164 + V * vr
+ // g = Y * 1.164 - U * ug - V * vg
+ // b = Y * 1.164 + U * ub
+ //
+ // By changing the values of vr, vg, ub, and ug we can change the destination color space.
+ ExternalTextureParams params;
+ params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
+
+ switch (descriptor->colorSpace) {
+ case wgpu::PredefinedColorSpace::Srgb:
+ // Numbers derived from ITU-R recommendation for limited range BT.709
+ params.vr = 1.793;
+ params.vg = 0.392;
+ params.ub = 0.813;
+ params.ug = 2.017;
+ break;
+ case wgpu::PredefinedColorSpace::Undefined:
+ break;
+ }
+
+ DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
+ sizeof(ExternalTextureParams)));
+
+ return {};
+ }
+
+ const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
+ ExternalTextureBase::GetTextureViews() const {
+ return mTextureViews;
+ }
+
+ MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
+ "Destroyed external texture %s is used in a submit.", this);
+ return {};
+ }
+
+ void ExternalTextureBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
+ return;
+ }
+ Destroy();
+ }
+
+ void ExternalTextureBase::DestroyImpl() {
+ mState = ExternalTextureState::Destroyed;
+ }
+
+ // static
+ ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
+ return new ExternalTextureBase(device, ObjectBase::kError);
+ }
+
+ BufferBase* ExternalTextureBase::GetParamsBuffer() const {
+ return mParamsBuffer.Get();
+ }
+
+ ObjectType ExternalTextureBase::GetType() const {
+ return ObjectType::ExternalTexture;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h
new file mode 100644
index 00000000000..e32b6317f44
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ExternalTexture.h
@@ -0,0 +1,77 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_EXTERNALTEXTURE_H_
+#define DAWNNATIVE_EXTERNALTEXTURE_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+
+namespace dawn::native {
+
+ class TextureViewBase;
+
+ struct ExternalTextureParams {
+ uint32_t numPlanes;
+ float vr;
+ float vg;
+ float ub;
+ float ug;
+ };
+
+ MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
+
+ class ExternalTextureBase : public ApiObjectBase {
+ public:
+ static ResultOrError<Ref<ExternalTextureBase>> Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
+
+ BufferBase* GetParamsBuffer() const;
+ const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
+ ObjectType GetType() const override;
+
+ MaybeError ValidateCanUseInSubmitNow() const;
+ static ExternalTextureBase* MakeError(DeviceBase* device);
+
+ void APIDestroy();
+
+ protected:
+ // Constructor used only for mocking and testing.
+ ExternalTextureBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ ~ExternalTextureBase() override;
+
+ private:
+ ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+
+ enum class ExternalTextureState { Alive, Destroyed };
+ ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+
+ Ref<TextureBase> mDummyTexture;
+ Ref<BufferBase> mParamsBuffer;
+ std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
+
+ ExternalTextureState mState;
+ };
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_EXTERNALTEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Features.cpp b/chromium/third_party/dawn/src/dawn/native/Features.cpp
new file mode 100644
index 00000000000..56a532c1298
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Features.cpp
@@ -0,0 +1,277 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Features.h"
+
+namespace dawn::native {
+ namespace {
+
+ struct FeatureEnumAndInfo {
+ Feature feature;
+ FeatureInfo info;
+ bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
+ };
+
+ using FeatureEnumAndInfoList =
+ std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
+
+ static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
+ {{Feature::TextureCompressionBC,
+ {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+ &WGPUDeviceProperties::textureCompressionBC},
+ {Feature::TextureCompressionETC2,
+ {"texture-compression-etc2",
+ "Support Ericsson Texture Compressed (ETC2/EAC) texture "
+ "formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionETC2},
+ {Feature::TextureCompressionASTC,
+ {"texture-compression-astc",
+ "Support Adaptable Scalable Texture Compressed (ASTC) "
+ "texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionASTC},
+ {Feature::ShaderFloat16,
+ {"shader-float16",
+ "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+ &WGPUDeviceProperties::shaderFloat16},
+ {Feature::PipelineStatisticsQuery,
+ {"pipeline-statistics-query", "Support Pipeline Statistics Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::pipelineStatisticsQuery},
+ {Feature::TimestampQuery,
+ {"timestamp-query", "Support Timestamp Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::timestampQuery},
+ {Feature::DepthClamping,
+ {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+ &WGPUDeviceProperties::depthClamping},
+ {Feature::Depth24UnormStencil8,
+ {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+ &WGPUDeviceProperties::depth24UnormStencil8},
+ {Feature::Depth32FloatStencil8,
+ {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+ &WGPUDeviceProperties::depth32FloatStencil8},
+ {Feature::DawnInternalUsages,
+ {"dawn-internal-usages",
+ "Add internal usages to resources to affect how the texture is allocated, but not "
+ "frontend validation. Other internal commands may access this usage.",
+ "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+ "dawn_internal_usages.md"},
+ &WGPUDeviceProperties::dawnInternalUsages},
+ {Feature::MultiPlanarFormats,
+ {"multiplanar-formats",
+ "Import and use multi-planar texture formats with per plane views",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
+ &WGPUDeviceProperties::multiPlanarFormats},
+ {Feature::DawnNative,
+ {"dawn-native", "WebGPU is running on top of dawn_native.",
+ "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+ "dawn_native.md"},
+ &WGPUDeviceProperties::dawnNative}}};
+
+ Feature FromAPIFeature(wgpu::FeatureName feature) {
+ switch (feature) {
+ case wgpu::FeatureName::Undefined:
+ return Feature::InvalidEnum;
+
+ case wgpu::FeatureName::TimestampQuery:
+ return Feature::TimestampQuery;
+ case wgpu::FeatureName::PipelineStatisticsQuery:
+ return Feature::PipelineStatisticsQuery;
+ case wgpu::FeatureName::TextureCompressionBC:
+ return Feature::TextureCompressionBC;
+ case wgpu::FeatureName::TextureCompressionETC2:
+ return Feature::TextureCompressionETC2;
+ case wgpu::FeatureName::TextureCompressionASTC:
+ return Feature::TextureCompressionASTC;
+ case wgpu::FeatureName::DepthClamping:
+ return Feature::DepthClamping;
+ case wgpu::FeatureName::Depth24UnormStencil8:
+ return Feature::Depth24UnormStencil8;
+ case wgpu::FeatureName::Depth32FloatStencil8:
+ return Feature::Depth32FloatStencil8;
+ case wgpu::FeatureName::DawnShaderFloat16:
+ return Feature::ShaderFloat16;
+ case wgpu::FeatureName::DawnInternalUsages:
+ return Feature::DawnInternalUsages;
+ case wgpu::FeatureName::DawnMultiPlanarFormats:
+ return Feature::MultiPlanarFormats;
+ case wgpu::FeatureName::DawnNative:
+ return Feature::DawnNative;
+
+ case wgpu::FeatureName::IndirectFirstInstance:
+ return Feature::InvalidEnum;
+ }
+ return Feature::InvalidEnum;
+ }
+
+ wgpu::FeatureName ToAPIFeature(Feature feature) {
+ switch (feature) {
+ case Feature::TextureCompressionBC:
+ return wgpu::FeatureName::TextureCompressionBC;
+ case Feature::TextureCompressionETC2:
+ return wgpu::FeatureName::TextureCompressionETC2;
+ case Feature::TextureCompressionASTC:
+ return wgpu::FeatureName::TextureCompressionASTC;
+ case Feature::PipelineStatisticsQuery:
+ return wgpu::FeatureName::PipelineStatisticsQuery;
+ case Feature::TimestampQuery:
+ return wgpu::FeatureName::TimestampQuery;
+ case Feature::DepthClamping:
+ return wgpu::FeatureName::DepthClamping;
+ case Feature::Depth24UnormStencil8:
+ return wgpu::FeatureName::Depth24UnormStencil8;
+ case Feature::Depth32FloatStencil8:
+ return wgpu::FeatureName::Depth32FloatStencil8;
+ case Feature::ShaderFloat16:
+ return wgpu::FeatureName::DawnShaderFloat16;
+ case Feature::DawnInternalUsages:
+ return wgpu::FeatureName::DawnInternalUsages;
+ case Feature::MultiPlanarFormats:
+ return wgpu::FeatureName::DawnMultiPlanarFormats;
+ case Feature::DawnNative:
+ return wgpu::FeatureName::DawnNative;
+
+ case Feature::EnumCount:
+ UNREACHABLE();
+ }
+ }
+
+ } // anonymous namespace
+
+ void FeaturesSet::EnableFeature(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ featuresBitSet.set(featureIndex);
+ }
+
+ void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
+ EnableFeature(FromAPIFeature(feature));
+ }
+
+ bool FeaturesSet::IsEnabled(Feature feature) const {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ return featuresBitSet[featureIndex];
+ }
+
+ bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
+ Feature f = FromAPIFeature(feature);
+ return f != Feature::InvalidEnum && IsEnabled(f);
+ }
+
+ size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
+ if (features != nullptr) {
+ *features = feature;
+ features += 1;
+ }
+ }
+ return featuresBitSet.count();
+ }
+
+ std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
+ std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
+
+ uint32_t index = 0;
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ Feature feature = static_cast<Feature>(i);
+ ASSERT(feature != Feature::InvalidEnum);
+
+ const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
+ ASSERT(featureNameAndInfo.feature == feature);
+
+ enabledFeatureNames[index] = featureNameAndInfo.info.name;
+ ++index;
+ }
+ return enabledFeatureNames;
+ }
+
+ void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+ ASSERT(properties != nullptr);
+
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+ }
+ }
+
+ wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+ return ToAPIFeature(feature);
+ }
+
+ FeaturesInfo::FeaturesInfo() {
+ for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
+ const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
+ ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
+ mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
+ }
+ }
+
+ const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
+ Feature f = FromAPIFeature(feature);
+ if (f == Feature::InvalidEnum) {
+ return nullptr;
+ }
+ return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
+ }
+
+ Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
+ ASSERT(featureName);
+
+ const auto& iter = mFeatureNameToEnumMap.find(featureName);
+ if (iter != mFeatureNameToEnumMap.cend()) {
+ return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
+ }
+
+ // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
+ constexpr std::array<std::pair<const char*, const char*>, 6>
+ kReplacementsForDeprecatedNames = {{
+ {"texture_compression_bc", "texture-compression-bc"},
+ {"depth_clamping", "depth-clamping"},
+ {"pipeline_statistics_query", "pipeline-statistics-query"},
+ {"shader_float16", "shader-float16"},
+ {"timestamp_query", "timestamp-query"},
+ {"multiplanar_formats", "multiplanar-formats"},
+ }};
+ for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
+ if (strcmp(featureName, name) == 0) {
+ return FeatureNameToEnum(replacement);
+ }
+ }
+
+ return Feature::InvalidEnum;
+ }
+
+ wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
+ Feature f = FeatureNameToEnum(featureName);
+ if (f != Feature::InvalidEnum) {
+ return ToAPIFeature(f);
+ }
+ // Pass something invalid.
+ return static_cast<wgpu::FeatureName>(-1);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Features.h b/chromium/third_party/dawn/src/dawn/native/Features.h
new file mode 100644
index 00000000000..de75e99ed00
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Features.h
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FEATURES_H_
+#define DAWNNATIVE_FEATURES_H_
+
+#include <bitset>
+#include <unordered_map>
+#include <vector>
+
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+
+namespace dawn::native {
+
+ enum class Feature {
+ TextureCompressionBC,
+ TextureCompressionETC2,
+ TextureCompressionASTC,
+ ShaderFloat16,
+ PipelineStatisticsQuery,
+ TimestampQuery,
+ DepthClamping,
+ Depth24UnormStencil8,
+ Depth32FloatStencil8,
+
+ // Dawn-specific
+ DawnInternalUsages,
+ MultiPlanarFormats,
+ DawnNative,
+
+ EnumCount,
+ InvalidEnum = EnumCount,
+ FeatureMin = TextureCompressionBC,
+ };
+
+    // A wrapper of the bitset to store if a feature is enabled or not. This wrapper provides the
+ // convenience to convert the enums of enum class Feature to the indices of a bitset.
+ struct FeaturesSet {
+ std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
+
+ void EnableFeature(Feature feature);
+ void EnableFeature(wgpu::FeatureName feature);
+ bool IsEnabled(Feature feature) const;
+ bool IsEnabled(wgpu::FeatureName feature) const;
+ // Returns |count|, the number of features. Writes out all |count| values if |features| is
+ // non-null.
+ size_t EnumerateFeatures(wgpu::FeatureName* features) const;
+ std::vector<const char*> GetEnabledFeatureNames() const;
+ void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
+ };
+
+ wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
+
+ class FeaturesInfo {
+ public:
+ FeaturesInfo();
+
+        // Used to query the details of a feature. Returns nullptr if featureName is not a valid
+        // name of a feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
+ Feature FeatureNameToEnum(const char* featureName) const;
+ wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
+
+ private:
+ std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_FEATURES_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Format.cpp b/chromium/third_party/dawn/src/dawn/native/Format.cpp
new file mode 100644
index 00000000000..403291d1cf5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Format.cpp
@@ -0,0 +1,474 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Format.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Texture.h"
+
+#include <bitset>
+
+namespace dawn::native {
+
+ // Format
+
+ // TODO(dawn:527): Remove when unused.
+ SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
+ switch (type) {
+ case wgpu::TextureComponentType::Float:
+ return SampleTypeBit::Float;
+ case wgpu::TextureComponentType::Sint:
+ return SampleTypeBit::Sint;
+ case wgpu::TextureComponentType::Uint:
+ return SampleTypeBit::Uint;
+ case wgpu::TextureComponentType::DepthComparison:
+ return SampleTypeBit::Depth;
+ }
+ UNREACHABLE();
+ }
+
+ SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
+ switch (sampleType) {
+ case wgpu::TextureSampleType::Float:
+ case wgpu::TextureSampleType::UnfilterableFloat:
+ case wgpu::TextureSampleType::Sint:
+ case wgpu::TextureSampleType::Uint:
+ case wgpu::TextureSampleType::Depth:
+ case wgpu::TextureSampleType::Undefined:
+ // When the compiler complains that you need to add a case statement here, please
+ // also add a corresponding static assert below!
+ break;
+ }
+
+ static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
+ if (sampleType == wgpu::TextureSampleType::Undefined) {
+ return SampleTypeBit::None;
+ }
+
+ // Check that SampleTypeBit bits are in the same position / order as the respective
+ // wgpu::TextureSampleType value.
+ static_assert(SampleTypeBit::Float ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
+ static_assert(
+ SampleTypeBit::UnfilterableFloat ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
+ static_assert(SampleTypeBit::Uint ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
+ static_assert(SampleTypeBit::Sint ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
+ static_assert(SampleTypeBit::Depth ==
+ static_cast<SampleTypeBit>(
+ 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
+ return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
+ }
+
+ bool Format::IsColor() const {
+ return aspects == Aspect::Color;
+ }
+
+ bool Format::HasDepth() const {
+ return (aspects & Aspect::Depth) != 0;
+ }
+
+ bool Format::HasStencil() const {
+ return (aspects & Aspect::Stencil) != 0;
+ }
+
+ bool Format::HasDepthOrStencil() const {
+ return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
+ }
+
+ bool Format::IsMultiPlanar() const {
+ return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
+ }
+
+ bool Format::CopyCompatibleWith(const Format& format) const {
+ return baseFormat == format.baseFormat;
+ }
+
+ const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
+ return GetAspectInfo(SelectFormatAspects(*this, aspect));
+ }
+
+ const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(aspects & aspect);
+ const size_t aspectIndex = GetAspectIndex(aspect);
+ ASSERT(aspectIndex < GetAspectCount(aspects));
+ return aspectInfo[aspectIndex];
+ }
+
+ size_t Format::GetIndex() const {
+ return ComputeFormatIndex(format);
+ }
+
+ // Implementation details of the format table of the DeviceBase
+
+    // For now, the enum values for formats are packed, but this might change when we have a
+    // broader feature mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
+ size_t ComputeFormatIndex(wgpu::TextureFormat format) {
+ // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
+ // of the range of the FormatTable.
+ static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
+ kKnownFormatCount);
+ return static_cast<size_t>(static_cast<uint32_t>(format) - 1);
+ }
+
+ FormatTable BuildFormatTable(const DeviceBase* device) {
+ FormatTable table;
+ std::bitset<kKnownFormatCount> formatsSet;
+
+ static constexpr SampleTypeBit kAnyFloat =
+ SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+
+ auto AddFormat = [&table, &formatsSet](Format format) {
+ size_t index = ComputeFormatIndex(format.format);
+ ASSERT(index < table.size());
+
+ // This checks that each format is set at most once, the first part of checking that all
+ // formats are set exactly once.
+ ASSERT(!formatsSet[index]);
+
+ // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
+ // ASSERT isn't true, then additional validation on bytesPerRow must be added.
+ const bool hasMultipleAspects = !HasOneBit(format.aspects);
+ ASSERT(hasMultipleAspects ||
+ (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
+
+ table[index] = format;
+ formatsSet.set(index);
+ };
+
+ auto AddColorFormat =
+ [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
+ bool supportsMultisample, uint32_t byteSize, SampleTypeBit sampleTypes,
+ uint8_t componentCount,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.isRenderable = renderable;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = true;
+ internalFormat.supportsStorageUsage = supportsStorageUsage;
+ internalFormat.supportsMultisample = supportsMultisample;
+ internalFormat.aspects = Aspect::Color;
+ internalFormat.componentCount = componentCount;
+
+                // The default baseFormat of each color format is the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = byteSize;
+ firstAspect->block.width = 1;
+ firstAspect->block.height = 1;
+ if (HasOneBit(sampleTypes)) {
+ switch (sampleTypes) {
+ case SampleTypeBit::Float:
+ case SampleTypeBit::UnfilterableFloat:
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ break;
+ case SampleTypeBit::Sint:
+ firstAspect->baseType = wgpu::TextureComponentType::Sint;
+ break;
+ case SampleTypeBit::Uint:
+ firstAspect->baseType = wgpu::TextureComponentType::Uint;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ }
+ firstAspect->supportedSampleTypes = sampleTypes;
+ firstAspect->format = format;
+ AddFormat(internalFormat);
+ };
+
+ auto AddDepthFormat = [&AddFormat](
+ wgpu::TextureFormat format, uint32_t byteSize, bool isSupported,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.isRenderable = true;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = true;
+ internalFormat.aspects = Aspect::Depth;
+ internalFormat.componentCount = 1;
+
+            // The default baseFormat of each depth format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = byteSize;
+ firstAspect->block.width = 1;
+ firstAspect->block.height = 1;
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+ firstAspect->format = format;
+ AddFormat(internalFormat);
+ };
+
+ auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported,
+ wgpu::TextureFormat baseFormat =
+ wgpu::TextureFormat::Undefined) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.isRenderable = true;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = true;
+ internalFormat.aspects = Aspect::Stencil;
+ internalFormat.componentCount = 1;
+ internalFormat.baseFormat = baseFormat;
+
+            // The default baseFormat of each stencil format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = 1;
+ firstAspect->block.width = 1;
+ firstAspect->block.height = 1;
+ firstAspect->baseType = wgpu::TextureComponentType::Uint;
+ firstAspect->supportedSampleTypes = SampleTypeBit::Uint;
+ firstAspect->format = format;
+ AddFormat(internalFormat);
+ };
+
+ auto AddCompressedFormat =
+ [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
+ uint32_t height, bool isSupported, uint8_t componentCount,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.isRenderable = false;
+ internalFormat.isCompressed = true;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = false;
+ internalFormat.aspects = Aspect::Color;
+ internalFormat.componentCount = componentCount;
+
+                // The default baseFormat of each compressed format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+
+ AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+ firstAspect->block.byteSize = byteSize;
+ firstAspect->block.width = width;
+ firstAspect->block.height = height;
+ firstAspect->baseType = wgpu::TextureComponentType::Float;
+ firstAspect->supportedSampleTypes = kAnyFloat;
+ firstAspect->format = format;
+ AddFormat(internalFormat);
+ };
+
+ auto AddMultiAspectFormat =
+ [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+ wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+ bool isRenderable, bool isSupported, bool supportsMultisample,
+ uint8_t componentCount,
+ wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
+ Format internalFormat;
+ internalFormat.format = format;
+ internalFormat.isRenderable = isRenderable;
+ internalFormat.isCompressed = false;
+ internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
+ internalFormat.supportsMultisample = supportsMultisample;
+ internalFormat.aspects = aspects;
+ internalFormat.componentCount = componentCount;
+
+                // The default baseFormat of each multi-aspect format should be the format itself.
+ if (baseFormat == wgpu::TextureFormat::Undefined) {
+ internalFormat.baseFormat = format;
+ } else {
+ internalFormat.baseFormat = baseFormat;
+ }
+
+ const size_t firstFormatIndex = ComputeFormatIndex(firstFormat);
+ const size_t secondFormatIndex = ComputeFormatIndex(secondFormat);
+
+ internalFormat.aspectInfo[0] = table[firstFormatIndex].aspectInfo[0];
+ internalFormat.aspectInfo[1] = table[secondFormatIndex].aspectInfo[0];
+
+ AddFormat(internalFormat);
+ };
+
+ // clang-format off
+ // 1 byte color formats
+ AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, true, 1, kAnyFloat, 1);
+ AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, true, 1, kAnyFloat, 1);
+ AddColorFormat(wgpu::TextureFormat::R8Uint, true, false, true, 1, SampleTypeBit::Uint, 1);
+ AddColorFormat(wgpu::TextureFormat::R8Sint, true, false, true, 1, SampleTypeBit::Sint, 1);
+
+ // 2 bytes color formats
+ AddColorFormat(wgpu::TextureFormat::R16Uint, true, false, true, 2, SampleTypeBit::Uint, 1);
+ AddColorFormat(wgpu::TextureFormat::R16Sint, true, false, true, 2, SampleTypeBit::Sint, 1);
+ AddColorFormat(wgpu::TextureFormat::R16Float, true, false, true, 2, kAnyFloat, 1);
+ AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, false, true, 2, kAnyFloat, 2);
+ AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, false, true, 2, kAnyFloat, 2);
+ AddColorFormat(wgpu::TextureFormat::RG8Uint, true, false, true, 2, SampleTypeBit::Uint, 2);
+ AddColorFormat(wgpu::TextureFormat::RG8Sint, true, false, true, 2, SampleTypeBit::Sint, 2);
+
+ // 4 bytes color formats
+ AddColorFormat(wgpu::TextureFormat::R32Uint, true, true, false, 4, SampleTypeBit::Uint, 1);
+ AddColorFormat(wgpu::TextureFormat::R32Sint, true, true, false, 4, SampleTypeBit::Sint, 1);
+ AddColorFormat(wgpu::TextureFormat::R32Float, true, true, true, 4, SampleTypeBit::UnfilterableFloat, 1);
+ AddColorFormat(wgpu::TextureFormat::RG16Uint, true, false, true, 4, SampleTypeBit::Uint, 2);
+ AddColorFormat(wgpu::TextureFormat::RG16Sint, true, false, true, 4, SampleTypeBit::Sint, 2);
+ AddColorFormat(wgpu::TextureFormat::RG16Float, true, false, true, 4, kAnyFloat, 2);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, true, true, 4, kAnyFloat, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, false, true, 4, kAnyFloat, 4, wgpu::TextureFormat::RGBA8Unorm);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, true, true, 4, kAnyFloat, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, true, true, 4, SampleTypeBit::Uint, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, true, true, 4, SampleTypeBit::Sint, 4);
+ AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, false, true, 4, kAnyFloat, 4);
+ AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, false, true, 4, kAnyFloat, 4, wgpu::TextureFormat::BGRA8Unorm);
+ AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, false, true, 4, kAnyFloat, 4);
+
+ AddColorFormat(wgpu::TextureFormat::RG11B10Ufloat, false, false, true, 4, kAnyFloat, 3);
+ AddColorFormat(wgpu::TextureFormat::RGB9E5Ufloat, false, false, false, 4, kAnyFloat, 3);
+
+ // 8 bytes color formats
+ AddColorFormat(wgpu::TextureFormat::RG32Uint, true, true, false, 8, SampleTypeBit::Uint, 2);
+ AddColorFormat(wgpu::TextureFormat::RG32Sint, true, true, false, 8, SampleTypeBit::Sint, 2);
+ AddColorFormat(wgpu::TextureFormat::RG32Float, true, true, false, 8, SampleTypeBit::UnfilterableFloat, 2);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, true, true, 8, SampleTypeBit::Uint, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, true, true, 8, SampleTypeBit::Sint, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, true, true, 8, kAnyFloat, 4);
+
+ // 16 bytes color formats
+ AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, true, false, 16, SampleTypeBit::Uint, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, true, false, 16, SampleTypeBit::Sint, 4);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, true, false, 16, SampleTypeBit::UnfilterableFloat, 4);
+
+ // Depth-stencil formats
+ // TODO(dawn:666): Implement the stencil8 format
+ AddStencilFormat(wgpu::TextureFormat::Stencil8, false);
+ AddDepthFormat(wgpu::TextureFormat::Depth16Unorm, 2, true);
+    // TODO(crbug.com/dawn/843): This is 4 because we read this to perform zero initialization,
+    // and textures always use depth32float. We should improve this to be more robust. Perhaps,
+    // use 0 here to mean "unsized" and add a backend-specific query for the block size.
+ AddDepthFormat(wgpu::TextureFormat::Depth24Plus, 4, true);
+ AddMultiAspectFormat(wgpu::TextureFormat::Depth24PlusStencil8,
+ Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, true, true, 2);
+ bool isD24S8Supported = device->IsFeatureEnabled(Feature::Depth24UnormStencil8);
+ AddMultiAspectFormat(wgpu::TextureFormat::Depth24UnormStencil8,
+ Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, isD24S8Supported, true, 2);
+ AddDepthFormat(wgpu::TextureFormat::Depth32Float, 4, true);
+ bool isD32S8Supported = device->IsFeatureEnabled(Feature::Depth32FloatStencil8);
+ AddMultiAspectFormat(wgpu::TextureFormat::Depth32FloatStencil8,
+ Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Stencil8, true, isD32S8Supported, true, 2);
+
+ // BC compressed formats
+ bool isBCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionBC);
+ AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC1RGBAUnorm);
+ AddCompressedFormat(wgpu::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::BC4RUnorm, 8, 4, 4, isBCFormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC2RGBAUnorm);
+ AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC3RGBAUnorm);
+ AddCompressedFormat(wgpu::TextureFormat::BC5RGSnorm, 16, 4, 4, isBCFormatSupported, 2);
+ AddCompressedFormat(wgpu::TextureFormat::BC5RGUnorm, 16, 4, 4, isBCFormatSupported, 2);
+ AddCompressedFormat(wgpu::TextureFormat::BC6HRGBFloat, 16, 4, 4, isBCFormatSupported, 3);
+ AddCompressedFormat(wgpu::TextureFormat::BC6HRGBUfloat, 16, 4, 4, isBCFormatSupported, 3);
+ AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4, wgpu::TextureFormat::BC7RGBAUnorm);
+
+ // ETC2/EAC compressed formats
+ bool isETC2FormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionETC2);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8Unorm, 8, 4, 4, isETC2FormatSupported, 3);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8UnormSrgb, 8, 4, 4, isETC2FormatSupported, 3, wgpu::TextureFormat::ETC2RGB8Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1Unorm, 8, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1UnormSrgb, 8, 4, 4, isETC2FormatSupported, 4, wgpu::TextureFormat::ETC2RGB8A1Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8Unorm, 16, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8UnormSrgb, 16, 4, 4, isETC2FormatSupported, 4, wgpu::TextureFormat::ETC2RGBA8Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::EACR11Unorm, 8, 4, 4, isETC2FormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::EACR11Snorm, 8, 4, 4, isETC2FormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::EACRG11Unorm, 16, 4, 4, isETC2FormatSupported, 2);
+ AddCompressedFormat(wgpu::TextureFormat::EACRG11Snorm, 16, 4, 4, isETC2FormatSupported, 2);
+
+ // ASTC compressed formats
+ bool isASTCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionASTC);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC4x4Unorm, 16, 4, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC4x4UnormSrgb, 16, 4, 4, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC4x4Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x4Unorm, 16, 5, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x4UnormSrgb, 16, 5, 4, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC5x4Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x5Unorm, 16, 5, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x5UnormSrgb, 16, 5, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC5x5Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x5Unorm, 16, 6, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x5UnormSrgb, 16, 6, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC6x5Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x6Unorm, 16, 6, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x6UnormSrgb, 16, 6, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC6x6Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x5Unorm, 16, 8, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x5UnormSrgb, 16, 8, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x5Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x6Unorm, 16, 8, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x6UnormSrgb, 16, 8, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x6Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x8Unorm, 16, 8, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x8UnormSrgb, 16, 8, 8, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC8x8Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x5Unorm, 16, 10, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x5UnormSrgb, 16, 10, 5, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x5Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x6Unorm, 16, 10, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x6UnormSrgb, 16, 10, 6, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x6Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x8Unorm, 16, 10, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x8UnormSrgb, 16, 10, 8, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x8Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x10Unorm, 16, 10, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x10UnormSrgb, 16, 10, 10, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC10x10Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x10Unorm, 16, 12, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x10UnormSrgb, 16, 12, 10, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC12x10Unorm);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x12Unorm, 16, 12, 12, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x12UnormSrgb, 16, 12, 12, isASTCFormatSupported, 4, wgpu::TextureFormat::ASTC12x12Unorm);
+
+ // multi-planar formats
+ const bool isMultiPlanarFormatSupported = device->IsFeatureEnabled(Feature::MultiPlanarFormats);
+ AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
+ wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, false, 3);
+
+ // clang-format on
+
+ // This checks that each format is set at least once, the second part of checking that all
+ // formats are checked exactly once.
+ ASSERT(formatsSet.all());
+
+ return table;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Format.h b/chromium/third_party/dawn/src/dawn/native/Format.h
new file mode 100644
index 00000000000..228913ca2db
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Format.h
@@ -0,0 +1,151 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FORMAT_H_
+#define DAWNNATIVE_FORMAT_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/EnumClassBitmasks.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+
+// About multi-planar formats.
+//
+// Dawn supports additional multi-planar formats when the multiplanar-formats extension is enabled.
+// When enabled, Dawn treats planar data as sub-resources (ie. 1 sub-resource == 1 view == 1 plane).
+// A multi-planar format name encodes the channel mapping and order of planes. For example,
+// R8BG8Biplanar420Unorm is YUV 4:2:0 where Plane 0 = R8, and Plane 1 = BG8.
+//
+// Requirements:
+// * Plane aspects cannot be combined with color, depth, or stencil aspects.
+// * Only compatible multi-planar formats of planes can be used with multi-planar texture
+// formats.
+// * Can't access multiple planes without creating per plane views (no color conversion).
+// * Multi-planar format cannot be written or read without a per plane view.
+//
+// TODO(dawn:551): Consider moving this comment.
+
+namespace dawn::native {
+
+ enum class Aspect : uint8_t;
+ class DeviceBase;
+
+ // This mirrors wgpu::TextureSampleType as a bitmask instead.
+ enum class SampleTypeBit : uint8_t {
+ None = 0x0,
+ Float = 0x1,
+ UnfilterableFloat = 0x2,
+ Depth = 0x4,
+ Sint = 0x8,
+ Uint = 0x10,
+ };
+
+    // Converts a wgpu::TextureComponentType to its bitmask representation.
+ SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
+    // Converts a wgpu::TextureSampleType to its bitmask representation.
+ SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
+
+ struct TexelBlockInfo {
+ uint32_t byteSize;
+ uint32_t width;
+ uint32_t height;
+ };
+
+ struct AspectInfo {
+ TexelBlockInfo block;
+ // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
+ // an internal Dawn enum.
+ wgpu::TextureComponentType baseType;
+ SampleTypeBit supportedSampleTypes;
+ wgpu::TextureFormat format;
+ };
+
+ // The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
+    // exact number of known formats.
+ static constexpr size_t kKnownFormatCount = 96;
+
+ struct Format;
+ using FormatTable = std::array<Format, kKnownFormatCount>;
+
+ // A wgpu::TextureFormat along with all the information about it necessary for validation.
+ struct Format {
+ wgpu::TextureFormat format;
+
+ bool isRenderable;
+ bool isCompressed;
+ // A format can be known but not supported because it is part of a disabled extension.
+ bool isSupported;
+ bool supportsStorageUsage;
+ bool supportsMultisample;
+ Aspect aspects;
+ // Only used for renderable color formats, number of color channels.
+ uint8_t componentCount;
+
+ bool IsColor() const;
+ bool HasDepth() const;
+ bool HasStencil() const;
+ bool HasDepthOrStencil() const;
+
+ // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
+ // allowed by multi-planar formats (ex. NV12).
+ bool IsMultiPlanar() const;
+
+ const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
+ const AspectInfo& GetAspectInfo(Aspect aspect) const;
+
+ // The index of the format in the list of all known formats: a unique number for each format
+ // in [0, kKnownFormatCount)
+ size_t GetIndex() const;
+
+ // baseFormat represents the memory layout of the format.
+        // If two formats have the same baseFormat, they can be copied to each other.
+ wgpu::TextureFormat baseFormat;
+
+        // CopyCompatibleWith() returns true if the input format has the same baseFormat
+        // as the current format.
+ bool CopyCompatibleWith(const Format& format) const;
+
+ private:
+ // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
+ // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
+ // info is depth and the second aspect info is stencil. For multi-planar formats,
+ // aspectInfo[i] is the ith plane.
+ std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
+
+ friend FormatTable BuildFormatTable(const DeviceBase* device);
+ };
+
+ // Implementation details of the format table in the device.
+
+ // Returns the index of a format in the FormatTable.
+ size_t ComputeFormatIndex(wgpu::TextureFormat format);
+ // Builds the format table with the extensions enabled on the device.
+ FormatTable BuildFormatTable(const DeviceBase* device);
+
+} // namespace dawn::native
+
+namespace dawn {
+
+ template <>
+ struct IsDawnBitmask<dawn::native::SampleTypeBit> {
+ static constexpr bool enable = true;
+ };
+
+} // namespace dawn
+
+#endif // DAWNNATIVE_FORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Forward.h b/chromium/third_party/dawn/src/dawn/native/Forward.h
new file mode 100644
index 00000000000..36b092c371f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Forward.h
@@ -0,0 +1,71 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FORWARD_H_
+#define DAWNNATIVE_FORWARD_H_
+
+#include <cstdint>
+
+template <typename T>
+class Ref;
+
+namespace dawn::native {
+
+ enum class ObjectType : uint32_t;
+
+ class AdapterBase;
+ class BindGroupBase;
+ class BindGroupLayoutBase;
+ class BufferBase;
+ class ComputePipelineBase;
+ class CommandBufferBase;
+ class CommandEncoder;
+ class ComputePassEncoder;
+ class ExternalTextureBase;
+ class InstanceBase;
+ class PipelineBase;
+ class PipelineLayoutBase;
+ class QuerySetBase;
+ class QueueBase;
+ class RenderBundleBase;
+ class RenderBundleEncoder;
+ class RenderPassEncoder;
+ class RenderPipelineBase;
+ class ResourceHeapBase;
+ class SamplerBase;
+ class Surface;
+ class ShaderModuleBase;
+ class StagingBufferBase;
+ class SwapChainBase;
+ class NewSwapChainBase;
+ class TextureBase;
+ class TextureViewBase;
+
+ class DeviceBase;
+
+ template <typename T>
+ class PerStage;
+
+ struct Format;
+
+ // Aliases for frontend-only types.
+ using CommandEncoderBase = CommandEncoder;
+ using ComputePassEncoderBase = ComputePassEncoder;
+ using RenderBundleEncoderBase = RenderBundleEncoder;
+ using RenderPassEncoderBase = RenderPassEncoder;
+ using SurfaceBase = Surface;
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp
new file mode 100644
index 00000000000..ebe0e7fb909
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.cpp
@@ -0,0 +1,193 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/IndirectDrawMetadata.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+#include "dawn/native/Limits.h"
+#include "dawn/native/RenderBundle.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace dawn::native {
+
+ uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
+ return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
+ kDrawIndexedIndirectSize;
+ }
+
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
+ BufferBase* indirectBuffer)
+ : mIndirectBuffer(indirectBuffer) {
+ }
+
+ void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
+ uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint32_t maxBatchOffsetRange,
+ IndexedIndirectDraw draw) {
+ const uint64_t newOffset = draw.clientBufferOffset;
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndexedIndirectValidationBatch& batch = *it;
+ if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
+ // This batch is full. If its minOffset is to the right of the new offset, we can
+ // just insert a new batch here.
+ if (newOffset < batch.minOffset) {
+ break;
+ }
+
+ // Otherwise keep looking.
+ ++it;
+ continue;
+ }
+
+ if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
+ // We can extend this batch to the left in order to fit the new offset.
+ batch.minOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
+ // We can extend this batch to the right in order to fit the new offset.
+ batch.maxOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset < batch.minOffset) {
+ // We want to insert a new batch just before this one.
+ break;
+ }
+
+ ++it;
+ }
+
+ IndexedIndirectValidationBatch newBatch;
+ newBatch.minOffset = newOffset;
+ newBatch.maxOffset = newOffset;
+ newBatch.draws.push_back(std::move(draw));
+
+ mBatches.insert(it, std::move(newBatch));
+ }
+
+ void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+ uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint32_t maxBatchOffsetRange,
+ const IndexedIndirectValidationBatch& newBatch) {
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndexedIndirectValidationBatch& batch = *it;
+ uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
+ uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
+ if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
+ maxDrawCallsPerIndirectValidationBatch) {
+ // This batch fits within the limits of an existing batch. Merge it.
+ batch.minOffset = min;
+ batch.maxOffset = max;
+ batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
+ return;
+ }
+
+ if (newBatch.minOffset < batch.minOffset) {
+ break;
+ }
+
+ ++it;
+ }
+ mBatches.push_back(newBatch);
+ }
+
+ const std::vector<IndirectDrawMetadata::IndexedIndirectValidationBatch>&
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
+ return mBatches;
+ }
+
+ IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
+ : mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)),
+ mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)) {
+ }
+
+ IndirectDrawMetadata::~IndirectDrawMetadata() = default;
+
+ IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
+
+ IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
+
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
+ IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
+ return &mIndexedIndirectBufferValidationInfo;
+ }
+
+ void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
+ auto [_, inserted] = mAddedBundles.insert(bundle);
+ if (!inserted) {
+ return;
+ }
+
+ for (const auto& [config, validationInfo] :
+ bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
+ auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
+ if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
+ // We already have batches for the same config. Merge the new ones in.
+ for (const IndexedIndirectValidationBatch& batch : validationInfo.GetBatches()) {
+ it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
+ }
+ } else {
+ mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
+ }
+ }
+ }
+
+ void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ DrawIndexedIndirectCmd* cmd) {
+ uint64_t numIndexBufferElements;
+ switch (indexFormat) {
+ case wgpu::IndexFormat::Uint16:
+ numIndexBufferElements = indexBufferSize / 2;
+ break;
+ case wgpu::IndexFormat::Uint32:
+ numIndexBufferElements = indexBufferSize / 4;
+ break;
+ case wgpu::IndexFormat::Undefined:
+ UNREACHABLE();
+ }
+
+ const IndexedIndirectConfig config(indirectBuffer, numIndexBufferElements);
+ auto it = mIndexedIndirectBufferValidationInfo.find(config);
+ if (it == mIndexedIndirectBufferValidationInfo.end()) {
+ auto result = mIndexedIndirectBufferValidationInfo.emplace(
+ config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+ it = result.first;
+ }
+
+ IndexedIndirectDraw draw;
+ draw.clientBufferOffset = indirectOffset;
+ draw.cmd = cmd;
+ it->second.AddIndexedIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange,
+ std::move(draw));
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h
new file mode 100644
index 00000000000..602be861f76
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawMetadata.h
@@ -0,0 +1,126 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+#define DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Commands.h"
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace dawn::native {
+
+ class RenderBundleBase;
+ struct CombinedLimits;
+
+ // In the unlikely scenario that indirect offsets used over a single buffer span more than
+ // this length of the buffer, we split the validation work into multiple batches.
+ uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
+
+ // Metadata corresponding to the validation requirements of a single render pass. This metadata
+ // is accumulated while its corresponding render pass is encoded, and is later used to encode
+ // validation commands to be inserted into the command buffer just before the render pass's own
+ // commands.
+ class IndirectDrawMetadata : public NonCopyable {
+ public:
+ struct IndexedIndirectDraw {
+ uint64_t clientBufferOffset;
+ // This is a pointer to the command that should be populated with the validated
+ // indirect scratch buffer. It is only valid up until the encoded command buffer
+ // is submitted.
+ DrawIndexedIndirectCmd* cmd;
+ };
+
+ struct IndexedIndirectValidationBatch {
+ uint64_t minOffset;
+ uint64_t maxOffset;
+ std::vector<IndexedIndirectDraw> draws;
+ };
+
+ // Tracks information about every draw call in this render pass which uses the same indirect
+ // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
+ // that validation work can be chunked efficiently if necessary.
+ class IndexedIndirectBufferValidationInfo {
+ public:
+ explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
+
+ // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
+ // assigned (and deferred) buffer ref and relative offset before returning.
+ void AddIndexedIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint32_t maxBatchOffsetRange,
+ IndexedIndirectDraw draw);
+
+ // Adds draw calls from an already-computed batch, e.g. from a previously encoded
+ // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
+            // it's added to mBatches.
+ void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
+ uint32_t maxBatchOffsetRange,
+ const IndexedIndirectValidationBatch& batch);
+
+ const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
+
+ private:
+ Ref<BufferBase> mIndirectBuffer;
+
+ // A list of information about validation batches that will need to be executed for the
+ // corresponding indirect buffer prior to a single render pass. These are kept sorted by
+ // minOffset and may overlap iff the number of offsets in one batch would otherwise
+ // exceed some large upper bound (roughly ~33M draw calls).
+ //
+ // Since the most common expected cases will overwhelmingly require only a single
+ // validation pass per render pass, this is optimized for efficient updates to a single
+ // batch rather than for efficient manipulation of a large number of batches.
+ std::vector<IndexedIndirectValidationBatch> mBatches;
+ };
+
+ // Combination of an indirect buffer reference, and the number of addressable index buffer
+ // elements at the time of a draw call.
+ using IndexedIndirectConfig = std::pair<BufferBase*, uint64_t>;
+ using IndexedIndirectBufferValidationInfoMap =
+ std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
+
+ explicit IndirectDrawMetadata(const CombinedLimits& limits);
+ ~IndirectDrawMetadata();
+
+ IndirectDrawMetadata(IndirectDrawMetadata&&);
+ IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
+
+ IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
+
+ void AddBundle(RenderBundleBase* bundle);
+ void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ DrawIndexedIndirectCmd* cmd);
+
+ private:
+ IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
+ std::set<RenderBundleBase*> mAddedBundles;
+
+ uint32_t mMaxDrawCallsPerBatch;
+ uint32_t mMaxBatchOffsetRange;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_INDIRECTDRAWMETADATA_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp
new file mode 100644
index 00000000000..efabb225a48
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.cpp
@@ -0,0 +1,385 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/IndirectDrawValidationEncoder.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <cstdlib>
+#include <limits>
+
+namespace dawn::native {
+
+ namespace {
+ // NOTE: This must match the workgroup_size attribute on the compute entry point below.
+ constexpr uint64_t kWorkgroupSize = 64;
+
+ // Equivalent to the BatchInfo struct defined in the shader below.
+ struct BatchInfo {
+ uint64_t numIndexBufferElements;
+ uint32_t numDraws;
+ uint32_t padding;
+ };
+
+ // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
+ // various failure modes.
+ static const char sRenderValidationShaderSource[] = R"(
+ let kNumIndirectParamsPerDrawCall = 5u;
+
+ let kIndexCountEntry = 0u;
+ let kInstanceCountEntry = 1u;
+ let kFirstIndexEntry = 2u;
+ let kBaseVertexEntry = 3u;
+ let kFirstInstanceEntry = 4u;
+
+ struct BatchInfo {
+ numIndexBufferElementsLow: u32;
+ numIndexBufferElementsHigh: u32;
+ numDraws: u32;
+ padding: u32;
+ indirectOffsets: array<u32>;
+ };
+
+ struct IndirectParams {
+ data: array<u32>;
+ };
+
+ @group(0) @binding(0) var<storage, read> batch: BatchInfo;
+ @group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
+ @group(0) @binding(2) var<storage, write> validatedParams: IndirectParams;
+
+ fn fail(drawIndex: u32) {
+ let index = drawIndex * kNumIndirectParamsPerDrawCall;
+ validatedParams.data[index + kIndexCountEntry] = 0u;
+ validatedParams.data[index + kInstanceCountEntry] = 0u;
+ validatedParams.data[index + kFirstIndexEntry] = 0u;
+ validatedParams.data[index + kBaseVertexEntry] = 0u;
+ validatedParams.data[index + kFirstInstanceEntry] = 0u;
+ }
+
+ fn pass(drawIndex: u32) {
+ let vIndex = drawIndex * kNumIndirectParamsPerDrawCall;
+ let cIndex = batch.indirectOffsets[drawIndex];
+ validatedParams.data[vIndex + kIndexCountEntry] =
+ clientParams.data[cIndex + kIndexCountEntry];
+ validatedParams.data[vIndex + kInstanceCountEntry] =
+ clientParams.data[cIndex + kInstanceCountEntry];
+ validatedParams.data[vIndex + kFirstIndexEntry] =
+ clientParams.data[cIndex + kFirstIndexEntry];
+ validatedParams.data[vIndex + kBaseVertexEntry] =
+ clientParams.data[cIndex + kBaseVertexEntry];
+ validatedParams.data[vIndex + kFirstInstanceEntry] =
+ clientParams.data[cIndex + kFirstInstanceEntry];
+ }
+
+ @stage(compute) @workgroup_size(64, 1, 1)
+ fn main(@builtin(global_invocation_id) id : vec3<u32>) {
+ if (id.x >= batch.numDraws) {
+ return;
+ }
+
+ let clientIndex = batch.indirectOffsets[id.x];
+ let firstInstance = clientParams.data[clientIndex + kFirstInstanceEntry];
+ if (firstInstance != 0u) {
+ fail(id.x);
+ return;
+ }
+
+ if (batch.numIndexBufferElementsHigh >= 2u) {
+ // firstIndex and indexCount are both u32. The maximum possible sum of these
+ // values is 0x1fffffffe, which is less than 0x200000000. Nothing to validate.
+ pass(id.x);
+ return;
+ }
+
+ let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
+ if (batch.numIndexBufferElementsHigh == 0u &&
+ batch.numIndexBufferElementsLow < firstIndex) {
+ fail(id.x);
+ return;
+ }
+
+ // Note that this subtraction may underflow, but only when
+ // numIndexBufferElementsHigh is 1u. The result is still correct in that case.
+ let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
+ let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
+ if (indexCount > maxIndexCount) {
+ fail(id.x);
+ return;
+ }
+ pass(id.x);
+ }
+ )";
+
+ ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
+ DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+ if (store->renderValidationPipeline == nullptr) {
+ // Create compute shader module if not cached before.
+ if (store->renderValidationShader == nullptr) {
+ DAWN_TRY_ASSIGN(
+ store->renderValidationShader,
+ utils::CreateShaderModule(device, sRenderValidationShaderSource));
+ }
+
+ Ref<BindGroupLayoutBase> bindGroupLayout;
+ DAWN_TRY_ASSIGN(
+ bindGroupLayout,
+ utils::MakeBindGroupLayout(
+ device,
+ {
+ {0, wgpu::ShaderStage::Compute,
+ wgpu::BufferBindingType::ReadOnlyStorage},
+ {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+ },
+ /* allowInternalBinding */ true));
+
+ Ref<PipelineLayoutBase> pipelineLayout;
+ DAWN_TRY_ASSIGN(pipelineLayout,
+ utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+
+ ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.layout = pipelineLayout.Get();
+ computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
+ computePipelineDescriptor.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->renderValidationPipeline,
+ device->CreateComputePipeline(&computePipelineDescriptor));
+ }
+
+ return store->renderValidationPipeline.Get();
+ }
+
+ size_t GetBatchDataSize(uint32_t numDraws) {
+ return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+ }
+
+ } // namespace
+
+ uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
+ const uint64_t batchDrawCallLimitByDispatchSize =
+ static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
+ const uint64_t batchDrawCallLimitByStorageBindingSize =
+ (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+ return static_cast<uint32_t>(
+ std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
+ uint64_t(std::numeric_limits<uint32_t>::max())}));
+ }
+
+ MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata) {
+ struct Batch {
+ const IndirectDrawMetadata::IndexedIndirectValidationBatch* metadata;
+ uint64_t numIndexBufferElements;
+ uint64_t dataBufferOffset;
+ uint64_t dataSize;
+ uint64_t clientIndirectOffset;
+ uint64_t clientIndirectSize;
+ uint64_t validatedParamsOffset;
+ uint64_t validatedParamsSize;
+ BatchInfo* batchInfo;
+ };
+
+ struct Pass {
+ BufferBase* clientIndirectBuffer;
+ uint64_t validatedParamsSize = 0;
+ uint64_t batchDataSize = 0;
+ std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
+ std::vector<Batch> batches;
+ };
+
+ // First stage is grouping all batches into passes. We try to pack as many batches into a
+ // single pass as possible. Batches can be grouped together as long as they're validating
+ // data from the same indirect buffer, but they may still be split into multiple passes if
+ // the number of draw calls in a pass would exceed some (very high) upper bound.
+ size_t validatedParamsSize = 0;
+ std::vector<Pass> passes;
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
+ *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
+ if (bufferInfoMap.empty()) {
+ return {};
+ }
+
+ const uint32_t maxStorageBufferBindingSize =
+ device->GetLimits().v1.maxStorageBufferBindingSize;
+ const uint32_t minStorageBufferOffsetAlignment =
+ device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+ for (auto& [config, validationInfo] : bufferInfoMap) {
+ BufferBase* clientIndirectBuffer = config.first;
+ for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
+ validationInfo.GetBatches()) {
+ const uint64_t minOffsetFromAlignedBoundary =
+ batch.minOffset % minStorageBufferOffsetAlignment;
+ const uint64_t minOffsetAlignedDown =
+ batch.minOffset - minOffsetFromAlignedBoundary;
+
+ Batch newBatch;
+ newBatch.metadata = &batch;
+ newBatch.numIndexBufferElements = config.second;
+ newBatch.dataSize = GetBatchDataSize(batch.draws.size());
+ newBatch.clientIndirectOffset = minOffsetAlignedDown;
+ newBatch.clientIndirectSize =
+ batch.maxOffset + kDrawIndexedIndirectSize - minOffsetAlignedDown;
+
+ newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
+ newBatch.validatedParamsOffset =
+ Align(validatedParamsSize, minStorageBufferOffsetAlignment);
+ validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
+ if (validatedParamsSize > maxStorageBufferBindingSize) {
+ return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
+ }
+
+ Pass* currentPass = passes.empty() ? nullptr : &passes.back();
+ if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
+ uint64_t nextBatchDataOffset =
+ Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
+ uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
+ if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
+ // We can fit this batch in the current pass.
+ newBatch.dataBufferOffset = nextBatchDataOffset;
+ currentPass->batchDataSize = newPassBatchDataSize;
+ currentPass->batches.push_back(newBatch);
+ continue;
+ }
+ }
+
+ // We need to start a new pass for this batch.
+ newBatch.dataBufferOffset = 0;
+
+ Pass newPass;
+ newPass.clientIndirectBuffer = clientIndirectBuffer;
+ newPass.batchDataSize = newBatch.dataSize;
+ newPass.batches.push_back(newBatch);
+ passes.push_back(std::move(newPass));
+ }
+ }
+
+ auto* const store = device->GetInternalPipelineStore();
+ ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
+ ScratchBuffer& batchDataBuffer = store->scratchStorage;
+
+ uint64_t requiredBatchDataBufferSize = 0;
+ for (const Pass& pass : passes) {
+ requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
+ }
+ DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
+ usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
+
+ DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
+ usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
+
+ // Now we allocate and populate host-side batch data to be copied to the GPU.
+ for (Pass& pass : passes) {
+ // We use std::malloc here because it guarantees maximal scalar alignment.
+ pass.batchData = {std::malloc(pass.batchDataSize), std::free};
+ memset(pass.batchData.get(), 0, pass.batchDataSize);
+ uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
+ for (Batch& batch : pass.batches) {
+ batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
+ batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
+ batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
+
+ uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
+ uint64_t validatedParamsOffset = batch.validatedParamsOffset;
+ for (auto& draw : batch.metadata->draws) {
+ // The shader uses this to index an array of u32, hence the division by 4 bytes.
+ *indirectOffsets++ = static_cast<uint32_t>(
+ (draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
+
+ draw.cmd->indirectBuffer = validatedParamsBuffer.GetBuffer();
+ draw.cmd->indirectOffset = validatedParamsOffset;
+
+ validatedParamsOffset += kDrawIndexedIndirectSize;
+ }
+ }
+ }
+
+ ComputePipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
+
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ BindGroupEntry bindings[3];
+ BindGroupEntry& bufferDataBinding = bindings[0];
+ bufferDataBinding.binding = 0;
+ bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
+
+ BindGroupEntry& clientIndirectBinding = bindings[1];
+ clientIndirectBinding.binding = 1;
+
+ BindGroupEntry& validatedParamsBinding = bindings[2];
+ validatedParamsBinding.binding = 2;
+ validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
+
+ BindGroupDescriptor bindGroupDescriptor = {};
+ bindGroupDescriptor.layout = layout.Get();
+ bindGroupDescriptor.entryCount = 3;
+ bindGroupDescriptor.entries = bindings;
+
+ // Finally, we can now encode our validation passes. Each pass first does a single
+ // WriteBuffer to get batch data over to the GPU, followed by a single compute pass. The
+ // compute pass encodes a separate SetBindGroup and Dispatch command for each batch.
+ for (const Pass& pass : passes) {
+ commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
+ static_cast<const uint8_t*>(pass.batchData.get()),
+ pass.batchDataSize);
+
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ ComputePassDescriptor descriptor = {};
+ Ref<ComputePassEncoder> passEncoder =
+ AcquireRef(commandEncoder->APIBeginComputePass(&descriptor));
+ passEncoder->APISetPipeline(pipeline);
+
+ clientIndirectBinding.buffer = pass.clientIndirectBuffer;
+
+ for (const Batch& batch : pass.batches) {
+ bufferDataBinding.offset = batch.dataBufferOffset;
+ bufferDataBinding.size = batch.dataSize;
+ clientIndirectBinding.offset = batch.clientIndirectOffset;
+ clientIndirectBinding.size = batch.clientIndirectSize;
+ validatedParamsBinding.offset = batch.validatedParamsOffset;
+ validatedParamsBinding.size = batch.validatedParamsSize;
+
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
+
+ const uint32_t numDrawsRoundedUp =
+ (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APIDispatch(numDrawsRoundedUp);
+ }
+
+ passEncoder->APIEnd();
+ }
+
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h
new file mode 100644
index 00000000000..6714137037f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/IndirectDrawValidationEncoder.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+#define DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+
+namespace dawn::native {
+
+ class CommandEncoder;
+ struct CombinedLimits;
+ class DeviceBase;
+ class RenderPassResourceUsageTracker;
+
+    // The maximum number of draw calls we can fit into a single validation batch. This is
+    // essentially limited by the number of indirect parameter blocks that can fit into the maximum
+    // allowed storage binding size (with the base limits, it is about 6.7M).
+ uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
+
+ MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Instance.cpp b/chromium/third_party/dawn/src/dawn/native/Instance.cpp
new file mode 100644
index 00000000000..48bf74064df
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Instance.cpp
@@ -0,0 +1,435 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Instance.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+
+// For SwiftShader fallback
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+# include "dawn/native/VulkanBackend.h"
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_USE_X11)
+# include "dawn/native/XlibXcbFunctions.h"
+#endif // defined(DAWN_USE_X11)
+
+#include <optional>
+
+namespace dawn::native {
+
+ // Forward definitions of each backend's "Connect" function that creates new BackendConnection.
+ // Conditionally compiled declarations are used to avoid using static constructors instead.
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+ namespace d3d12 {
+ BackendConnection* Connect(InstanceBase* instance);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ namespace metal {
+ BackendConnection* Connect(InstanceBase* instance);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+ namespace null {
+ BackendConnection* Connect(InstanceBase* instance);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_NULL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+ namespace opengl {
+ BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ namespace vulkan {
+ BackendConnection* Connect(InstanceBase* instance);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+ namespace {
+
+ BackendsBitset GetEnabledBackends() {
+ BackendsBitset enabledBackends;
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+ enabledBackends.set(wgpu::BackendType::Null);
+#endif // defined(DAWN_ENABLE_BACKEND_NULL)
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+ enabledBackends.set(wgpu::BackendType::D3D12);
+#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ enabledBackends.set(wgpu::BackendType::Metal);
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ enabledBackends.set(wgpu::BackendType::Vulkan);
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+ enabledBackends.set(wgpu::BackendType::OpenGL);
+#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+ enabledBackends.set(wgpu::BackendType::OpenGLES);
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+ return enabledBackends;
+ }
+
+ } // anonymous namespace
+
+ // InstanceBase
+
+ // static
+ InstanceBase* InstanceBase::Create(const InstanceDescriptor* descriptor) {
+ Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
+ static constexpr InstanceDescriptor kDefaultDesc = {};
+ if (descriptor == nullptr) {
+ descriptor = &kDefaultDesc;
+ }
+ if (instance->ConsumedError(instance->Initialize(descriptor))) {
+ return nullptr;
+ }
+ return instance.Detach();
+ }
+
+ // TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
+ MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
+ const DawnInstanceDescriptor* dawnDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &dawnDesc);
+ if (dawnDesc != nullptr) {
+ for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
+ mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
+ }
+ }
+ // Default paths to search are next to the shared library, next to the executable, and
+ // no path (just libvulkan.so).
+ if (auto p = GetModuleDirectory()) {
+ mRuntimeSearchPaths.push_back(std::move(*p));
+ }
+ if (auto p = GetExecutableDirectory()) {
+ mRuntimeSearchPaths.push_back(std::move(*p));
+ }
+ mRuntimeSearchPaths.push_back("");
+ return {};
+ }
+
+ void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata) {
+ static constexpr RequestAdapterOptions kDefaultOptions = {};
+ if (options == nullptr) {
+ options = &kDefaultOptions;
+ }
+ auto result = RequestAdapterInternal(options);
+ if (result.IsError()) {
+ auto err = result.AcquireError();
+ std::string msg = err->GetFormattedMessage();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
+ } else {
+ Ref<AdapterBase> adapter = result.AcquireSuccess();
+ // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+ callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
+ }
+ }
+
+ ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
+ const RequestAdapterOptions* options) {
+ ASSERT(options != nullptr);
+ if (options->forceFallbackAdapter) {
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
+ dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
+ vulkanOptions.forceSwiftShader = true;
+ DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
+ }
+#else
+ return Ref<AdapterBase>(nullptr);
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+ } else {
+ DiscoverDefaultAdapters();
+ }
+
+ wgpu::AdapterType preferredType;
+ switch (options->powerPreference) {
+ case wgpu::PowerPreference::LowPower:
+ preferredType = wgpu::AdapterType::IntegratedGPU;
+ break;
+ case wgpu::PowerPreference::Undefined:
+ case wgpu::PowerPreference::HighPerformance:
+ preferredType = wgpu::AdapterType::DiscreteGPU;
+ break;
+ }
+
+ std::optional<size_t> discreteGPUAdapterIndex;
+ std::optional<size_t> integratedGPUAdapterIndex;
+ std::optional<size_t> cpuAdapterIndex;
+ std::optional<size_t> unknownAdapterIndex;
+
+ for (size_t i = 0; i < mAdapters.size(); ++i) {
+ AdapterProperties properties;
+ mAdapters[i]->APIGetProperties(&properties);
+
+ if (options->forceFallbackAdapter) {
+ if (!gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID)) {
+ continue;
+ }
+ return mAdapters[i];
+ }
+ if (properties.adapterType == preferredType) {
+ return mAdapters[i];
+ }
+ switch (properties.adapterType) {
+ case wgpu::AdapterType::DiscreteGPU:
+ discreteGPUAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::IntegratedGPU:
+ integratedGPUAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::CPU:
+ cpuAdapterIndex = i;
+ break;
+ case wgpu::AdapterType::Unknown:
+ unknownAdapterIndex = i;
+ break;
+ }
+ }
+
+ // For now, we always prefer the discrete GPU
+ if (discreteGPUAdapterIndex) {
+ return mAdapters[*discreteGPUAdapterIndex];
+ }
+ if (integratedGPUAdapterIndex) {
+ return mAdapters[*integratedGPUAdapterIndex];
+ }
+ if (cpuAdapterIndex) {
+ return mAdapters[*cpuAdapterIndex];
+ }
+ if (unknownAdapterIndex) {
+ return mAdapters[*unknownAdapterIndex];
+ }
+
+ return Ref<AdapterBase>(nullptr);
+ }
+
+ void InstanceBase::DiscoverDefaultAdapters() {
+ for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
+ EnsureBackendConnection(b);
+ }
+
+ if (mDiscoveredDefaultAdapters) {
+ return;
+ }
+
+ // Query and merge all default adapters for all backends
+ for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+ std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
+
+ for (Ref<AdapterBase>& adapter : backendAdapters) {
+ ASSERT(adapter->GetBackendType() == backend->GetType());
+ ASSERT(adapter->GetInstance() == this);
+ mAdapters.push_back(std::move(adapter));
+ }
+ }
+
+ mDiscoveredDefaultAdapters = true;
+ }
+
+ // This is just a wrapper around the real logic that uses Error.h error handling.
+ bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+ return !ConsumedError(DiscoverAdaptersInternal(options));
+ }
+
+ const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
+ return mTogglesInfo.GetToggleInfo(toggleName);
+ }
+
+ Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
+ return mTogglesInfo.ToggleNameToEnum(toggleName);
+ }
+
+ const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
+ return mFeaturesInfo.GetFeatureInfo(feature);
+ }
+
+ const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
+ return mAdapters;
+ }
+
+ void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
+ if (mBackendsConnected[backendType]) {
+ return;
+ }
+
+ auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
+ if (connection != nullptr) {
+ ASSERT(connection->GetType() == expectedType);
+ ASSERT(connection->GetInstance() == this);
+ mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
+ }
+ };
+
+ switch (backendType) {
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+ case wgpu::BackendType::Null:
+ Register(null::Connect(this), wgpu::BackendType::Null);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_NULL)
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+ case wgpu::BackendType::D3D12:
+ Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ case wgpu::BackendType::Metal:
+ Register(metal::Connect(this), wgpu::BackendType::Metal);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ case wgpu::BackendType::Vulkan:
+ Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+ case wgpu::BackendType::OpenGL:
+ Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
+ wgpu::BackendType::OpenGL);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+ case wgpu::BackendType::OpenGLES:
+ Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
+ wgpu::BackendType::OpenGLES);
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+ default:
+ UNREACHABLE();
+ }
+
+ mBackendsConnected.set(backendType);
+ }
+
+ MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
+ wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
+ DAWN_TRY(ValidateBackendType(backendType));
+
+ if (!GetEnabledBackends()[backendType]) {
+ return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
+ }
+
+ EnsureBackendConnection(backendType);
+
+ bool foundBackend = false;
+ for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+ if (backend->GetType() != backendType) {
+ continue;
+ }
+ foundBackend = true;
+
+ std::vector<Ref<AdapterBase>> newAdapters;
+ DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
+
+ for (Ref<AdapterBase>& adapter : newAdapters) {
+ ASSERT(adapter->GetBackendType() == backend->GetType());
+ ASSERT(adapter->GetInstance() == this);
+ mAdapters.push_back(std::move(adapter));
+ }
+ }
+
+ DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
+ return {};
+ }
+
+ bool InstanceBase::ConsumedError(MaybeError maybeError) {
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+
+ ASSERT(error != nullptr);
+ dawn::ErrorLog() << error->GetFormattedMessage();
+ return true;
+ }
+ return false;
+ }
+
+ bool InstanceBase::IsBackendValidationEnabled() const {
+ return mBackendValidationLevel != BackendValidationLevel::Disabled;
+ }
+
+ void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
+ mBackendValidationLevel = level;
+ }
+
+ BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
+ return mBackendValidationLevel;
+ }
+
+ void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+ mBeginCaptureOnStartup = beginCaptureOnStartup;
+ }
+
+ bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
+ return mBeginCaptureOnStartup;
+ }
+
+ void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
+ mPlatform = platform;
+ }
+
+ dawn::platform::Platform* InstanceBase::GetPlatform() {
+ if (mPlatform != nullptr) {
+ return mPlatform;
+ }
+
+ if (mDefaultPlatform == nullptr) {
+ mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
+ }
+ return mDefaultPlatform.get();
+ }
+
+ const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
+ return mRuntimeSearchPaths;
+ }
+
+ const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
+#if defined(DAWN_USE_X11)
+ if (mXlibXcbFunctions == nullptr) {
+ mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
+ }
+ return mXlibXcbFunctions.get();
+#else
+ UNREACHABLE();
+#endif // defined(DAWN_USE_X11)
+ }
+
+ Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
+ if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
+ return nullptr;
+ }
+
+ return new Surface(this, descriptor);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Instance.h b/chromium/third_party/dawn/src/dawn/native/Instance.h
new file mode 100644
index 00000000000..58986893a14
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Instance.h
@@ -0,0 +1,129 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INSTANCE_H_
+#define DAWNNATIVE_INSTANCE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/Toggles.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+namespace dawn::platform {
+ class Platform;
+} // namespace dawn::platform
+
+namespace dawn::native {
+
+ class Surface;
+ class XlibXcbFunctions;
+
+ using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
+
+ // This is called InstanceBase for consistency across the frontend, even if the backends don't
+ // specialize this class.
+ class InstanceBase final : public RefCounted {
+ public:
+ static InstanceBase* Create(const InstanceDescriptor* descriptor = nullptr);
+
+ void APIRequestAdapter(const RequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata);
+
+ void DiscoverDefaultAdapters();
+ bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+
+ const std::vector<Ref<AdapterBase>>& GetAdapters() const;
+
+        // Used to handle errors that happen up to device creation.
+ bool ConsumedError(MaybeError maybeError);
+
+ // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+ // of a toggle supported in Dawn.
+ const ToggleInfo* GetToggleInfo(const char* toggleName);
+ Toggle ToggleNameToEnum(const char* toggleName);
+
+        // Used to query the details of a feature. Return nullptr if featureName is not a valid
+        // name of a feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
+
+ bool IsBackendValidationEnabled() const;
+ void SetBackendValidationLevel(BackendValidationLevel level);
+ BackendValidationLevel GetBackendValidationLevel() const;
+
+ void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+ bool IsBeginCaptureOnStartupEnabled() const;
+
+ void SetPlatform(dawn::platform::Platform* platform);
+ dawn::platform::Platform* GetPlatform();
+
+ const std::vector<std::string>& GetRuntimeSearchPaths() const;
+
+ // Get backend-independent libraries that need to be loaded dynamically.
+ const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
+
+ // Dawn API
+ Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
+
+ private:
+ InstanceBase() = default;
+ ~InstanceBase() = default;
+
+ InstanceBase(const InstanceBase& other) = delete;
+ InstanceBase& operator=(const InstanceBase& other) = delete;
+
+ MaybeError Initialize(const InstanceDescriptor* descriptor);
+
+ // Lazily creates connections to all backends that have been compiled.
+ void EnsureBackendConnection(wgpu::BackendType backendType);
+
+ MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
+
+ ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
+ const RequestAdapterOptions* options);
+
+ std::vector<std::string> mRuntimeSearchPaths;
+
+ BackendsBitset mBackendsConnected;
+
+ bool mDiscoveredDefaultAdapters = false;
+
+ bool mBeginCaptureOnStartup = false;
+ BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
+
+ dawn::platform::Platform* mPlatform = nullptr;
+ std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
+
+ std::vector<std::unique_ptr<BackendConnection>> mBackends;
+ std::vector<Ref<AdapterBase>> mAdapters;
+
+ FeaturesInfo mFeaturesInfo;
+ TogglesInfo mTogglesInfo;
+
+#if defined(DAWN_USE_X11)
+ std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
+#endif // defined(DAWN_USE_X11)
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_INSTANCE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h b/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h
new file mode 100644
index 00000000000..fd4c2f14eb1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/IntegerTypes.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INTEGERTYPES_H_
+#define DAWNNATIVE_INTEGERTYPES_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/TypedInteger.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+ // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
+ using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
+ constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
+
+ // Binding numbers get mapped to a packed range of indices
+ using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
+
+ using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
+
+ constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
+
+ using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
+
+ constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
+ ColorAttachmentIndex(kMaxColorAttachments);
+
+ using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
+ using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
+
+ constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
+ constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
+ VertexAttributeLocation(kMaxVertexAttributes);
+
+ // Serials are 64bit integers that are incremented by one each time to produce unique values.
+ // Some serials (like queue serials) are compared numerically to know which one is before
+ // another, while some serials are only checked for equality. We call serials only checked
+ // for equality IDs.
+
+ // Buffer mapping requests are stored outside of the buffer while they are being processed and
+ // cannot be invalidated. Instead they are associated with an ID, and when a map request is
+    // finished, the mapping callback is fired only if its ID matches the ID of the last request
+ // that was sent.
+ using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
+
+ // The type for the WebGPU API fence serial values.
+ using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
+
+ // A serial used to watch the progression of GPU execution on a queue, each time operations
+ // that need to be followed individually are scheduled for execution on a queue, the serial
+ // is incremented by one. This way to know if something is done executing, we just need to
+ // compare its serial with the currently completed serial.
+ using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
+ constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
+
+ // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
+ // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
+ // token, which prevents them (and any BindGroups created with them) from being used with any
+ // other pipelines.
+ using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_INTEGERTYPES_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp
new file mode 100644
index 00000000000..a2532aa8a68
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/InternalPipelineStore.h"
+
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ShaderModule.h"
+
+#include <unordered_map>
+
+namespace dawn::native {
+
+ class RenderPipelineBase;
+ class ShaderModuleBase;
+
+ InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
+ : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
+ scratchIndirectStorage(device,
+ wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
+ wgpu::BufferUsage::Storage) {
+ }
+
+ InternalPipelineStore::~InternalPipelineStore() = default;
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h
new file mode 100644
index 00000000000..64e7728266f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/InternalPipelineStore.h
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INTERNALPIPELINESTORE_H_
+#define DAWNNATIVE_INTERNALPIPELINESTORE_H_
+
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ScratchBuffer.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <unordered_map>
+
+namespace dawn::native {
+
+ class DeviceBase;
+ class RenderPipelineBase;
+ class ShaderModuleBase;
+
+ // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
+ // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
+ struct InternalPipelineStore {
+ explicit InternalPipelineStore(DeviceBase* device);
+ ~InternalPipelineStore();
+
+ std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
+ copyTextureForBrowserPipelines;
+
+ Ref<ShaderModuleBase> copyTextureForBrowser;
+
+ Ref<ComputePipelineBase> timestampComputePipeline;
+ Ref<ShaderModuleBase> timestampCS;
+
+ Ref<ShaderModuleBase> dummyFragmentShader;
+
+ // A scratch buffer suitable for use as a copy destination and storage binding.
+ ScratchBuffer scratchStorage;
+
+ // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
+ // buffer for indirect dispatch or draw calls.
+ ScratchBuffer scratchIndirectStorage;
+
+ Ref<ComputePipelineBase> renderValidationPipeline;
+ Ref<ShaderModuleBase> renderValidationShader;
+ Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_INTERNALPIPELINESTORE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Limits.cpp b/chromium/third_party/dawn/src/dawn/native/Limits.cpp
new file mode 100644
index 00000000000..a7b8ec9bcc5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Limits.cpp
@@ -0,0 +1,213 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Limits.h"
+
+#include "dawn/common/Assert.h"
+
+#include <array>
+
+// clang-format off
+// TODO(crbug.com/dawn/685):
+// For now, only expose these tiers until metrics can determine better ones.
+#define LIMITS_WORKGROUP_STORAGE_SIZE(X) \
+ X(Higher, maxComputeWorkgroupStorageSize, 16352, 32768, 49152, 65536)
+
+#define LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
+ X(Higher, maxStorageBufferBindingSize, 134217728, 1073741824, 2147483647, 4294967295)
+
+// TODO(crbug.com/dawn/685):
+// These limits don't have tiers yet. Define two tiers with the same values since the macros
+// in this file expect more than one tier.
+#define LIMITS_OTHER(X) \
+ X(Higher, maxTextureDimension1D, 8192, 8192) \
+ X(Higher, maxTextureDimension2D, 8192, 8192) \
+ X(Higher, maxTextureDimension3D, 2048, 2048) \
+ X(Higher, maxTextureArrayLayers, 256, 256) \
+ X(Higher, maxBindGroups, 4, 4) \
+ X(Higher, maxDynamicUniformBuffersPerPipelineLayout, 8, 8) \
+ X(Higher, maxDynamicStorageBuffersPerPipelineLayout, 4, 4) \
+ X(Higher, maxSampledTexturesPerShaderStage, 16, 16) \
+ X(Higher, maxSamplersPerShaderStage, 16, 16) \
+ X(Higher, maxStorageBuffersPerShaderStage, 8, 8) \
+ X(Higher, maxStorageTexturesPerShaderStage, 4, 4) \
+ X(Higher, maxUniformBuffersPerShaderStage, 12, 12) \
+ X(Higher, maxUniformBufferBindingSize, 65536, 65536) \
+ X( Lower, minUniformBufferOffsetAlignment, 256, 256) \
+ X( Lower, minStorageBufferOffsetAlignment, 256, 256) \
+ X(Higher, maxVertexBuffers, 8, 8) \
+ X(Higher, maxVertexAttributes, 16, 16) \
+ X(Higher, maxVertexBufferArrayStride, 2048, 2048) \
+ X(Higher, maxInterStageShaderComponents, 60, 60) \
+ X(Higher, maxComputeInvocationsPerWorkgroup, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeX, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeY, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeZ, 64, 64) \
+ X(Higher, maxComputeWorkgroupsPerDimension, 65535, 65535)
+// clang-format on
+
+#define LIMITS_EACH_GROUP(X) \
+ X(LIMITS_WORKGROUP_STORAGE_SIZE) \
+ X(LIMITS_STORAGE_BUFFER_BINDING_SIZE) \
+ X(LIMITS_OTHER)
+
+#define LIMITS(X) \
+ LIMITS_WORKGROUP_STORAGE_SIZE(X) \
+ LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
+ LIMITS_OTHER(X)
+
+namespace dawn::native {
+ namespace {
+ template <uint32_t A, uint32_t B>
+ constexpr void StaticAssertSame() {
+ static_assert(A == B, "Mismatching tier count in limit group.");
+ }
+
+ template <uint32_t I, uint32_t... Is>
+ constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
+ int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
+ DAWN_UNUSED(unused);
+ return I;
+ }
+
+ enum class LimitBetterDirection {
+ Lower,
+ Higher,
+ };
+
+ template <LimitBetterDirection Better>
+ struct CheckLimit;
+
+ template <>
+ struct CheckLimit<LimitBetterDirection::Lower> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs < rhs;
+ }
+
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ DAWN_INVALID_IF(IsBetter(required, supported),
+ "Required limit (%u) is lower than the supported limit (%u).",
+ required, supported);
+ return {};
+ }
+ };
+
+ template <>
+ struct CheckLimit<LimitBetterDirection::Higher> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs > rhs;
+ }
+
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ DAWN_INVALID_IF(IsBetter(required, supported),
+ "Required limit (%u) is greater than the supported limit (%u).",
+ required, supported);
+ return {};
+ }
+ };
+
+ template <typename T>
+ bool IsLimitUndefined(T value) {
+ static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
+ return false;
+ }
+
+ template <>
+ bool IsLimitUndefined<uint32_t>(uint32_t value) {
+ return value == wgpu::kLimitU32Undefined;
+ }
+
+ template <>
+ bool IsLimitUndefined<uint64_t>(uint64_t value) {
+ return value == wgpu::kLimitU64Undefined;
+ }
+
+ } // namespace
+
+ void GetDefaultLimits(Limits* limits) {
+ ASSERT(limits != nullptr);
+#define X(Better, limitName, base, ...) limits->limitName = base;
+ LIMITS(X)
+#undef X
+ }
+
+ Limits ReifyDefaultLimits(const Limits& limits) {
+ Limits out;
+#define X(Better, limitName, base, ...) \
+ if (IsLimitUndefined(limits.limitName) || \
+ CheckLimit<LimitBetterDirection::Better>::IsBetter( \
+ static_cast<decltype(limits.limitName)>(base), limits.limitName)) { \
+ /* If the limit is undefined or the default is better, use the default */ \
+ out.limitName = base; \
+ } else { \
+ out.limitName = limits.limitName; \
+ }
+ LIMITS(X)
+#undef X
+ return out;
+ }
+
+ MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
+#define X(Better, limitName, ...) \
+ if (!IsLimitUndefined(requiredLimits.limitName)) { \
+ DAWN_TRY_CONTEXT(CheckLimit<LimitBetterDirection::Better>::Validate( \
+ supportedLimits.limitName, requiredLimits.limitName), \
+ "validating " #limitName); \
+ }
+ LIMITS(X)
+#undef X
+ return {};
+ }
+
+ Limits ApplyLimitTiers(Limits limits) {
+#define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
+#define GET_TIER_COUNT(LIMIT_GROUP) \
+ ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
+
+#define X_EACH_GROUP(LIMIT_GROUP) \
+ { \
+ constexpr uint32_t kTierCount = GET_TIER_COUNT(LIMIT_GROUP); \
+ for (uint32_t i = kTierCount; i != 0; --i) { \
+ LIMIT_GROUP(X_CHECK_BETTER_AND_CLAMP) \
+ /* Limits fit in tier and have been clamped. Break. */ \
+ break; \
+ } \
+ }
+
+#define X_CHECK_BETTER_AND_CLAMP(Better, limitName, ...) \
+ { \
+ constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__}; \
+ decltype(Limits::limitName) tierValue = tiers[i - 1]; \
+ if (CheckLimit<LimitBetterDirection::Better>::IsBetter(tierValue, limits.limitName)) { \
+ /* The tier is better. Go to the next tier. */ \
+ continue; \
+ } else if (tierValue != limits.limitName) { \
+ /* Better than the tier. Degrade |limits| to the tier. */ \
+ limits.limitName = tiers[i - 1]; \
+ } \
+ }
+
+ LIMITS_EACH_GROUP(X_EACH_GROUP)
+#undef X_CHECK_BETTER
+#undef X_EACH_GROUP
+#undef GET_TIER_COUNT
+#undef X_TIER_COUNT
+ return limits;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Limits.h b/chromium/third_party/dawn/src/dawn/native/Limits.h
new file mode 100644
index 00000000000..f41eaa86976
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Limits.h
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_LIMITS_H_
+#define DAWNNATIVE_LIMITS_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ struct CombinedLimits {
+ Limits v1;
+ };
+
+ // Populate |limits| with the default limits.
+ void GetDefaultLimits(Limits* limits);
+
+ // Returns a copy of |limits| where all undefined values are replaced
+ // with their defaults. Also clamps to the defaults if the provided limits
+ // are worse.
+ Limits ReifyDefaultLimits(const Limits& limits);
+
+ // Validate that |requiredLimits| are no better than |supportedLimits|.
+ MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
+
+ // Returns a copy of |limits| where limit tiers are applied.
+ Limits ApplyLimitTiers(Limits limits);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_LIMITS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp b/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp
new file mode 100644
index 00000000000..3cafdb79bb5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectBase.cpp
@@ -0,0 +1,90 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Device.h"
+
+#include <mutex>
+
+namespace dawn::native {
+
+ static constexpr uint64_t kErrorPayload = 0;
+ static constexpr uint64_t kNotErrorPayload = 1;
+
+ ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
+ }
+
+ ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
+ : RefCounted(kErrorPayload), mDevice(device) {
+ }
+
+ DeviceBase* ObjectBase::GetDevice() const {
+ return mDevice;
+ }
+
+ bool ObjectBase::IsError() const {
+ return GetRefCountPayload() == kErrorPayload;
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
+ if (label) {
+ mLabel = label;
+ }
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
+ : ObjectBase(device) {
+ }
+
+ ApiObjectBase::~ApiObjectBase() {
+ ASSERT(!IsAlive());
+ }
+
+ void ApiObjectBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+ }
+
+ const std::string& ApiObjectBase::GetLabel() const {
+ return mLabel;
+ }
+
+ void ApiObjectBase::SetLabelImpl() {
+ }
+
+ bool ApiObjectBase::IsAlive() const {
+ return IsInList();
+ }
+
+ void ApiObjectBase::DeleteThis() {
+ Destroy();
+ RefCounted::DeleteThis();
+ }
+
+ void ApiObjectBase::TrackInDevice() {
+ ASSERT(GetDevice() != nullptr);
+ GetDevice()->TrackObject(this);
+ }
+
+ void ApiObjectBase::Destroy() {
+ const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
+ if (RemoveFromList()) {
+ DestroyImpl();
+ }
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectBase.h b/chromium/third_party/dawn/src/dawn/native/ObjectBase.h
new file mode 100644
index 00000000000..8f110a18c42
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectBase.h
@@ -0,0 +1,97 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OBJECTBASE_H_
+#define DAWNNATIVE_OBJECTBASE_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Forward.h"
+
+#include <string>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ class ObjectBase : public RefCounted {
+ public:
+ struct ErrorTag {};
+ static constexpr ErrorTag kError = {};
+
+ explicit ObjectBase(DeviceBase* device);
+ ObjectBase(DeviceBase* device, ErrorTag tag);
+
+ DeviceBase* GetDevice() const;
+ bool IsError() const;
+
+ private:
+ // Pointer to owning device.
+ DeviceBase* mDevice;
+ };
+
+ class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
+ public:
+ struct LabelNotImplementedTag {};
+ static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
+ struct UntrackedByDeviceTag {};
+ static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
+
+ ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
+ ApiObjectBase(DeviceBase* device, const char* label);
+ ApiObjectBase(DeviceBase* device, ErrorTag tag);
+ ~ApiObjectBase() override;
+
+ virtual ObjectType GetType() const = 0;
+ const std::string& GetLabel() const;
+
+ // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
+ // by the owning device.
+ bool IsAlive() const;
+
+ // This needs to be public because it can be called from the device owning the object.
+ void Destroy();
+
+ // Dawn API
+ void APISetLabel(const char* label);
+
+ protected:
+ // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
+ // always call their derived class implementation of Destroy prior to the derived
+ // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
+ // then the underlying backend's Destroy calls are executed. We cannot naively put the call
+ // to Destroy in the destructor of this class because it calls DestroyImpl
+ // which is a virtual function often implemented in the Derived class which would already
+ // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
+ // order. Note that some classes like BindGroup may override the DeleteThis function again,
+ // and they should ensure that their overriding versions call this underlying version
+ // somewhere.
+ void DeleteThis() override;
+ void TrackInDevice();
+
+ // Sub-classes may override this function multiple times. Whenever overriding this function,
+ // however, users should be sure to call their parent's version in the new override to make
+ // sure that all destroy functionality is kept. This function is guaranteed to only be
+ // called once through the exposed Destroy function.
+ virtual void DestroyImpl() = 0;
+
+ private:
+ virtual void SetLabelImpl();
+
+ std::string mLabel;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_OBJECTBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp
new file mode 100644
index 00000000000..58c892e9275
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.cpp
@@ -0,0 +1,22 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ObjectContentHasher.h"
+
+namespace dawn::native {
+
+ size_t ObjectContentHasher::GetContentHash() const {
+ return mContentHash;
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h
new file mode 100644
index 00000000000..c1ca32a9142
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ObjectContentHasher.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
+#define DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
+
+#include "dawn/common/HashUtils.h"
+
+#include <string>
+#include <vector>
+
+namespace dawn::native {
+
+ // ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
+ // cache.
+ class ObjectContentHasher {
+ public:
+ // Record calls the appropriate record function based on the type.
+ template <typename T, typename... Args>
+ void Record(const T& value, const Args&... args) {
+ RecordImpl<T, Args...>::Call(this, value, args...);
+ }
+
+ size_t GetContentHash() const;
+
+ private:
+ template <typename T, typename... Args>
+ struct RecordImpl {
+ static constexpr void Call(ObjectContentHasher* recorder,
+ const T& value,
+ const Args&... args) {
+ HashCombine(&recorder->mContentHash, value, args...);
+ }
+ };
+
+ template <typename T>
+ struct RecordImpl<T*> {
+ static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
+                // Calling Record(objPtr) is not allowed. This check exists only to prevent
+                // such mistakes.
+ static_assert(obj == nullptr);
+ }
+ };
+
+ template <typename T>
+ struct RecordImpl<std::vector<T>> {
+ static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
+ recorder->RecordIterable<std::vector<T>>(vec);
+ }
+ };
+
+ template <typename IteratorT>
+ constexpr void RecordIterable(const IteratorT& iterable) {
+ for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+ Record(*it);
+ }
+ }
+
+ size_t mContentHash = 0;
+ };
+
+ template <>
+ struct ObjectContentHasher::RecordImpl<std::string> {
+ static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
+ recorder->RecordIterable<std::string>(str);
+ }
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h
new file mode 100644
index 00000000000..c6fe5350099
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsage.h
@@ -0,0 +1,100 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PASSRESOURCEUSAGE_H
+#define DAWNNATIVE_PASSRESOURCEUSAGE_H
+
+#include "dawn/native/SubresourceStorage.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <set>
+#include <vector>
+
+namespace dawn::native {
+
+ // This file declares various "ResourceUsage" structures. They are produced by the frontend
+ // while recording commands to be used for later validation and also some operations in the
+    // backends. They are produced by the "Encoder" objects that finalize them on "EndPass" or
+ // "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
+
+ class BufferBase;
+ class QuerySetBase;
+ class TextureBase;
+
+ // The texture usage inside passes must be tracked per-subresource.
+ using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
+
+ // Which resources are used by a synchronization scope and how they are used. The command
+ // buffer validation pre-computes this information so that backends with explicit barriers
+ // don't have to re-compute it.
+ struct SyncScopeResourceUsage {
+ std::vector<BufferBase*> buffers;
+ std::vector<wgpu::BufferUsage> bufferUsages;
+
+ std::vector<TextureBase*> textures;
+ std::vector<TextureSubresourceUsage> textureUsages;
+
+ std::vector<ExternalTextureBase*> externalTextures;
+ };
+
+ // Contains all the resource usage data for a compute pass.
+ //
+ // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
+    // specification. ComputePassResourceUsage also stores inline the set of all buffers and
+ // textures used, because some unused BindGroups may not be used at all in synchronization
+ // scope but their resources still need to be validated on Queue::Submit.
+ struct ComputePassResourceUsage {
+ // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
+ // use the copy constructor (that's deleted) when doing operations on a
+ // vector<ComputePassResourceUsage>
+ ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
+ ComputePassResourceUsage() = default;
+
+ std::vector<SyncScopeResourceUsage> dispatchUsages;
+
+ // All the resources referenced by this compute pass for validation in Queue::Submit.
+ std::set<BufferBase*> referencedBuffers;
+ std::set<TextureBase*> referencedTextures;
+ std::set<ExternalTextureBase*> referencedExternalTextures;
+ };
+
+ // Contains all the resource usage data for a render pass.
+ //
+ // In the WebGPU specification render passes are synchronization scopes but we also need to
+ // track additional data. It is stored for render passes used by a CommandBuffer, but also in
+ // RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
+ struct RenderPassResourceUsage : public SyncScopeResourceUsage {
+ // Storage to track the occlusion queries used during the pass.
+ std::vector<QuerySetBase*> querySets;
+ std::vector<std::vector<bool>> queryAvailabilities;
+ };
+
+ using RenderPassUsages = std::vector<RenderPassResourceUsage>;
+ using ComputePassUsages = std::vector<ComputePassResourceUsage>;
+
+ // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
+ // is used for validation and to produce barriers and lazy clears in the backends.
+ struct CommandBufferResourceUsage {
+ RenderPassUsages renderPasses;
+ ComputePassUsages computePasses;
+
+ // Resources used in commands that aren't in a pass.
+ std::set<BufferBase*> topLevelBuffers;
+ std::set<TextureBase*> topLevelTextures;
+ std::set<QuerySetBase*> usedQuerySets;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PASSRESOURCEUSAGE_H
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp
new file mode 100644
index 00000000000..b4814cfa626
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.cpp
@@ -0,0 +1,243 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PassResourceUsageTracker.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Texture.h"
+
+#include <utility>
+
+namespace dawn::native {
+
+ void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
+ // std::map's operator[] will create the key and return 0 if the key didn't exist
+ // before.
+ mBufferUsages[buffer] |= usage;
+ }
+
+ void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
+ TextureBase* texture = view->GetTexture();
+ const SubresourceRange& range = view->GetSubresourceRange();
+
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+ // wgpu::TextureUsage::None)
+ auto it = mTextureUsages.emplace(
+ std::piecewise_construct, std::forward_as_tuple(texture),
+ std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+ texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+ TextureSubresourceUsage& textureUsage = it.first->second;
+
+ textureUsage.Update(range,
+ [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+ // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
+ // branches.
+ if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
+ (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
+ // Using the same subresource as an attachment for two different
+ // render attachments is a write-write hazard. Add this internal
+ // usage so we will fail the check that a subresource with
+ // writable usage is the single usage.
+ *storedUsage |= kAgainAsRenderAttachment;
+ }
+ *storedUsage |= usage;
+ });
+ }
+
+ void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
+ TextureBase* texture,
+ const TextureSubresourceUsage& textureUsage) {
+ // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+ // wgpu::TextureUsage::None)
+ auto it = mTextureUsages.emplace(
+ std::piecewise_construct, std::forward_as_tuple(texture),
+ std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+ texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+ TextureSubresourceUsage* passTextureUsage = &it.first->second;
+
+ passTextureUsage->Merge(
+ textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+ const wgpu::TextureUsage& addedUsage) {
+ ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
+ *storedUsage |= addedUsage;
+ });
+ }
+
+ void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+ break;
+ case wgpu::BufferBindingType::Storage:
+ BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+ break;
+ case kInternalStorageBufferBinding:
+ BufferUsedAs(buffer, kInternalStorageBuffer);
+ break;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ BufferUsedAs(buffer, kReadOnlyStorageBuffer);
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
+ break;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+
+ case BindingInfoType::Sampler:
+ break;
+ }
+ }
+
+ for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+ mExternalTextureUsages.insert(externalTexture.Get());
+ }
+ }
+
+ SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
+ SyncScopeResourceUsage result;
+ result.buffers.reserve(mBufferUsages.size());
+ result.bufferUsages.reserve(mBufferUsages.size());
+ result.textures.reserve(mTextureUsages.size());
+ result.textureUsages.reserve(mTextureUsages.size());
+
+ for (auto& [buffer, usage] : mBufferUsages) {
+ result.buffers.push_back(buffer);
+ result.bufferUsages.push_back(usage);
+ }
+
+ for (auto& [texture, usage] : mTextureUsages) {
+ result.textures.push_back(texture);
+ result.textureUsages.push_back(std::move(usage));
+ }
+
+ for (auto& it : mExternalTextureUsages) {
+ result.externalTextures.push_back(it);
+ }
+
+ mBufferUsages.clear();
+ mTextureUsages.clear();
+ mExternalTextureUsages.clear();
+
+ return result;
+ }
+
+ void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
+ mUsage.dispatchUsages.push_back(std::move(scope));
+ }
+
+ void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
+ mUsage.referencedBuffers.insert(buffer);
+ }
+
+ void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
+ for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
+ const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ mUsage.referencedTextures.insert(
+ group->GetBindingAsTextureView(index)->GetTexture());
+ break;
+ }
+
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ case BindingInfoType::StorageTexture:
+ case BindingInfoType::Sampler:
+ break;
+ }
+ }
+
+ for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+ mUsage.referencedExternalTextures.insert(externalTexture.Get());
+ }
+ }
+
+ ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
+ return std::move(mUsage);
+ }
+
+ RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
+ RenderPassResourceUsage result;
+ *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+
+ result.querySets.reserve(mQueryAvailabilities.size());
+ result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+
+ for (auto& it : mQueryAvailabilities) {
+ result.querySets.push_back(it.first);
+ result.queryAvailabilities.push_back(std::move(it.second));
+ }
+
+ mQueryAvailabilities.clear();
+
+ return result;
+ }
+
+ void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+ uint32_t queryIndex) {
+ // The query availability only needs to be tracked again on render passes for checking
+ // query overwrite on render pass and resetting query sets on the Vulkan backend.
+ DAWN_ASSERT(querySet != nullptr);
+
+ // Gets the iterator for that querySet or create a new vector of bool set to false
+ // if the querySet wasn't registered.
+ auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+ it->second[queryIndex] = true;
+ }
+
+ const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
+ return mQueryAvailabilities;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h
new file mode 100644
index 00000000000..ad0ef92fe14
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PassResourceUsageTracker.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
+#define DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
+
+#include "dawn/native/PassResourceUsage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <map>
+
+namespace dawn::native {
+
+ class BindGroupBase;
+ class BufferBase;
+ class ExternalTextureBase;
+ class QuerySetBase;
+ class TextureBase;
+
+ using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
+
+ // Helper class to build SyncScopeResourceUsages
+ class SyncScopeUsageTracker {
+ public:
+ void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
+ void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
+ void AddRenderBundleTextureUsage(TextureBase* texture,
+ const TextureSubresourceUsage& textureUsage);
+
+ // Walks the bind groups and tracks all its resources.
+ void AddBindGroup(BindGroupBase* group);
+
+ // Returns the per-pass usage for use by backends for APIs with explicit barriers.
+ SyncScopeResourceUsage AcquireSyncScopeUsage();
+
+ private:
+ std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
+ std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
+ std::set<ExternalTextureBase*> mExternalTextureUsages;
+ };
+
+ // Helper class to build ComputePassResourceUsages
+ class ComputePassResourceUsageTracker {
+ public:
+ void AddDispatch(SyncScopeResourceUsage scope);
+ void AddReferencedBuffer(BufferBase* buffer);
+ void AddResourcesReferencedByBindGroup(BindGroupBase* group);
+
+ ComputePassResourceUsage AcquireResourceUsage();
+
+ private:
+ ComputePassResourceUsage mUsage;
+ };
+
+ // Helper class to build RenderPassResourceUsages
+ class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
+ public:
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+ const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+
+ RenderPassResourceUsage AcquireResourceUsage();
+
+ private:
+ // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
+ // instead.
+ using SyncScopeUsageTracker::AcquireSyncScopeUsage;
+
+ // Tracks queries used in the render pass to validate that they aren't written twice.
+ QueryAvailabilityMap mQueryAvailabilities;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PerStage.cpp b/chromium/third_party/dawn/src/dawn/native/PerStage.cpp
new file mode 100644
index 00000000000..365d54480dc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PerStage.cpp
@@ -0,0 +1,49 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PerStage.h"
+
+namespace dawn::native {
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SingleShaderStage value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case SingleShaderStage::Compute:
+ s->Append("Compute");
+ break;
+ case SingleShaderStage::Vertex:
+ s->Append("Vertex");
+ break;
+ case SingleShaderStage::Fragment:
+ s->Append("Fragment");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
+ std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
+ return BitSetIterator<kNumStages, SingleShaderStage>(bits);
+ }
+
+ wgpu::ShaderStage StageBit(SingleShaderStage stage) {
+ ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PerStage.h b/chromium/third_party/dawn/src/dawn/native/PerStage.h
new file mode 100644
index 00000000000..47e0abc3d28
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PerStage.h
@@ -0,0 +1,87 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PERSTAGE_H_
+#define DAWNNATIVE_PERSTAGE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Constants.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+ enum class SingleShaderStage { Vertex, Fragment, Compute };
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SingleShaderStage value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
+ static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
+ static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
+
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
+ (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
+
+ BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
+ wgpu::ShaderStage StageBit(SingleShaderStage stage);
+
+ static constexpr wgpu::ShaderStage kAllStages =
+ static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
+
+ template <typename T>
+ class PerStage {
+ public:
+ PerStage() = default;
+ PerStage(const T& initialValue) {
+ mData.fill(initialValue);
+ }
+
+ T& operator[](SingleShaderStage stage) {
+ DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return mData[static_cast<uint32_t>(stage)];
+ }
+ const T& operator[](SingleShaderStage stage) const {
+ DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+ return mData[static_cast<uint32_t>(stage)];
+ }
+
+ T& operator[](wgpu::ShaderStage stageBit) {
+ uint32_t bit = static_cast<uint32_t>(stageBit);
+ DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+ return mData[Log2(bit)];
+ }
+ const T& operator[](wgpu::ShaderStage stageBit) const {
+ uint32_t bit = static_cast<uint32_t>(stageBit);
+ DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+ return mData[Log2(bit)];
+ }
+
+ private:
+ std::array<T, kNumStages> mData;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PERSTAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp b/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp
new file mode 100644
index 00000000000..ce3ab492320
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PersistentCache.cpp
@@ -0,0 +1,64 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PersistentCache.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Device.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native {
+
+ PersistentCache::PersistentCache(DeviceBase* device)
+ : mDevice(device), mCache(GetPlatformCache()) {
+ }
+
+ ScopedCachedBlob PersistentCache::LoadData(const PersistentCacheKey& key) {
+ ScopedCachedBlob blob = {};
+ if (mCache == nullptr) {
+ return blob;
+ }
+ std::lock_guard<std::mutex> lock(mMutex);
+ blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
+ if (blob.bufferSize > 0) {
+ blob.buffer.reset(new uint8_t[blob.bufferSize]);
+ const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
+ blob.buffer.get(), blob.bufferSize);
+ ASSERT(bufferSize == blob.bufferSize);
+ return blob;
+ }
+ return blob;
+ }
+
+ void PersistentCache::StoreData(const PersistentCacheKey& key, const void* value, size_t size) {
+ if (mCache == nullptr) {
+ return;
+ }
+ ASSERT(value != nullptr);
+ ASSERT(size > 0);
+ std::lock_guard<std::mutex> lock(mMutex);
+ mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
+ }
+
+ dawn::platform::CachingInterface* PersistentCache::GetPlatformCache() {
+ // TODO(dawn:549): Create a fingerprint of concatenated version strings (ex. Tint commit
+ // hash, Dawn commit hash). This will be used by the client so it may know when to discard
+ // previously cached Dawn objects should this fingerprint change.
+ dawn::platform::Platform* platform = mDevice->GetPlatform();
+ if (platform != nullptr) {
+ return platform->GetCachingInterface(/*fingerprint*/ nullptr, /*fingerprintSize*/ 0);
+ }
+ return nullptr;
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PersistentCache.h b/chromium/third_party/dawn/src/dawn/native/PersistentCache.h
new file mode 100644
index 00000000000..7854d59f4e1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PersistentCache.h
@@ -0,0 +1,92 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PERSISTENTCACHE_H_
+#define DAWNNATIVE_PERSISTENTCACHE_H_
+
+#include "dawn/native/Error.h"
+
+#include <mutex>
+#include <vector>
+
+namespace dawn::platform {
+ class CachingInterface;
+}
+
+namespace dawn::native {
+
+ using PersistentCacheKey = std::vector<uint8_t>;
+
+ struct ScopedCachedBlob {
+ std::unique_ptr<uint8_t[]> buffer;
+ size_t bufferSize = 0;
+ };
+
+ class DeviceBase;
+
+ enum class PersistentKeyType { Shader };
+
+ // This class should always be thread-safe as it is used in Create*PipelineAsync() where it is
+ // called asynchronously.
+ // The thread-safety of any access to mCache (the function LoadData() and StoreData()) is
+ // protected by mMutex.
+ class PersistentCache {
+ public:
+ PersistentCache(DeviceBase* device);
+
+ // Combines load/store operations into a single call.
+ // If the load was successful, a non-empty blob is returned to the caller.
+ // Else, the creation callback |createFn| gets invoked with a callback
+ // |doCache| to store the newly created blob back in the cache.
+ //
+ // Example usage:
+ //
+ // ScopedCachedBlob cachedBlob = {};
+ // DAWN_TRY_ASSIGN(cachedBlob, GetOrCreate(key, [&](auto doCache)) {
+ // // Create a new blob to be stored
+ // doCache(newBlobPtr, newBlobSize); // store
+ // }));
+ //
+ template <typename CreateFn>
+ ResultOrError<ScopedCachedBlob> GetOrCreate(const PersistentCacheKey& key,
+ CreateFn&& createFn) {
+ // Attempt to load an existing blob from the cache.
+ ScopedCachedBlob blob = LoadData(key);
+ if (blob.bufferSize > 0) {
+ return std::move(blob);
+ }
+
+ // Allow the caller to create a new blob to be stored for the given key.
+ DAWN_TRY(createFn([this, key](const void* value, size_t size) {
+ this->StoreData(key, value, size);
+ }));
+
+ return std::move(blob);
+ }
+
+ private:
+ // PersistentCache impl
+ ScopedCachedBlob LoadData(const PersistentCacheKey& key);
+ void StoreData(const PersistentCacheKey& key, const void* value, size_t size);
+
+ dawn::platform::CachingInterface* GetPlatformCache();
+
+ DeviceBase* mDevice = nullptr;
+
+ std::mutex mMutex;
+ dawn::platform::CachingInterface* mCache = nullptr;
+ };
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PERSISTENTCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp
new file mode 100644
index 00000000000..73a46bec76c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Pipeline.cpp
@@ -0,0 +1,250 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/ShaderModule.h"
+
+namespace dawn::native {
+ MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage) {
+ DAWN_TRY(device->ValidateObject(module));
+
+ DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
+ "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+ module);
+
+ const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
+
+ DAWN_INVALID_IF(metadata.stage != stage,
+ "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
+ metadata.stage, entryPoint, stage);
+
+ if (layout != nullptr) {
+ DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
+ }
+
+ if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "Pipeline overridable constants are disallowed because they are partially "
+ "implemented.");
+ }
+
+ // Validate if overridable constants exist in shader module
+ // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
+ size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
+ // Keep an initialized constants sets to handle duplicate initialization cases
+ std::unordered_set<std::string> stageInitializedConstantIdentifiers;
+ for (uint32_t i = 0; i < constantCount; i++) {
+ DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
+ "Pipeline overridable constant \"%s\" not found in %s.",
+ constants[i].key, module);
+
+ if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
+ if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
+ numUninitializedConstants--;
+ }
+ stageInitializedConstantIdentifiers.insert(constants[i].key);
+ } else {
+ // There are duplicate initializations
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Pipeline overridable constants \"%s\" is set more than once in %s",
+ constants[i].key, module);
+ }
+ }
+
+ // Validate if any overridable constant is left uninitialized
+ if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
+ std::string uninitializedConstantsArray;
+ bool isFirst = true;
+ for (std::string identifier : metadata.uninitializedOverridableConstants) {
+ if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
+ continue;
+ }
+
+ if (isFirst) {
+ isFirst = false;
+ } else {
+ uninitializedConstantsArray.append(", ");
+ }
+ uninitializedConstantsArray.append(identifier);
+ }
+
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "There are uninitialized pipeline overridable constants in shader module %s, their "
+ "identifiers:[%s]",
+ module, uninitializedConstantsArray);
+ }
+
+ return {};
+ }
+
+ // PipelineBase
+
+ PipelineBase::PipelineBase(DeviceBase* device,
+ PipelineLayoutBase* layout,
+ const char* label,
+ std::vector<StageAndDescriptor> stages)
+ : ApiObjectBase(device, label), mLayout(layout) {
+ ASSERT(!stages.empty());
+
+ for (const StageAndDescriptor& stage : stages) {
+ // Extract argument for this stage.
+ SingleShaderStage shaderStage = stage.shaderStage;
+ ShaderModuleBase* module = stage.module;
+ const char* entryPointName = stage.entryPoint.c_str();
+
+ const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
+ ASSERT(metadata.stage == shaderStage);
+
+ // Record them internally.
+ bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
+ mStageMask |= StageBit(shaderStage);
+ mStages[shaderStage] = {module, entryPointName, &metadata, {}};
+ auto& constants = mStages[shaderStage].constants;
+ for (uint32_t i = 0; i < stage.constantCount; i++) {
+ constants.emplace(stage.constants[i].key, stage.constants[i].value);
+ }
+
+ // Compute the max() of all minBufferSizes across all stages.
+ RequiredBufferSizes stageMinBufferSizes =
+ ComputeRequiredBufferSizesForLayout(metadata, layout);
+
+ if (isFirstStage) {
+ mMinBufferSizes = std::move(stageMinBufferSizes);
+ } else {
+ for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
+ ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
+
+ for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
+ mMinBufferSizes[group][i] =
+ std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
+ }
+ }
+ }
+ }
+ }
+
+ PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ }
+
+ PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ PipelineBase::~PipelineBase() = default;
+
+ PipelineLayoutBase* PipelineBase::GetLayout() {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ const PipelineLayoutBase* PipelineBase::GetLayout() const {
+ ASSERT(!IsError());
+ return mLayout.Get();
+ }
+
+ const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
+ ASSERT(!IsError());
+ return mMinBufferSizes;
+ }
+
+ const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
+ ASSERT(!IsError());
+ return mStages[stage];
+ }
+
+ const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
+ return mStages;
+ }
+
+ wgpu::ShaderStage PipelineBase::GetStageMask() const {
+ return mStageMask;
+ }
+
+ MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
+ DAWN_INVALID_IF(
+ groupIndex >= kMaxBindGroups,
+ "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+ groupIndex, kMaxBindGroups);
+ return {};
+ }
+
+ ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
+ uint32_t groupIndexIn) {
+ DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
+
+ BindGroupIndex groupIndex(groupIndexIn);
+ if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
+ return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
+ } else {
+ return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
+ }
+ }
+
+ BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
+ Ref<BindGroupLayoutBase> result;
+ if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
+ "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
+ this)) {
+ return BindGroupLayoutBase::MakeError(GetDevice());
+ }
+ return result.Detach();
+ }
+
+ size_t PipelineBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mLayout->GetContentHash());
+
+ recorder.Record(mStageMask);
+ for (SingleShaderStage stage : IterateStages(mStageMask)) {
+ recorder.Record(mStages[stage].module->GetContentHash());
+ recorder.Record(mStages[stage].entryPoint);
+ }
+
+ return recorder.GetContentHash();
+ }
+
+ // static
+ bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
+ // The layout is deduplicated so it can be compared by pointer.
+ if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+ return false;
+ }
+
+ for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
+ // The module is deduplicated so it can be compared by pointer.
+ if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
+ a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Pipeline.h b/chromium/third_party/dawn/src/dawn/native/Pipeline.h
new file mode 100644
index 00000000000..ab078c35631
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Pipeline.h
@@ -0,0 +1,98 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PIPELINE_H_
+#define DAWNNATIVE_PIPELINE_H_
+
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage);
+
+ // Use map to make sure constant keys are sorted for creating shader cache keys
+ using PipelineConstantEntries = std::map<std::string, double>;
+
+ struct ProgrammableStage {
+ Ref<ShaderModuleBase> module;
+ std::string entryPoint;
+
+ // The metadata lives as long as module, that's ref-ed in the same structure.
+ const EntryPointMetadata* metadata = nullptr;
+
+ PipelineConstantEntries constants;
+ };
+
+ class PipelineBase : public ApiObjectBase, public CachedObject {
+ public:
+ ~PipelineBase() override;
+
+ PipelineLayoutBase* GetLayout();
+ const PipelineLayoutBase* GetLayout() const;
+ const RequiredBufferSizes& GetMinBufferSizes() const;
+ const ProgrammableStage& GetStage(SingleShaderStage stage) const;
+ const PerStage<ProgrammableStage>& GetAllStages() const;
+ wgpu::ShaderStage GetStageMask() const;
+
+ ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
+
+ // Helper functions for std::unordered_map-based pipeline caches.
+ size_t ComputeContentHash() override;
+ static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
+
+ // Implementation of the API entrypoint. Do not use in a reentrant manner.
+ BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+
+ // Initialize() should only be called once by the frontend.
+ virtual MaybeError Initialize() = 0;
+
+ protected:
+ PipelineBase(DeviceBase* device,
+ PipelineLayoutBase* layout,
+ const char* label,
+ std::vector<StageAndDescriptor> stages);
+ PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // Constructor used only for mocking and testing.
+ PipelineBase(DeviceBase* device);
+
+ private:
+ MaybeError ValidateGetBindGroupLayout(uint32_t group);
+
+ wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
+ PerStage<ProgrammableStage> mStages;
+
+ Ref<PipelineLayoutBase> mLayout;
+ RequiredBufferSizes mMinBufferSizes;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp
new file mode 100644
index 00000000000..56ab1004221
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.cpp
@@ -0,0 +1,409 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ShaderModule.h"
+
+namespace dawn::native {
+
+ MaybeError ValidatePipelineLayoutDescriptor(
+ DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("too many bind group layouts");
+ }
+
+ BindingCounts bindingCounts = {};
+ for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
+ DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
+ if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
+ pipelineCompatibilityToken) {
+ return DAWN_VALIDATION_ERROR(
+ "cannot create a pipeline layout using a bind group layout that was created as "
+ "part of a pipeline's default layout");
+ }
+ AccumulateBindingCounts(&bindingCounts,
+ descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
+ }
+
+ DAWN_TRY(ValidateBindingCounts(bindingCounts));
+ return {};
+ }
+
+ // PipelineLayoutBase
+
+ PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label) {
+ ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
+ for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
+ ++group) {
+ mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
+ mMask.set(group);
+ }
+ }
+
+ PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor)
+ : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
+ TrackInDevice();
+ }
+
+ PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
+ : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ PipelineLayoutBase::~PipelineLayoutBase() = default;
+
+ void PipelineLayoutBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncachePipelineLayout(this);
+ }
+ }
+
+ // static
+ PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
+ return new PipelineLayoutBase(device, ObjectBase::kError);
+ }
+
+ // static
+ ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
+ DeviceBase* device,
+ std::vector<StageAndDescriptor> stages) {
+ using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
+
+ // Merges two entries at the same location, if they are allowed to be merged.
+ auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
+ const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
+ // Visibility is excluded because we take the OR across stages.
+ bool compatible =
+ modifiedEntry->binding == mergedEntry.binding &&
+ modifiedEntry->buffer.type == mergedEntry.buffer.type &&
+ modifiedEntry->sampler.type == mergedEntry.sampler.type &&
+ // Compatibility between these sample types is checked below.
+ (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
+ (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
+ modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
+
+ // Minimum buffer binding size excluded because we take the maximum seen across stages.
+ if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
+ compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
+ mergedEntry.buffer.hasDynamicOffset;
+ }
+
+ if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
+ // Sample types are compatible if they are exactly equal,
+ // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
+ // Note that the |mergedEntry| never has type Float. Texture bindings all start
+ // as UnfilterableFloat and are promoted to Float if they are statically used with
+ // a sampler.
+ ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
+ bool compatibleSampleTypes =
+ modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
+ (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
+ mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
+ compatible =
+ compatible && compatibleSampleTypes &&
+ modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
+ modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
+ }
+
+ if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+ compatible =
+ compatible &&
+ modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
+ modifiedEntry->storageTexture.viewDimension ==
+ mergedEntry.storageTexture.viewDimension;
+ }
+
+ // Check if any properties are incompatible with existing entry
+ // If compatible, we will merge some properties
+ if (!compatible) {
+ return DAWN_VALIDATION_ERROR(
+ "Duplicate binding in default pipeline layout initialization "
+ "not compatible with previous declaration");
+ }
+
+ // Use the max |minBufferBindingSize| we find.
+ modifiedEntry->buffer.minBindingSize =
+ std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
+
+ // Use the OR of all the stages at which we find this binding.
+ modifiedEntry->visibility |= mergedEntry.visibility;
+
+ return {};
+ };
+
+ // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
+ auto ConvertMetadataToEntry =
+ [](const ShaderBindingInfo& shaderBinding,
+ const ExternalTextureBindingLayout* externalTextureBindingEntry)
+ -> BindGroupLayoutEntry {
+ BindGroupLayoutEntry entry = {};
+ switch (shaderBinding.bindingType) {
+ case BindingInfoType::Buffer:
+ entry.buffer.type = shaderBinding.buffer.type;
+ entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
+ entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
+ break;
+ case BindingInfoType::Sampler:
+ if (shaderBinding.sampler.isComparison) {
+ entry.sampler.type = wgpu::SamplerBindingType::Comparison;
+ } else {
+ entry.sampler.type = wgpu::SamplerBindingType::Filtering;
+ }
+ break;
+ case BindingInfoType::Texture:
+ switch (shaderBinding.texture.compatibleSampleTypes) {
+ case SampleTypeBit::Depth:
+ entry.texture.sampleType = wgpu::TextureSampleType::Depth;
+ break;
+ case SampleTypeBit::Sint:
+ entry.texture.sampleType = wgpu::TextureSampleType::Sint;
+ break;
+ case SampleTypeBit::Uint:
+ entry.texture.sampleType = wgpu::TextureSampleType::Uint;
+ break;
+ case SampleTypeBit::Float:
+ case SampleTypeBit::UnfilterableFloat:
+ case SampleTypeBit::None:
+ UNREACHABLE();
+ break;
+ default:
+ if (shaderBinding.texture.compatibleSampleTypes ==
+ (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
+ // Default to UnfilterableFloat. It will be promoted to Float if it
+ // is used with a sampler.
+ entry.texture.sampleType =
+ wgpu::TextureSampleType::UnfilterableFloat;
+ } else {
+ UNREACHABLE();
+ }
+ }
+ entry.texture.viewDimension = shaderBinding.texture.viewDimension;
+ entry.texture.multisampled = shaderBinding.texture.multisampled;
+ break;
+ case BindingInfoType::StorageTexture:
+ entry.storageTexture.access = shaderBinding.storageTexture.access;
+ entry.storageTexture.format = shaderBinding.storageTexture.format;
+ entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
+ break;
+ case BindingInfoType::ExternalTexture:
+ entry.nextInChain = externalTextureBindingEntry;
+ break;
+ }
+ return entry;
+ };
+
+ PipelineCompatibilityToken pipelineCompatibilityToken =
+ device->GetNextPipelineCompatibilityToken();
+
+ // Creates the BGL from the entries for a stage, checking it is valid.
+ auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ -> ResultOrError<Ref<BindGroupLayoutBase>> {
+ std::vector<BindGroupLayoutEntry> entryVec;
+ entryVec.reserve(entries.size());
+ for (auto& [_, entry] : entries) {
+ entryVec.push_back(entry);
+ }
+
+ BindGroupLayoutDescriptor desc = {};
+ desc.entries = entryVec.data();
+ desc.entryCount = entryVec.size();
+
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
+ &desc);
+ }
+ return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
+ };
+
+ ASSERT(!stages.empty());
+
+ // Data which BindGroupLayoutDescriptor will point to for creation
+ ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
+ entryData = {};
+
+ // External texture binding layouts are chained structs that are set as a pointer within
+ // the bind group layout entry. We declare an entry here so that it can be used when needed
+ // in each BindGroupLayoutEntry and so it can stay alive until the call to
+ // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
+ // there's no issue with using the same struct multiple times.
+ ExternalTextureBindingLayout externalTextureBindingLayout;
+
+ // Loops over all the reflected BindGroupLayoutEntries from shaders.
+ for (const StageAndDescriptor& stage : stages) {
+ const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+
+ for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
+ for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
+ // Create the BindGroupLayoutEntry
+ BindGroupLayoutEntry entry =
+ ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
+ entry.binding = static_cast<uint32_t>(bindingNumber);
+ entry.visibility = StageBit(stage.shaderStage);
+
+ // Add it to our map of all entries, if there is an existing entry, then we
+ // need to merge, if we can.
+ const auto& [existingEntry, inserted] =
+ entryData[group].insert({bindingNumber, entry});
+ if (!inserted) {
+ DAWN_TRY(MergeEntries(&existingEntry->second, entry));
+ }
+ }
+ }
+
+ // Promote any Unfilterable textures used with a sampler to Filtering.
+ for (const EntryPointMetadata::SamplerTexturePair& pair :
+ metadata.samplerTexturePairs) {
+ BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
+ if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
+ entry->texture.sampleType = wgpu::TextureSampleType::Float;
+ }
+ }
+ }
+
+ // Create the bind group layouts. We need to keep track of the last non-empty BGL because
+ // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
+ // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
+ // same.
+ BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
+ ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
+ for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
+ DAWN_TRY_ASSIGN(bindGroupLayouts[group],
+ CreateBGL(device, entryData[group], pipelineCompatibilityToken));
+ if (entryData[group].size() != 0) {
+ pipelineBGLCount = group + BindGroupIndex(1);
+ }
+ }
+
+ // Create the deduced pipeline layout, validating if it is valid.
+ ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+ for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
+ bgls[group] = bindGroupLayouts[group].Get();
+ }
+
+ PipelineLayoutDescriptor desc = {};
+ desc.bindGroupLayouts = bgls.data();
+ desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
+
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
+
+ Ref<PipelineLayoutBase> result;
+ DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
+ ASSERT(!result->IsError());
+
+ // Sanity check in debug that the pipeline layout is compatible with the current
+ // pipeline.
+ for (const StageAndDescriptor& stage : stages) {
+ const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+ ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
+ .IsSuccess());
+ }
+
+ return std::move(result);
+ }
+
+ ObjectType PipelineLayoutBase::GetType() const {
+ return ObjectType::PipelineLayout;
+ }
+
+ const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
+ ASSERT(!IsError());
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(mMask[group]);
+ const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
+ }
+
+ BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
+ ASSERT(!IsError());
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(mMask[group]);
+ BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
+ }
+
+ const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
+ ASSERT(!IsError());
+ return mMask;
+ }
+
+ BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
+ const PipelineLayoutBase* other) const {
+ ASSERT(!IsError());
+ return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
+ }
+
+ BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
+ ASSERT(!IsError());
+
+ for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+ if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
+ return i;
+ }
+ }
+ return kMaxBindGroupsTyped;
+ }
+
+ size_t PipelineLayoutBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mMask);
+
+ for (BindGroupIndex group : IterateBitSet(mMask)) {
+ recorder.Record(GetBindGroupLayout(group)->GetContentHash());
+ }
+
+ return recorder.GetContentHash();
+ }
+
+ bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
+ const PipelineLayoutBase* b) const {
+ if (a->mMask != b->mMask) {
+ return false;
+ }
+
+ for (BindGroupIndex group : IterateBitSet(a->mMask)) {
+ if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h
new file mode 100644
index 00000000000..4850536d42e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PipelineLayout.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PIPELINELAYOUT_H_
+#define DAWNNATIVE_PIPELINELAYOUT_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ MaybeError ValidatePipelineLayoutDescriptor(
+ DeviceBase*,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+
+ using BindGroupLayoutArray =
+ ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+ using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
+
+ struct StageAndDescriptor {
+ SingleShaderStage shaderStage;
+ ShaderModuleBase* module;
+ std::string entryPoint;
+ uint32_t constantCount = 0u;
+ ConstantEntry const* constants = nullptr;
+ };
+
+ class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
+ public:
+ PipelineLayoutBase(DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
+ ~PipelineLayoutBase() override;
+
+ static PipelineLayoutBase* MakeError(DeviceBase* device);
+ static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
+ DeviceBase* device,
+ std::vector<StageAndDescriptor> stages);
+
+ ObjectType GetType() const override;
+
+ const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
+ BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
+ const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
+
+ // Utility functions to compute inherited bind groups.
+ // Returns the inherited bind groups as a mask.
+ BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
+
+ // Returns the index of the first incompatible bind group in the range
+ // [0, kMaxBindGroups]
+ BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
+
+ // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
+ };
+
+ protected:
+ // Constructor used only for mocking and testing.
+ PipelineLayoutBase(DeviceBase* device);
+ PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ void DestroyImpl() override;
+
+ BindGroupLayoutArray mBindGroupLayouts;
+ BindGroupLayoutMask mMask;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PIPELINELAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp
new file mode 100644
index 00000000000..0a01a99ab53
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.cpp
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+    // Wraps |heapAllocator|; freed heaps are kept in mPool for reuse instead of being
+    // returned to the backend immediately. |heapAllocator| is borrowed, not owned.
+    PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
+        ResourceHeapAllocator* heapAllocator)
+        : mHeapAllocator(heapAllocator) {
+    }
+
+    // Releases every pooled heap back to the underlying allocator and empties the pool.
+    void PooledResourceMemoryAllocator::DestroyPool() {
+        for (auto& resourceHeap : mPool) {
+            ASSERT(resourceHeap != nullptr);
+            mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
+        }
+
+        mPool.clear();
+    }
+
+    // Returns a recycled heap from the front of the pool when one is available,
+    // otherwise forwards the allocation to the underlying allocator. Note |size| is
+    // only consulted on the fallback path: the pool assumes all heaps are the same
+    // fixed size (see the class comment in the header).
+    ResultOrError<std::unique_ptr<ResourceHeapBase>>
+    PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
+        // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
+        // pooling is disabled in-frame when the memory is still pending. For high in-frame
+        // memory users, FIFO might be preferable when memory consumption is a higher priority.
+        std::unique_ptr<ResourceHeapBase> memory;
+        if (!mPool.empty()) {
+            memory = std::move(mPool.front());
+            mPool.pop_front();
+        }
+
+        if (memory == nullptr) {
+            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
+        }
+
+        return std::move(memory);
+    }
+
+    // Returns the heap to the pool for reuse; it is not actually freed until
+    // DestroyPool() runs. push_front pairs with pop_front above to give LIFO reuse.
+    void PooledResourceMemoryAllocator::DeallocateResourceHeap(
+        std::unique_ptr<ResourceHeapBase> allocation) {
+        mPool.push_front(std::move(allocation));
+    }
+
+    // Number of heaps currently held (unused) in the pool.
+    uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
+        return mPool.size();
+    }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h
new file mode 100644
index 00000000000..898bafe497e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/PooledResourceMemoryAllocator.h
@@ -0,0 +1,53 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+#define DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+
+#include <deque>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+    // |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
+    // pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
+    // The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
+    // to the pool and made AVAILABLE.
+    class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
+      public:
+        PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
+        ~PooledResourceMemoryAllocator() override = default;
+
+        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+            uint64_t size) override;
+        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+        // Frees all pooled heaps back through |mHeapAllocator|.
+        void DestroyPool();
+
+        // For testing purposes.
+        uint64_t GetPoolSizeForTesting() const;
+
+      private:
+        // Borrowed backend allocator used for cache misses and final destruction.
+        ResourceHeapAllocator* mHeapAllocator = nullptr;
+
+        std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp
new file mode 100644
index 00000000000..8bdc08b0c06
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.cpp
@@ -0,0 +1,203 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ProgrammableEncoder.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cstring>
+
+namespace dawn::native {
+
+    // Constructor for a valid encoder. Caches the device's validation toggle up front so
+    // each encoded command does not have to re-query it.
+    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                             const char* label,
+                                             EncodingContext* encodingContext)
+        : ApiObjectBase(device, label),
+          mEncodingContext(encodingContext),
+          mValidationEnabled(device->IsValidationEnabled()) {
+    }
+
+    // Constructor for an "error" encoder (carries the error tag instead of a label).
+    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                             EncodingContext* encodingContext,
+                                             ErrorTag errorTag)
+        : ApiObjectBase(device, errorTag),
+          mEncodingContext(encodingContext),
+          mValidationEnabled(device->IsValidationEnabled()) {
+    }
+
+    bool ProgrammableEncoder::IsValidationEnabled() const {
+        return mValidationEnabled;
+    }
+
+    // Checks that every PushDebugGroup was matched by a PopDebugGroup before the pass ends.
+    MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
+        DAWN_INVALID_IF(mDebugGroupStackSize != 0,
+                        "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
+                        mDebugGroupStackSize);
+        return {};
+    }
+
+    // Records an InsertDebugMarker command; the label string (including its NUL
+    // terminator) is copied into the command allocator so the caller's pointer need
+    // not outlive this call.
+    void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                InsertDebugMarkerCmd* cmd =
+                    allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                return {};
+            },
+            "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+    }
+
+    // Records a PopDebugGroup command and pops the encoder-side label stack.
+    // NOTE(review): when validation is disabled, an unmatched pop decrements
+    // mDebugGroupStackSize (uint64_t) past zero and it wraps — callers are trusted here;
+    // confirm this is intentional.
+    void ProgrammableEncoder::APIPopDebugGroup() {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                if (IsValidationEnabled()) {
+                    DAWN_INVALID_IF(
+                        mDebugGroupStackSize == 0,
+                        "PopDebugGroup called when no debug groups are currently pushed.");
+                }
+                allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+                mDebugGroupStackSize--;
+                mEncodingContext->PopDebugGroupLabel();
+
+                return {};
+            },
+            "encoding %s.PopDebugGroup().", this);
+    }
+
+    // Records a PushDebugGroup command (label copied with its NUL terminator, as above)
+    // and pushes the label on the encoder-side stack for error-message context.
+    void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
+        mEncodingContext->TryEncode(
+            this,
+            [&](CommandAllocator* allocator) -> MaybeError {
+                PushDebugGroupCmd* cmd =
+                    allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+                cmd->length = strlen(groupLabel);
+
+                char* label = allocator->AllocateData<char>(cmd->length + 1);
+                memcpy(label, groupLabel, cmd->length + 1);
+
+                mDebugGroupStackSize++;
+                mEncodingContext->PushDebugGroupLabel(groupLabel);
+
+                return {};
+            },
+            "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+    }
+
+    // Validates a SetBindGroup call shared by compute and render passes: the group is a
+    // live object, the index is in range, the dynamic-offset count matches the layout,
+    // and each dynamic offset is aligned and keeps the binding within its buffer.
+    MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
+                                                         BindGroupBase* group,
+                                                         uint32_t dynamicOffsetCountIn,
+                                                         const uint32_t* dynamicOffsetsIn) const {
+        DAWN_TRY(GetDevice()->ValidateObject(group));
+
+        DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
+                        "Bind group index (%u) exceeds the maximum (%u).",
+                        static_cast<uint32_t>(index), kMaxBindGroups);
+
+        ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
+                                                                BindingIndex(dynamicOffsetCountIn));
+
+        // Dynamic offsets count must match the number required by the layout perfectly.
+        const BindGroupLayoutBase* layout = group->GetLayout();
+        DAWN_INVALID_IF(
+            layout->GetDynamicBufferCount() != dynamicOffsets.size(),
+            "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
+            "in %s.",
+            static_cast<uint32_t>(dynamicOffsets.size()),
+            static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
+
+        for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
+            const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
+
+            // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+            // ASSERT that this is true.
+            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+            ASSERT(bindingInfo.buffer.hasDynamicOffset);
+
+            // The required alignment depends on the buffer binding type (uniform vs storage).
+            uint64_t requiredAlignment;
+            switch (bindingInfo.buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Storage:
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                case kInternalStorageBufferBinding:
+                    requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
+                    break;
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
+            }
+
+            DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
+                            "Dynamic Offset[%u] (%u) is not %u byte aligned.",
+                            static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
+
+            BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+
+            // During BindGroup creation, validation ensures binding offset + binding size
+            // <= buffer size.
+            ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+            ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+
+            if ((dynamicOffsets[i] >
+                 bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
+                // Special-case message when the binding already reaches the end of the
+                // buffer: the only valid dynamic offset would be 0, which commonly means
+                // the user forgot to set an explicit binding size.
+                DAWN_INVALID_IF(
+                    (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
+                    "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
+                    "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
+                    "even with a dynamic offset of 0. Did you forget to specify "
+                    "the binding's size?",
+                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "Dynamic Offset[%u] (%u) is out of bounds of "
+                    "%s with a size of %u and a bound range of (offset: %u, size: %u).",
+                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+            }
+        }
+
+        return {};
+    }
+
+    // Records a SetBindGroup command; the dynamic offsets array is copied into the
+    // command allocator so the caller's pointer need not outlive this call.
+    void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
+                                                 BindGroupIndex index,
+                                                 BindGroupBase* group,
+                                                 uint32_t dynamicOffsetCount,
+                                                 const uint32_t* dynamicOffsets) const {
+        SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
+        cmd->index = index;
+        cmd->group = group;
+        cmd->dynamicOffsetCount = dynamicOffsetCount;
+        if (dynamicOffsetCount > 0) {
+            uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+            memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+        }
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h
new file mode 100644
index 00000000000..6bba7a27526
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ProgrammableEncoder.h
@@ -0,0 +1,72 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_PROGRAMMABLEENCODER_H_
+#define DAWNNATIVE_PROGRAMMABLEENCODER_H_
+
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+    // Base class for shared functionality between programmable encoders
+    // (compute and render pass encoders): debug-group commands and SetBindGroup helpers.
+    class ProgrammableEncoder : public ApiObjectBase {
+      public:
+        ProgrammableEncoder(DeviceBase* device,
+                            const char* label,
+                            EncodingContext* encodingContext);
+
+        void APIInsertDebugMarker(const char* groupLabel);
+        void APIPopDebugGroup();
+        void APIPushDebugGroup(const char* groupLabel);
+
+      protected:
+        bool IsValidationEnabled() const;
+        // Errors if any pushed debug group is still open when the pass ends.
+        MaybeError ValidateProgrammableEncoderEnd() const;
+
+        // Compute and render passes do different things on SetBindGroup. These are helper functions
+        // for the logic they have in common.
+        MaybeError ValidateSetBindGroup(BindGroupIndex index,
+                                        BindGroupBase* group,
+                                        uint32_t dynamicOffsetCountIn,
+                                        const uint32_t* dynamicOffsetsIn) const;
+        void RecordSetBindGroup(CommandAllocator* allocator,
+                                BindGroupIndex index,
+                                BindGroupBase* group,
+                                uint32_t dynamicOffsetCount,
+                                const uint32_t* dynamicOffsets) const;
+
+        // Construct an "error" programmable pass encoder.
+        ProgrammableEncoder(DeviceBase* device,
+                            EncodingContext* encodingContext,
+                            ErrorTag errorTag);
+
+        EncodingContext* mEncodingContext = nullptr;
+
+        // Number of currently-open debug groups (pushes minus pops).
+        uint64_t mDebugGroupStackSize = 0;
+
+      private:
+        // Snapshot of the device's validation toggle taken at construction.
+        const bool mValidationEnabled;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_PROGRAMMABLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp b/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp
new file mode 100644
index 00000000000..8fb6103eaf8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/QueryHelper.cpp
@@ -0,0 +1,219 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/QueryHelper.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePassEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include <cmath>
+
+namespace dawn::native {
+
+    namespace {
+
+        // Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
+        static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
+        static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
+        static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
+        static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
+        static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
+
+        // WGSL shader converting raw 64-bit timestamps (as low/high u32 pairs) in place:
+        // unavailable entries are zeroed; available ones are multiplied by the fixed-point
+        // period (multiplier / 2^right_shift) using 16-bit chunks to avoid u32 overflow.
+        static const char sConvertTimestampsToNanoseconds[] = R"(
+            struct Timestamp {
+                low : u32;
+                high : u32;
+            };
+
+            struct TimestampArr {
+                t : array<Timestamp>;
+            };
+
+            struct AvailabilityArr {
+                v : array<u32>;
+            };
+
+            struct TimestampParams {
+                first : u32;
+                count : u32;
+                offset : u32;
+                multiplier : u32;
+                right_shift : u32;
+            };
+
+            @group(0) @binding(0) var<storage, read_write> timestamps : TimestampArr;
+            @group(0) @binding(1) var<storage, read> availability : AvailabilityArr;
+            @group(0) @binding(2) var<uniform> params : TimestampParams;
+
+            let sizeofTimestamp : u32 = 8u;
+
+            @stage(compute) @workgroup_size(8, 1, 1)
+            fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+                if (GlobalInvocationID.x >= params.count) { return; }
+
+                var index = GlobalInvocationID.x + params.offset / sizeofTimestamp;
+
+                // Return 0 for the unavailable value.
+                if (availability.v[GlobalInvocationID.x + params.first] == 0u) {
+                    timestamps.t[index].low = 0u;
+                    timestamps.t[index].high = 0u;
+                    return;
+                }
+
+                var timestamp = timestamps.t[index];
+
+                // TODO(dawn:1250): Consider using the umulExtended and uaddCarry intrinsics once
+                // available.
+                var chunks : array<u32, 5>;
+                chunks[0] = timestamp.low & 0xFFFFu;
+                chunks[1] = timestamp.low >> 16u;
+                chunks[2] = timestamp.high & 0xFFFFu;
+                chunks[3] = timestamp.high >> 16u;
+                chunks[4] = 0u;
+
+                // Multiply all the chunks with the integer period.
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    chunks[i] = chunks[i] * params.multiplier;
+                }
+
+                // Propagate the carry
+                var carry = 0u;
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    var chunk_with_carry = chunks[i] + carry;
+                    carry = chunk_with_carry >> 16u;
+                    chunks[i] = chunk_with_carry & 0xFFFFu;
+                }
+                chunks[4] = carry;
+
+                // Apply the right shift.
+                for (var i = 0u; i < 4u; i = i + 1u) {
+                    var low = chunks[i] >> params.right_shift;
+                    var high = (chunks[i + 1u] << (16u - params.right_shift)) & 0xFFFFu;
+                    chunks[i] = low | high;
+                }
+
+                timestamps.t[index].low = chunks[0] | (chunks[1] << 16u);
+                timestamps.t[index].high = chunks[2] | (chunks[3] << 16u);
+            }
+        )";
+
+        // Lazily builds the conversion pipeline and caches it (plus its shader module)
+        // in the device's InternalPipelineStore so later calls reuse it.
+        ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
+            DeviceBase* device) {
+            InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+            if (store->timestampComputePipeline == nullptr) {
+                // Create compute shader module if not cached before.
+                if (store->timestampCS == nullptr) {
+                    DAWN_TRY_ASSIGN(
+                        store->timestampCS,
+                        utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
+                }
+
+                // Create binding group layout
+                Ref<BindGroupLayoutBase> bgl;
+                DAWN_TRY_ASSIGN(
+                    bgl, utils::MakeBindGroupLayout(
+                             device,
+                             {
+                                 {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                                 {1, wgpu::ShaderStage::Compute,
+                                  wgpu::BufferBindingType::ReadOnlyStorage},
+                                 {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                             },
+                             /* allowInternalBinding */ true));
+
+                // Create pipeline layout
+                Ref<PipelineLayoutBase> layout;
+                DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
+
+                // Create ComputePipeline.
+                ComputePipelineDescriptor computePipelineDesc = {};
+                // Generate the layout based on shader module.
+                computePipelineDesc.layout = layout.Get();
+                computePipelineDesc.compute.module = store->timestampCS.Get();
+                computePipelineDesc.compute.entryPoint = "main";
+
+                DAWN_TRY_ASSIGN(store->timestampComputePipeline,
+                                device->CreateComputePipeline(&computePipelineDesc));
+            }
+
+            return store->timestampComputePipeline.Get();
+        }
+
+    }  // anonymous namespace
+
+    // Precomputes the fixed-point representation of |period| (nanoseconds per tick) used
+    // by the conversion shader: multiplier / 2^rightShift ~= period.
+    TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
+        : first(first), count(count), offset(offset) {
+        // The overall conversion happening, if p is the period, m the multiplier, s the shift, is:
+        //
+        //   m = round(p * 2^s)
+        //
+        // Then in the shader we compute:
+        //
+        //   m / 2^s = round(p * 2^s) / 2^s ~= p
+        //
+        // The goal is to find the best shift to keep the precision of computations. The
+        // conversion shader uses chunks of 16 bits to compute the multiplication with the period,
+        // so we need to keep the multiplier under 2^16. At the same time, the larger the
+        // multiplier, the better the precision, so we maximize the value of the right shift while
+        // keeping the multiplier under 2^16.
+        //
+        // Clamp the exponent at 0 before the unsigned conversion: for period < 1ns,
+        // log2(period) is negative and a direct uint32_t cast wrapped it to a huge value,
+        // forcing rightShift to 0 and truncating the multiplier to 0 — which zeroed every
+        // converted timestamp. With the clamp, period < 1 gets the maximum shift (16)
+        // and the best available precision.
+        int32_t upperLog2Signed = static_cast<int32_t>(ceil(log2(period)));
+        uint32_t upperLog2 = upperLog2Signed < 0 ? 0u : static_cast<uint32_t>(upperLog2Signed);
+
+        // Clamp the shift to 16 because we're doing computations in 16bit chunks. The
+        // multiplication by the period will overflow the chunks, but timestamps are mostly
+        // informational so that's ok.
+        rightShift = 16u - std::min(upperLog2, 16u);
+        multiplier = uint32_t(period * (1 << rightShift));
+    }
+
+    // Appends a compute pass to |encoder| that runs the timestamp-conversion pipeline
+    // over |timestamps| in place, reading |availability| and the uniform |params|.
+    // Dispatches one workgroup per 8 timestamps to match the shader's workgroup size.
+    MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+                                                    BufferBase* timestamps,
+                                                    BufferBase* availability,
+                                                    BufferBase* params) {
+        DeviceBase* device = encoder->GetDevice();
+
+        ComputePipelineBase* pipeline;
+        DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
+
+        // Prepare bind group layout.
+        Ref<BindGroupLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+        // Create bind group after all binding entries are set.
+        Ref<BindGroupBase> bindGroup;
+        DAWN_TRY_ASSIGN(bindGroup,
+                        utils::MakeBindGroup(device, layout,
+                                             {{0, timestamps}, {1, availability}, {2, params}}));
+
+        // Create compute encoder and issue dispatch.
+        ComputePassDescriptor passDesc = {};
+        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+        Ref<ComputePassEncoder> pass = AcquireRef(encoder->APIBeginComputePass(&passDesc));
+        pass->APISetPipeline(pipeline);
+        pass->APISetBindGroup(0, bindGroup.Get());
+        pass->APIDispatch(
+            static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
+        pass->APIEnd();
+
+        return {};
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/QueryHelper.h b/chromium/third_party/dawn/src/dawn/native/QueryHelper.h
new file mode 100644
index 00000000000..111b1952cdf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/QueryHelper.h
@@ -0,0 +1,43 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUERYHELPER_H_
+#define DAWNNATIVE_QUERYHELPER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ObjectBase.h"
+
+namespace dawn::native {
+
+ class BufferBase;
+ class CommandEncoder;
+
+ struct TimestampParams {
+ TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
+
+ uint32_t first;
+ uint32_t count;
+ uint32_t offset;
+ uint32_t multiplier;
+ uint32_t rightShift;
+ };
+
+ MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+ BufferBase* timestamps,
+ BufferBase* availability,
+ BufferBase* params);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_QUERYHELPER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp
new file mode 100644
index 00000000000..3f20dab9de5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/QuerySet.cpp
@@ -0,0 +1,180 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/Features.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <set>
+
+namespace dawn::native {
+
+    namespace {
+
+        // Placeholder query set returned when creation fails; constructed with the error
+        // tag so every use of it fails validation.
+        class ErrorQuerySet final : public QuerySetBase {
+          public:
+            ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
+            }
+
+          private:
+            void DestroyImpl() override {
+                // The error-tag constructor never calls TrackInDevice(), so the normal
+                // destroy path should not reach this object.
+                UNREACHABLE();
+            }
+        };
+
+    }  // anonymous namespace
+
+    // Validates a QuerySetDescriptor: query type, count limit, feature/toggle
+    // requirements per type, and the pipeline-statistics list (non-empty, valid,
+    // duplicate-free) when and only when the type requires it.
+    MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
+                                          const QuerySetDescriptor* descriptor) {
+        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+        DAWN_TRY(ValidateQueryType(descriptor->type));
+
+        DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
+                        "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
+                        kMaxQueryCount);
+
+        switch (descriptor->type) {
+            case wgpu::QueryType::Occlusion:
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                                "Pipeline statistics specified for a query of type %s.",
+                                descriptor->type);
+                break;
+
+            case wgpu::QueryType::PipelineStatistics: {
+                // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
+                // Disallow it as unsafe until the implementation is completed.
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Pipeline statistics queries are disallowed because they are not "
+                                "fully implemented");
+
+                DAWN_INVALID_IF(
+                    !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
+                    "Pipeline statistics query set created without the feature being enabled.");
+
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
+                                "Pipeline statistics query set created with 0 statistics.");
+
+                // Use a set to reject duplicate statistic names.
+                std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
+                for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+                    DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
+
+                    auto [_, inserted] =
+                        pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
+                    DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
+                                    descriptor->pipelineStatistics[i]);
+                }
+            } break;
+
+            case wgpu::QueryType::Timestamp:
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Timestamp queries are disallowed because they may expose precise "
+                                "timing information.");
+
+                DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
+                                "Timestamp query set created without the feature being enabled.");
+
+                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                                "Pipeline statistics specified for a query of type %s.",
+                                descriptor->type);
+                break;
+
+            default:
+                break;
+        }
+
+        return {};
+    }
+
+    // Constructor for a valid query set; copies the pipeline-statistics list, sizes the
+    // per-query availability vector, and registers the object with the device.
+    QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
+        : ApiObjectBase(device, descriptor->label),
+          mQueryType(descriptor->type),
+          mQueryCount(descriptor->count),
+          mState(QuerySetState::Available) {
+        for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+            mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
+        }
+
+        mQueryAvailability.resize(descriptor->count);
+        TrackInDevice();
+    }
+
+    // Constructor used only for mocking and testing (state stays Unavailable).
+    QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    // Error-tag constructor; note it does not call TrackInDevice().
+    QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag) {
+    }
+
+    QuerySetBase::~QuerySetBase() {
+        // Uninitialized or already destroyed
+        ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+    }
+
+    void QuerySetBase::DestroyImpl() {
+        mState = QuerySetState::Destroyed;
+    }
+
+    // static
+    QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
+        return new ErrorQuerySet(device);
+    }
+
+    ObjectType QuerySetBase::GetType() const {
+        return ObjectType::QuerySet;
+    }
+
+    wgpu::QueryType QuerySetBase::GetQueryType() const {
+        return mQueryType;
+    }
+
+    uint32_t QuerySetBase::GetQueryCount() const {
+        return mQueryCount;
+    }
+
+    const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
+        return mPipelineStatistics;
+    }
+
+    const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
+        return mQueryAvailability;
+    }
+
+    // NOTE(review): no bounds check — assumes index < GetQueryCount(); confirm callers
+    // validate the index first.
+    void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
+        mQueryAvailability[index] = available;
+    }
+
+    // A destroyed query set may still be referenced; reject its use at submit time.
+    MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
+        ASSERT(!IsError());
+        DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
+        return {};
+    }
+
+    void QuerySetBase::APIDestroy() {
+        if (GetDevice()->ConsumedError(ValidateDestroy())) {
+            return;
+        }
+        Destroy();
+    }
+
+    MaybeError QuerySetBase::ValidateDestroy() const {
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+        return {};
+    }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/QuerySet.h b/chromium/third_party/dawn/src/dawn/native/QuerySet.h
new file mode 100644
index 00000000000..39a69df4255
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/QuerySet.h
@@ -0,0 +1,72 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUERYSET_H_
+#define DAWNNATIVE_QUERYSET_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+    // Frontend object for a GPU query set (occlusion, pipeline-statistics or timestamp
+    // queries); tracks per-query availability and a three-state lifecycle.
+    class QuerySetBase : public ApiObjectBase {
+      public:
+        QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+
+        static QuerySetBase* MakeError(DeviceBase* device);
+
+        ObjectType GetType() const override;
+
+        wgpu::QueryType GetQueryType() const;
+        uint32_t GetQueryCount() const;
+        const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
+
+        const std::vector<bool>& GetQueryAvailability() const;
+        void SetQueryAvailability(uint32_t index, bool available);
+
+        // Errors if the query set has been destroyed.
+        MaybeError ValidateCanUseInSubmitNow() const;
+
+        void APIDestroy();
+
+      protected:
+        QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+        // Constructor used only for mocking and testing.
+        QuerySetBase(DeviceBase* device);
+        void DestroyImpl() override;
+
+        ~QuerySetBase() override;
+
+      private:
+        MaybeError ValidateDestroy() const;
+
+        wgpu::QueryType mQueryType;
+        uint32_t mQueryCount;
+        std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
+
+        // Unavailable -> Available on normal construction; -> Destroyed on destroy.
+        enum class QuerySetState { Unavailable, Available, Destroyed };
+        QuerySetState mState = QuerySetState::Unavailable;
+
+        // Indicates the available queries on the query set for resolving
+        std::vector<bool> mQueryAvailability;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_QUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Queue.cpp b/chromium/third_party/dawn/src/dawn/native/Queue.cpp
new file mode 100644
index 00000000000..6c061d3233f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Queue.cpp
@@ -0,0 +1,512 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Queue.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/CopyTextureForBrowserHelper.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderPassEncoder.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/Texture.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <cstring>
+
+namespace dawn::native {
+
+ namespace {
+
+ void CopyTextureData(uint8_t* dstPointer,
+ const uint8_t* srcPointer,
+ uint32_t depth,
+ uint32_t rowsPerImage,
+ uint64_t imageAdditionalStride,
+ uint32_t actualBytesPerRow,
+ uint32_t dstBytesPerRow,
+ uint32_t srcBytesPerRow) {
+ bool copyWholeLayer =
+ actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+ bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+
+ if (!copyWholeLayer) { // copy row by row
+ for (uint32_t d = 0; d < depth; ++d) {
+ for (uint32_t h = 0; h < rowsPerImage; ++h) {
+ memcpy(dstPointer, srcPointer, actualBytesPerRow);
+ dstPointer += dstBytesPerRow;
+ srcPointer += srcBytesPerRow;
+ }
+ srcPointer += imageAdditionalStride;
+ }
+ } else {
+ uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
+ if (!copyWholeData) { // copy layer by layer
+ for (uint32_t d = 0; d < depth; ++d) {
+ memcpy(dstPointer, srcPointer, layerSize);
+ dstPointer += layerSize;
+ srcPointer += layerSize + imageAdditionalStride;
+ }
+ } else { // do a single copy
+ memcpy(dstPointer, srcPointer, layerSize * depth);
+ }
+ }
+ }
+
        // Allocates space in the device's dynamic uploader and copies the
        // user-supplied texel data into it, re-packing rows so the staging
        // buffer uses optimallyAlignedBytesPerRow and an offset aligned for a
        // buffer-to-texture copy. Returns the handle into the staging buffer.
        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
            DeviceBase* device,
            const void* data,
            uint32_t alignedBytesPerRow,
            uint32_t optimallyAlignedBytesPerRow,
            uint32_t alignedRowsPerImage,
            const TextureDataLayout& dataLayout,
            bool hasDepthOrStencil,
            const TexelBlockInfo& blockInfo,
            const Extent3D& writeSizePixel) {
            // Size of the re-packed data in the staging buffer.
            uint64_t newDataSizeBytes;
            DAWN_TRY_ASSIGN(
                newDataSizeBytes,
                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
                                           alignedRowsPerImage));

            uint64_t optimalOffsetAlignment =
                device->GetOptimalBufferToTextureCopyOffsetAlignment();
            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
            ASSERT(IsPowerOfTwo(blockInfo.byteSize));
            // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
            // since both of them are powers of two, we only need to align to the max value.
            uint64_t offsetAlignment =
                std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));

            // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
            // by WebGPU and Vulkan SPEC.
            if (hasDepthOrStencil) {
                constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
                offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
            }

            UploadHandle uploadHandle;
            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
                                              newDataSizeBytes, device->GetPendingCommandSerial(),
                                              offsetAlignment));
            ASSERT(uploadHandle.mappedBuffer != nullptr);

            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
            srcPointer += dataLayout.offset;

            // rowsPerImage == 0 means "tightly packed": default it to the
            // number of block-rows actually being written.
            uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
            if (dataRowsPerImage == 0) {
                dataRowsPerImage = writeSizePixel.height / blockInfo.height;
            }

            ASSERT(dataRowsPerImage >= alignedRowsPerImage);
            // Extra source bytes between layers that are not copied (rows in
            // the source layout beyond the rows being written).
            uint64_t imageAdditionalStride =
                dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);

            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
                            alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);

            return uploadHandle;
        }
+
        // Queue task that fires the OnSubmittedWorkDone callback exactly once:
        // with Success when the tracked serial completes, or with DeviceLost
        // if the device is lost first.
        struct SubmittedWorkDone : QueueBase::TaskInFlight {
            SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
                : mCallback(callback), mUserdata(userdata) {
            }
            void Finish() override {
                ASSERT(mCallback != nullptr);
                mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
                // Clear the callback so it provably cannot fire twice.
                mCallback = nullptr;
            }
            void HandleDeviceLoss() override {
                ASSERT(mCallback != nullptr);
                mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
                mCallback = nullptr;
            }
            ~SubmittedWorkDone() override = default;

          private:
            WGPUQueueWorkDoneCallback mCallback = nullptr;
            void* mUserdata;
        };
+
        // Queue subclass backing error objects returned by QueueBase::MakeError.
        // Submitting through it is a programming error: validation is expected
        // to reject error objects before SubmitImpl can be reached.
        class ErrorQueue : public QueueBase {
          public:
            ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
            }

          private:
            MaybeError SubmitImpl(uint32_t commandCount,
                                  CommandBufferBase* const* commands) override {
                UNREACHABLE();
            }
        };
+ } // namespace
+
+ // QueueBase
+
    // Out-of-line definition anchors TaskInFlight's vtable in this TU.
    QueueBase::TaskInFlight::~TaskInFlight() {
    }

    QueueBase::QueueBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
    }

    // Constructor for error objects (see MakeError below).
    QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
        : ApiObjectBase(device, tag) {
    }

    QueueBase::~QueueBase() {
        // All in-flight tasks must have been drained via Tick() or
        // HandleDeviceLoss() before the queue is destroyed.
        ASSERT(mTasksInFlight.Empty());
    }

    void QueueBase::DestroyImpl() {
    }

    // static
    QueueBase* QueueBase::MakeError(DeviceBase* device) {
        return new ErrorQueue(device);
    }

    ObjectType QueueBase::GetType() const {
        return ObjectType::Queue;
    }
+
    // webgpu.h entry point for queue.submit(). Command buffers are one-shot:
    // they are destroyed after submission regardless of whether the submit
    // itself succeeded.
    void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
        SubmitInternal(commandCount, commands);

        for (uint32_t i = 0; i < commandCount; ++i) {
            commands[i]->Destroy();
        }
    }
+
    // webgpu.h entry point for queue.onSubmittedWorkDone(). On validation
    // failure the callback is invoked immediately (synchronously) with the
    // appropriate error status; otherwise a task is queued to fire it when the
    // pending command serial completes.
    void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
                                           WGPUQueueWorkDoneCallback callback,
                                           void* userdata) {
        // The error status depends on the type of error so we let the validation function choose it
        WGPUQueueWorkDoneStatus status;
        if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
            callback(status, userdata);
            return;
        }

        std::unique_ptr<SubmittedWorkDone> task =
            std::make_unique<SubmittedWorkDone>(callback, userdata);

        // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
        // also used to make sure ALL queue work is finished in tests, so we also wait for pending
        // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
        // spec).
        TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
    }
+
    // Registers a task to run once `serial` completes on the GPU, and tells
    // the device to expect that serial so future ticks will reach it.
    void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
        mTasksInFlight.Enqueue(std::move(task), serial);
        GetDevice()->AddFutureSerial(serial);
    }

    // Completes every task whose serial is <= finishedSerial.
    void QueueBase::Tick(ExecutionSerial finishedSerial) {
        // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
        // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
        // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
        // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
        // callbacks.
        std::vector<std::unique_ptr<TaskInFlight>> tasks;
        for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
            tasks.push_back(std::move(task));
        }
        mTasksInFlight.ClearUpTo(finishedSerial);

        for (auto& task : tasks) {
            task->Finish();
        }
    }

    // On device loss, fails every remaining task (e.g. pending
    // OnSubmittedWorkDone callbacks) and empties the queue so the destructor's
    // ASSERT(mTasksInFlight.Empty()) holds.
    void QueueBase::HandleDeviceLoss() {
        for (auto& task : mTasksInFlight.IterateAll()) {
            task->HandleDeviceLoss();
        }
        mTasksInFlight.Clear();
    }
+
    // webgpu.h entry point for queue.writeBuffer(); errors are routed through
    // the device's error handling rather than returned.
    void QueueBase::APIWriteBuffer(BufferBase* buffer,
                                   uint64_t bufferOffset,
                                   const void* data,
                                   size_t size) {
        GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
    }

    // Validates, then delegates the actual upload to WriteBufferImpl (which
    // backends may override).
    MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
        DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
        return WriteBufferImpl(buffer, bufferOffset, data, size);
    }
+
    // Default WriteBuffer implementation: stages `data` in the dynamic
    // uploader's mapped staging memory and records a staging-to-buffer copy
    // for the pending command serial.
    MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
                                          uint64_t bufferOffset,
                                          const void* data,
                                          size_t size) {
        // Zero-sized writes are valid no-ops.
        if (size == 0) {
            return {};
        }

        DeviceBase* device = GetDevice();

        UploadHandle uploadHandle;
        DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
                                          size, device->GetPendingCommandSerial(),
                                          kCopyBufferToBufferOffsetAlignment));
        ASSERT(uploadHandle.mappedBuffer != nullptr);

        memcpy(uploadHandle.mappedBuffer, data, size);

        // Make sure the device will tick up to the serial the copy is
        // recorded against, so the staging allocation gets reclaimed.
        device->AddFutureSerial(device->GetPendingCommandSerial());

        return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
                                               buffer, bufferOffset, size);
    }
+
    // webgpu.h entry point for queue.writeTexture(); errors are routed through
    // the device's error handling rather than returned.
    void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
                                    const void* data,
                                    size_t dataSize,
                                    const TextureDataLayout* dataLayout,
                                    const Extent3D* writeSize) {
        GetDevice()->ConsumedError(
            WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
    }

    // Validates the write, fills in the layout's default bytesPerRow /
    // rowsPerImage values, then delegates to WriteTextureImpl.
    MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
                                               const void* data,
                                               size_t dataSize,
                                               const TextureDataLayout& dataLayout,
                                               const Extent3D* writeSize) {
        DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));

        // An empty write extent is a valid no-op.
        if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
            return {};
        }

        const TexelBlockInfo& blockInfo =
            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
        TextureDataLayout layout = dataLayout;
        ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
        return WriteTextureImpl(*destination, data, layout, *writeSize);
    }
+
    // Default WriteTexture implementation: re-packs the user data into a
    // staging allocation with backend-optimal row pitch and offset alignment,
    // then records a staging-to-texture copy for the pending command serial.
    MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
                                           const void* data,
                                           const TextureDataLayout& dataLayout,
                                           const Extent3D& writeSizePixel) {
        const Format& format = destination.texture->GetFormat();
        const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;

        // We are only copying the part of the data that will appear in the texture.
        // Note that validating texture copy range ensures that writeSizePixel->width and
        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
        ASSERT(writeSizePixel.width % blockInfo.width == 0);
        ASSERT(writeSizePixel.height % blockInfo.height == 0);
        // Tightly-packed row size / row count in texel blocks.
        uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
        uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;

        // Row pitch the backend wants for buffer-to-texture copies.
        uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
        uint32_t optimallyAlignedBytesPerRow =
            Align(alignedBytesPerRow, optimalBytesPerRowAlignment);

        UploadHandle uploadHandle;
        DAWN_TRY_ASSIGN(uploadHandle,
                        UploadTextureDataAligningBytesPerRowAndOffset(
                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
                            alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
                            writeSizePixel));

        // The copy source now describes the re-packed staging data, not the
        // caller's original layout.
        TextureDataLayout passDataLayout = dataLayout;
        passDataLayout.offset = uploadHandle.startOffset;
        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
        passDataLayout.rowsPerImage = alignedRowsPerImage;

        TextureCopy textureCopy;
        textureCopy.texture = destination.texture;
        textureCopy.mipLevel = destination.mipLevel;
        textureCopy.origin = destination.origin;
        textureCopy.aspect = ConvertAspect(format, destination.aspect);

        DeviceBase* device = GetDevice();

        // Make sure the device will tick up to the serial the copy is
        // recorded against, so the staging allocation gets reclaimed.
        device->AddFutureSerial(device->GetPendingCommandSerial());

        return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
                                                &textureCopy, writeSizePixel);
    }
+
    // webgpu.h entry point for queue.copyTextureForBrowser(); errors are
    // routed through the device's error handling rather than returned.
    void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
                                             const ImageCopyTexture* destination,
                                             const Extent3D* copySize,
                                             const CopyTextureForBrowserOptions* options) {
        GetDevice()->ConsumedError(
            CopyTextureForBrowserInternal(source, destination, copySize, options));
    }

    // Optionally validates, then performs the browser-style texture copy
    // (color conversion etc.) via the CopyTextureForBrowser helper.
    MaybeError QueueBase::CopyTextureForBrowserInternal(
        const ImageCopyTexture* source,
        const ImageCopyTexture* destination,
        const Extent3D* copySize,
        const CopyTextureForBrowserOptions* options) {
        if (GetDevice()->IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(
                ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
                "validating CopyTextureForBrowser from %s to %s", source->texture,
                destination->texture);
        }

        return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
    }
+
    // Validates a submit: each command buffer must be usable (not already
    // submitted/destroyed), and every buffer, texture, external texture and
    // query set referenced by its passes or top-level commands must be usable
    // on the queue right now.
    MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
                                         CommandBufferBase* const* commands) const {
        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
        DAWN_TRY(GetDevice()->ValidateObject(this));

        for (uint32_t i = 0; i < commandCount; ++i) {
            DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
            DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());

            const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();

            // Resources used inside render passes.
            for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
                for (const BufferBase* buffer : scope.buffers) {
                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
                }

                for (const TextureBase* texture : scope.textures) {
                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
                }

                for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
                }
            }

            // Resources used inside compute passes.
            for (const ComputePassResourceUsage& pass : usages.computePasses) {
                for (const BufferBase* buffer : pass.referencedBuffers) {
                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
                }
                for (const TextureBase* texture : pass.referencedTextures) {
                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
                }
                for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
                }
            }

            // Resources used by top-level commands (copies, query resolves...).
            for (const BufferBase* buffer : usages.topLevelBuffers) {
                DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
            }
            for (const TextureBase* texture : usages.topLevelTextures) {
                DAWN_TRY(texture->ValidateCanUseInSubmitNow());
            }
            for (const QuerySetBase* querySet : usages.usedQuerySets) {
                DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
            }
        }

        return {};
    }
+
    // Validates OnSubmittedWorkDone. *status is assigned BEFORE each check so
    // that if a check fails, the caller already holds the status matching that
    // failure (DeviceLost vs generic Error).
    MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
                                                      WGPUQueueWorkDoneStatus* status) const {
        *status = WGPUQueueWorkDoneStatus_DeviceLost;
        DAWN_TRY(GetDevice()->ValidateIsAlive());

        *status = WGPUQueueWorkDoneStatus_Error;
        DAWN_TRY(GetDevice()->ValidateObject(this));

        // Only signalValue == 0 is accepted here.
        DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);

        return {};
    }
+
    // Validates a queue.writeTexture() call: destination texture usability,
    // copy range, data layout bounds, and destination usage/sample-count
    // requirements.
    MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
                                               size_t dataSize,
                                               const TextureDataLayout& dataLayout,
                                               const Extent3D* writeSize) const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(GetDevice()->ValidateObject(destination->texture));

        DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));

        // The layout's start offset must fall within the provided data.
        DAWN_INVALID_IF(dataLayout.offset > dataSize,
                        "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
                        dataSize);

        // The destination must be writable as a copy destination.
        DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
                        "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
                        destination->texture, wgpu::TextureUsage::CopyDst);

        // Multisampled textures cannot be written with writeTexture.
        DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
                        "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
                        destination->texture);

        DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
        // We validate texture copy range before validating linear texture data,
        // because in the latter we divide copyExtent.width by blockWidth and
        // copyExtent.height by blockHeight while the divisibility conditions are
        // checked in validating texture copy range.
        DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));

        const TexelBlockInfo& blockInfo =
            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;

        DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));

        DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());

        return {};
    }
+
    // Shared submit path for APISubmit: checks the device is alive, optionally
    // validates, then dispatches to the backend's SubmitImpl. All failures are
    // consumed by the device (submit itself returns nothing to the caller).
    void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
        DeviceBase* device = GetDevice();
        if (device->ConsumedError(device->ValidateIsAlive())) {
            // If device is lost, don't let any commands be submitted
            return;
        }

        TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
        if (device->IsValidationEnabled() &&
            device->ConsumedError(ValidateSubmit(commandCount, commands))) {
            return;
        }
        // ValidateSubmit (or skipped validation on a non-error device)
        // guarantees we never reach SubmitImpl on an error object.
        ASSERT(!IsError());

        if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
            return;
        }
    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Queue.h b/chromium/third_party/dawn/src/dawn/native/Queue.h
new file mode 100644
index 00000000000..ee1074de878
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Queue.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_QUEUE_H_
+#define DAWNNATIVE_QUEUE_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
    // Frontend representation of a WebGPU queue. Owns the list of in-flight
    // tasks (e.g. OnSubmittedWorkDone callbacks) keyed by execution serial and
    // implements the frontend half of submit/writeBuffer/writeTexture;
    // backends override the *Impl hooks.
    class QueueBase : public ApiObjectBase {
      public:
        // A deferred callback that fires either when its serial completes
        // (Finish) or when the device is lost (HandleDeviceLoss).
        struct TaskInFlight {
            virtual ~TaskInFlight();
            virtual void Finish() = 0;
            virtual void HandleDeviceLoss() = 0;
        };

        ~QueueBase() override;

        // Returns a queue marked as an error object (an ErrorQueue).
        static QueueBase* MakeError(DeviceBase* device);

        ObjectType GetType() const override;

        // Dawn API
        void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
        void APIOnSubmittedWorkDone(uint64_t signalValue,
                                    WGPUQueueWorkDoneCallback callback,
                                    void* userdata);
        void APIWriteBuffer(BufferBase* buffer,
                            uint64_t bufferOffset,
                            const void* data,
                            size_t size);
        void APIWriteTexture(const ImageCopyTexture* destination,
                             const void* data,
                             size_t dataSize,
                             const TextureDataLayout* dataLayout,
                             const Extent3D* writeSize);
        void APICopyTextureForBrowser(const ImageCopyTexture* source,
                                      const ImageCopyTexture* destination,
                                      const Extent3D* copySize,
                                      const CopyTextureForBrowserOptions* options);

        // Validating variant of writeBuffer, also usable internally.
        MaybeError WriteBuffer(BufferBase* buffer,
                               uint64_t bufferOffset,
                               const void* data,
                               size_t size);
        // Runs `task` once `serial` completes on the GPU.
        void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
        // Completes all tasks with serial <= finishedSerial.
        void Tick(ExecutionSerial finishedSerial);
        // Fails all remaining tasks and empties the in-flight list.
        void HandleDeviceLoss();

      protected:
        QueueBase(DeviceBase* device);
        // Constructor for error objects.
        QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
        void DestroyImpl() override;

      private:
        MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
                                        const void* data,
                                        size_t dataSize,
                                        const TextureDataLayout& dataLayout,
                                        const Extent3D* writeSize);
        MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
                                                 const ImageCopyTexture* destination,
                                                 const Extent3D* copySize,
                                                 const CopyTextureForBrowserOptions* options);

        // Backend hooks; Write*Impl have default staging-buffer-based
        // implementations in Queue.cpp.
        virtual MaybeError SubmitImpl(uint32_t commandCount,
                                      CommandBufferBase* const* commands) = 0;
        virtual MaybeError WriteBufferImpl(BufferBase* buffer,
                                           uint64_t bufferOffset,
                                           const void* data,
                                           size_t size);
        virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
                                            const void* data,
                                            const TextureDataLayout& dataLayout,
                                            const Extent3D& writeSize);

        MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
        MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
                                               WGPUQueueWorkDoneStatus* status) const;
        MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
                                        size_t dataSize,
                                        const TextureDataLayout& dataLayout,
                                        const Extent3D* writeSize) const;

        void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);

        // Tasks pending completion, ordered by execution serial.
        SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_QUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp
new file mode 100644
index 00000000000..da1018823ba
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundle.cpp
@@ -0,0 +1,91 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderBundle.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/RenderBundleEncoder.h"
+
+namespace dawn::native {
+
    // Takes ownership of the encoder's recorded commands and the resource
    // usage / attachment-compatibility information needed to validate later
    // ExecuteBundles calls.
    RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
                                       const RenderBundleDescriptor* descriptor,
                                       Ref<AttachmentState> attachmentState,
                                       bool depthReadOnly,
                                       bool stencilReadOnly,
                                       RenderPassResourceUsage resourceUsage,
                                       IndirectDrawMetadata indirectDrawMetadata)
        : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
          mCommands(encoder->AcquireCommands()),
          mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
          mAttachmentState(std::move(attachmentState)),
          mDepthReadOnly(depthReadOnly),
          mStencilReadOnly(stencilReadOnly),
          mResourceUsage(std::move(resourceUsage)) {
        TrackInDevice();
    }

    void RenderBundleBase::DestroyImpl() {
        FreeCommands(&mCommands);

        // Remove reference to the attachment state so that we don't have lingering references to
        // it preventing it from being uncached in the device.
        mAttachmentState = nullptr;
    }

    // static
    RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
        return new RenderBundleBase(device, ObjectBase::kError);
    }

    // Constructor for error objects; mIndirectDrawMetadata still needs the
    // device limits to be constructible.
    RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
        : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
    }
+
    ObjectType RenderBundleBase::GetType() const {
        return ObjectType::RenderBundle;
    }

    // Iterator over the recorded commands, replayed by ExecuteBundles.
    CommandIterator* RenderBundleBase::GetCommands() {
        return &mCommands;
    }

    // Attachment compatibility info; the ASSERTs below document that error
    // bundles must never reach these accessors.
    const AttachmentState* RenderBundleBase::GetAttachmentState() const {
        ASSERT(!IsError());
        return mAttachmentState.Get();
    }

    bool RenderBundleBase::IsDepthReadOnly() const {
        ASSERT(!IsError());
        return mDepthReadOnly;
    }

    bool RenderBundleBase::IsStencilReadOnly() const {
        ASSERT(!IsError());
        return mStencilReadOnly;
    }

    const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
        ASSERT(!IsError());
        return mResourceUsage;
    }

    const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
        return mIndirectDrawMetadata;
    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundle.h b/chromium/third_party/dawn/src/dawn/native/RenderBundle.h
new file mode 100644
index 00000000000..9112b77c998
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundle.h
@@ -0,0 +1,73 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERBUNDLE_H_
+#define DAWNNATIVE_RENDERBUNDLE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/CommandAllocator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PassResourceUsage.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+
+namespace dawn::native {
+
+ struct RenderBundleDescriptor;
+ class RenderBundleEncoder;
+
    // Frontend representation of a finished render bundle: an immutable,
    // replayable recording of render commands plus the attachment-state and
    // resource-usage information needed to validate ExecuteBundles.
    class RenderBundleBase final : public ApiObjectBase {
      public:
        RenderBundleBase(RenderBundleEncoder* encoder,
                         const RenderBundleDescriptor* descriptor,
                         Ref<AttachmentState> attachmentState,
                         bool depthReadOnly,
                         bool stencilReadOnly,
                         RenderPassResourceUsage resourceUsage,
                         IndirectDrawMetadata indirectDrawMetadata);

        // Returns a bundle marked as an error object.
        static RenderBundleBase* MakeError(DeviceBase* device);

        ObjectType GetType() const override;

        // Recorded commands, replayed when the bundle is executed in a pass.
        CommandIterator* GetCommands();

        const AttachmentState* GetAttachmentState() const;
        bool IsDepthReadOnly() const;
        bool IsStencilReadOnly() const;
        const RenderPassResourceUsage& GetResourceUsage() const;
        const IndirectDrawMetadata& GetIndirectDrawMetadata();

      private:
        // Constructor for error objects.
        RenderBundleBase(DeviceBase* device, ErrorTag errorTag);

        void DestroyImpl() override;

        CommandIterator mCommands;
        IndirectDrawMetadata mIndirectDrawMetadata;
        Ref<AttachmentState> mAttachmentState;
        bool mDepthReadOnly;
        bool mStencilReadOnly;
        RenderPassResourceUsage mResourceUsage;
    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RENDERBUNDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp
new file mode 100644
index 00000000000..421774c18d6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.cpp
@@ -0,0 +1,167 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderBundleEncoder.h"
+
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native {
+
    // Checks that `textureFormat` is a known format that is both a color
    // format and renderable, as required for a bundle color attachment.
    MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
                                             wgpu::TextureFormat textureFormat) {
        DAWN_TRY(ValidateTextureFormat(textureFormat));
        const Format* format = nullptr;
        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
        DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
                        "Texture format %s is not color renderable.", textureFormat);
        return {};
    }
+
    // Checks that `textureFormat` is a renderable depth/stencil format, and
    // that when it has both aspects the read-only flags agree (a combined
    // format cannot be read-only in one aspect and writable in the other).
    MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
                                                    wgpu::TextureFormat textureFormat,
                                                    bool depthReadOnly,
                                                    bool stencilReadOnly) {
        DAWN_TRY(ValidateTextureFormat(textureFormat));
        const Format* format = nullptr;
        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
        DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
                        "Texture format %s is not depth/stencil renderable.", textureFormat);

        DAWN_INVALID_IF(
            format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
            "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
            "both depth and stencil aspects.",
            depthReadOnly, stencilReadOnly, textureFormat);

        return {};
    }
+
    // Validates a RenderBundleEncoderDescriptor: sample count, attachment
    // count limits, at least one attachment format, and per-format checks for
    // every color and depth/stencil format.
    MaybeError ValidateRenderBundleEncoderDescriptor(
        const DeviceBase* device,
        const RenderBundleEncoderDescriptor* descriptor) {
        DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
                        "Sample count (%u) is not supported.", descriptor->sampleCount);

        DAWN_INVALID_IF(
            descriptor->colorFormatsCount > kMaxColorAttachments,
            "Color formats count (%u) exceeds maximum number of color attachements (%u).",
            descriptor->colorFormatsCount, kMaxColorAttachments);

        // A bundle with no attachments at all has nothing to render into.
        DAWN_INVALID_IF(descriptor->colorFormatsCount == 0 &&
                            descriptor->depthStencilFormat == wgpu::TextureFormat::Undefined,
                        "No color or depth/stencil attachment formats specified.");

        for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
            DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, descriptor->colorFormats[i]),
                             "validating colorFormats[%u]", i);
        }

        if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
            DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
                                 device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
                                 descriptor->stencilReadOnly),
                             "validating depthStencilFormat");
        }

        return {};
    }
+
    // NOTE(review): &mBundleEncodingContext is handed to the base class before
    // that member is constructed. Taking the address of a not-yet-constructed
    // member is fine as long as RenderEncoderBase does not dereference it
    // during construction — confirm that invariant holds in RenderEncoderBase.
    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
                                             const RenderBundleEncoderDescriptor* descriptor)
        : RenderEncoderBase(device,
                            descriptor->label,
                            &mBundleEncodingContext,
                            device->GetOrCreateAttachmentState(descriptor),
                            descriptor->depthReadOnly,
                            descriptor->stencilReadOnly),
          mBundleEncodingContext(device, this) {
        TrackInDevice();
    }

    // Constructor for error objects.
    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
        : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
          mBundleEncodingContext(device, this) {
    }

    void RenderBundleEncoder::DestroyImpl() {
        RenderEncoderBase::DestroyImpl();
        mBundleEncodingContext.Destroy();
    }

    // static
    Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
        DeviceBase* device,
        const RenderBundleEncoderDescriptor* descriptor) {
        return AcquireRef(new RenderBundleEncoder(device, descriptor));
    }

    // static
    RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
        return new RenderBundleEncoder(device, ObjectBase::kError);
    }

    ObjectType RenderBundleEncoder::GetType() const {
        return ObjectType::RenderBundleEncoder;
    }

    // Transfers the recorded commands out of the encoding context (called by
    // RenderBundleBase's constructor).
    CommandIterator RenderBundleEncoder::AcquireCommands() {
        return mBundleEncodingContext.AcquireCommands();
    }
+
    // webgpu.h entry point for renderBundleEncoder.finish(). On failure the
    // error is consumed by the device and an error bundle is returned so the
    // caller never receives null.
    RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
        RenderBundleBase* result = nullptr;

        if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).",
                                       this, descriptor)) {
            return RenderBundleBase::MakeError(GetDevice());
        }

        return result;
    }

    // Closes the encoding context, validates the recording, and packages the
    // commands plus usage/attachment info into a new RenderBundleBase.
    ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
        const RenderBundleDescriptor* descriptor) {
        // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
        // internal state of the encoding context. Subsequent calls to encode commands will generate
        // errors.
        DAWN_TRY(mBundleEncodingContext.Finish());

        RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
        if (IsValidationEnabled()) {
            DAWN_TRY(GetDevice()->ValidateObject(this));
            DAWN_TRY(ValidateProgrammableEncoderEnd());
            DAWN_TRY(ValidateFinish(usages));
        }

        return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
                                    IsStencilReadOnly(), std::move(usages),
                                    std::move(mIndirectDrawMetadata));
    }

    // Finish-time validation: the encoder itself and the sync-scope usage of
    // every resource referenced by the recording.
    MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
        return {};
    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h
new file mode 100644
index 00000000000..46c147005e7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderBundleEncoder.h
@@ -0,0 +1,56 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERBUNDLEENCODER_H_
+#define DAWNNATIVE_RENDERBUNDLEENCODER_H_
+
+#include "dawn/native/EncodingContext.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderEncoderBase.h"
+
+namespace dawn::native {
+
+ MaybeError ValidateRenderBundleEncoderDescriptor(
+ const DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor);
+
+ class RenderBundleEncoder final : public RenderEncoderBase {
+ public:
+ static Ref<RenderBundleEncoder> Create(DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor);
+ static RenderBundleEncoder* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
+
+ CommandIterator AcquireCommands();
+
+ private:
+ RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
+ RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
+
+ void DestroyImpl() override;
+
+ ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
+ MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
+
+ EncodingContext mBundleEncodingContext;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp
new file mode 100644
index 00000000000..eeaf0e81ac3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.cpp
@@ -0,0 +1,414 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderEncoderBase.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <math.h>
+#include <cstring>
+
+namespace dawn::native {
+
+ RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+ const char* label,
+ EncodingContext* encodingContext,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly)
+ : ProgrammableEncoder(device, label, encodingContext),
+ mIndirectDrawMetadata(device->GetLimits()),
+ mAttachmentState(std::move(attachmentState)),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+ mDepthReadOnly = depthReadOnly;
+ mStencilReadOnly = stencilReadOnly;
+ }
+
+ RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : ProgrammableEncoder(device, encodingContext, errorTag),
+ mIndirectDrawMetadata(device->GetLimits()),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+ }
+
+ void RenderEncoderBase::DestroyImpl() {
+ // Remove reference to the attachment state so that we don't have lingering references to
+ // it preventing it from being uncached in the device.
+ mAttachmentState = nullptr;
+ }
+
+ const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
+ ASSERT(!IsError());
+ ASSERT(mAttachmentState != nullptr);
+ return mAttachmentState.Get();
+ }
+
+ bool RenderEncoderBase::IsDepthReadOnly() const {
+ ASSERT(!IsError());
+ return mDepthReadOnly;
+ }
+
+ bool RenderEncoderBase::IsStencilReadOnly() const {
+ ASSERT(!IsError());
+ return mStencilReadOnly;
+ }
+
+ Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
+ return std::move(mAttachmentState);
+ }
+
+ void RenderEncoderBase::APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
+ firstVertex));
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+ instanceCount, firstInstance));
+ }
+
+ DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
+ draw->vertexCount = vertexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstVertex = firstVertex;
+ draw->firstInstance = firstInstance;
+
+ return {};
+ },
+ "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
+ firstInstance);
+ }
+
+ void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
+ "Base vertex (%u) must be zero.", baseVertex);
+
+ DAWN_TRY(
+ mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
+
+ // Although we don't know actual vertex access range in CPU, we still call the
+ // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
+ // mode vertex buffer with an array stride of zero.
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+ instanceCount, firstInstance));
+ }
+
+ DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
+ draw->indexCount = indexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstIndex = firstIndex;
+ draw->baseVertex = baseVertex;
+ draw->firstInstance = firstInstance;
+
+ return {};
+ },
+ "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount,
+ firstIndex, baseVertex, firstInstance);
+ }
+
+ void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+ }
+
+ DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+ cmd->indirectBuffer = indirectBuffer;
+ cmd->indirectOffset = indirectOffset;
+
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+ return {};
+ },
+ "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+ }
+
+ void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ (indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+ }
+
+ DrawIndexedIndirectCmd* cmd =
+ allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+ if (IsValidationEnabled()) {
+ // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+ // buffer which will store the validated indirect data. The buffer and offset
+ // will be updated to point to it.
+ // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+ // render pass, while the |cmd| pointer is still valid.
+ cmd->indirectBuffer = nullptr;
+
+ mIndirectDrawMetadata.AddIndexedIndirectDraw(
+ mCommandBufferState.GetIndexFormat(),
+ mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
+ cmd);
+ } else {
+ cmd->indirectBuffer = indirectBuffer;
+ cmd->indirectOffset = indirectOffset;
+ }
+
+ // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+ // validation, but it will unecessarily transition to indirectBuffer usage in the
+ // backend.
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+ return {};
+ },
+ "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+ }
+
+ void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+
+ // TODO(dawn:563): More detail about why the states are incompatible would be
+ // nice.
+ DAWN_INVALID_IF(
+ pipeline->GetAttachmentState() != mAttachmentState.Get(),
+ "Attachment state of %s is not compatible with the attachment state of %s",
+ pipeline, this);
+
+ DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
+ "%s writes depth while %s's depthReadOnly is true", pipeline,
+ this);
+
+ DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
+ "%s writes stencil while %s's stencilReadOnly is true",
+ pipeline, this);
+ }
+
+ mCommandBufferState.SetRenderPipeline(pipeline);
+
+ SetRenderPipelineCmd* cmd =
+ allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
+ cmd->pipeline = pipeline;
+
+ return {};
+ },
+ "encoding %s.SetPipeline(%s).", this, pipeline);
+ }
+
+ void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
+
+ DAWN_TRY(ValidateIndexFormat(format));
+
+ DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
+ "Index format must be specified");
+
+ DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
+ "Index buffer offset (%u) is not a multiple of the size (%u) "
+ "of %s.",
+ offset, IndexFormatSize(format), format);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Index buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
+ } else {
+ DAWN_INVALID_IF(size > remainingSize,
+ "Index buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) of "
+ "%s.",
+ offset, size, bufferSize, buffer);
+ }
+ } else {
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
+ }
+ }
+
+ mCommandBufferState.SetIndexBuffer(format, size);
+
+ SetIndexBufferCmd* cmd =
+ allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
+ cmd->buffer = buffer;
+ cmd->format = format;
+ cmd->offset = offset;
+ cmd->size = size;
+
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+
+ return {};
+ },
+ "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
+ }
+
+ void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
+ BufferBase* buffer,
+ uint64_t offset,
+ uint64_t size) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
+
+ DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
+ "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
+ kMaxVertexBuffers - 1);
+
+ DAWN_INVALID_IF(offset % 4 != 0,
+ "Vertex buffer offset (%u) is not a multiple of 4", offset);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
+ } else {
+ DAWN_INVALID_IF(size > remainingSize,
+ "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) "
+ "of %s.",
+ offset, size, bufferSize, buffer);
+ }
+ } else {
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
+ }
+ }
+
+ mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
+
+ SetVertexBufferCmd* cmd =
+ allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+ cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
+ cmd->buffer = buffer;
+ cmd->offset = offset;
+ cmd->size = size;
+
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+
+ return {};
+ },
+ "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
+ }
+
+ void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets));
+ }
+
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+ mUsageTracker.AddBindGroup(group);
+
+ return {};
+ },
+ // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
+ // as a string value in the message. This despite the exact same code working as
+ // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
+ // until the reason for the failure can be determined.
+ "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
+ dynamicOffsetCount);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h
new file mode 100644
index 00000000000..80128f30821
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderEncoderBase.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERENCODERBASE_H_
+#define DAWNNATIVE_RENDERENCODERBASE_H_
+
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/CommandBufferStateTracker.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IndirectDrawMetadata.h"
+#include "dawn/native/PassResourceUsageTracker.h"
+#include "dawn/native/ProgrammableEncoder.h"
+
+namespace dawn::native {
+
+ class RenderEncoderBase : public ProgrammableEncoder {
+ public:
+ RenderEncoderBase(DeviceBase* device,
+ const char* label,
+ EncodingContext* encodingContext,
+ Ref<AttachmentState> attachmentState,
+ bool depthReadOnly,
+ bool stencilReadOnly);
+
+ void APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount = 1,
+ uint32_t firstVertex = 0,
+ uint32_t firstInstance = 0);
+ void APIDrawIndexed(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance);
+
+ void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+
+ void APISetPipeline(RenderPipelineBase* pipeline);
+
+ void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
+ void APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size);
+
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
+
+ const AttachmentState* GetAttachmentState() const;
+ bool IsDepthReadOnly() const;
+ bool IsStencilReadOnly() const;
+ Ref<AttachmentState> AcquireAttachmentState();
+
+ protected:
+ // Construct an "error" render encoder base.
+ RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+
+ void DestroyImpl() override;
+
+ CommandBufferStateTracker mCommandBufferState;
+ RenderPassResourceUsageTracker mUsageTracker;
+ IndirectDrawMetadata mIndirectDrawMetadata;
+
+ private:
+ Ref<AttachmentState> mAttachmentState;
+ const bool mDisableBaseVertex;
+ const bool mDisableBaseInstance;
+ bool mDepthReadOnly = false;
+ bool mStencilReadOnly = false;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RENDERENCODERBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp
new file mode 100644
index 00000000000..fd2a46e45b3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.cpp
@@ -0,0 +1,398 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderPassEncoder.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/RenderPipeline.h"
+
+#include <math.h>
+#include <cstring>
+
+namespace dawn::native {
+ namespace {
+
+ // Check the query at queryIndex is unavailable, otherwise it cannot be written.
+ MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
+ uint32_t queryIndex,
+ const QueryAvailabilityMap& queryAvailabilityMap) {
+ auto it = queryAvailabilityMap.find(querySet);
+ DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
+ "Query index %u of %s is written to twice in a render pass.",
+ queryIndex, querySet);
+
+ return {};
+ }
+
+ } // namespace
+
+ // The usage tracker is passed in here, because it is prepopulated with usages from the
+ // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
+ // command, then this wouldn't be necessary.
+ RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ QuerySetBase* occlusionQuerySet,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly)
+ : RenderEncoderBase(device,
+ descriptor->label,
+ encodingContext,
+ std::move(attachmentState),
+ depthReadOnly,
+ stencilReadOnly),
+ mCommandEncoder(commandEncoder),
+ mRenderTargetWidth(renderTargetWidth),
+ mRenderTargetHeight(renderTargetHeight),
+ mOcclusionQuerySet(occlusionQuerySet) {
+ mUsageTracker = std::move(usageTracker);
+ TrackInDevice();
+ }
+
+ RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
+ : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
+ }
+
+ RenderPassEncoder* RenderPassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
+ }
+
+ void RenderPassEncoder::DestroyImpl() {
+ RenderEncoderBase::DestroyImpl();
+ // Ensure that the pass has exited. This is done for passes only since validation requires
+ // they exit before destruction while bundles do not.
+ mEncodingContext->EnsurePassExited(this);
+ }
+
+ ObjectType RenderPassEncoder::GetType() const {
+ return ObjectType::RenderPassEncoder;
+ }
+
+ void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+ DAWN_ASSERT(querySet != nullptr);
+
+ // Track the query availability with true on render pass for rewrite validation and query
+ // reset on render pass on Vulkan
+ mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
+
+ // Track it again on command encoder for zero-initializing when resolving unused queries.
+ mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+ }
+
+ void RenderPassEncoder::APIEnd() {
+ if (mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+
+ DAWN_INVALID_IF(
+ mOcclusionQueryActive,
+ "Render pass %s ended with incomplete occlusion query index %u of %s.",
+ this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+ }
+
+ allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
+ DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
+ mCommandEncoder.Get(),
+ std::move(mIndirectDrawMetadata)));
+ return {};
+ },
+ "encoding %s.End().", this)) {
+ }
+ }
+
+ void RenderPassEncoder::APIEndPass() {
+ GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+ APIEnd();
+ }
+
+ void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetStencilReferenceCmd* cmd =
+ allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
+ cmd->reference = reference;
+
+ return {};
+ },
+ "encoding %s.SetStencilReference(%u).", this, reference);
+ }
+
+ void RenderPassEncoder::APISetBlendConstant(const Color* color) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetBlendConstantCmd* cmd =
+ allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
+ cmd->color = *color;
+
+ return {};
+ },
+ "encoding %s.SetBlendConstant(%s).", this, color);
+ }
+
+ void RenderPassEncoder::APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
+ isnan(maxDepth)),
+ "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
+ "minDepth: %f, maxDepth: %f) is NaN.",
+ x, y, width, height, minDepth, maxDepth);
+
+ DAWN_INVALID_IF(
+ x < 0 || y < 0 || width < 0 || height < 0,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
+ "value.",
+ x, y, width, height);
+
+ DAWN_INVALID_IF(
+ x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
+ "in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+
+ // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
+ DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
+ "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
+ "minDepth was "
+ "greater than maxDepth.",
+ minDepth, maxDepth);
+ }
+
+ SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
+ cmd->minDepth = minDepth;
+ cmd->maxDepth = maxDepth;
+
+ return {};
+ },
+ "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
+ maxDepth);
+ }
+
+ void RenderPassEncoder::APISetScissorRect(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ width > mRenderTargetWidth || height > mRenderTargetHeight ||
+ x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
+ "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+ }
+
+ SetScissorRectCmd* cmd =
+ allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
+
+ return {};
+ },
+ "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
+ }
+
+ void RenderPassEncoder::APIExecuteBundles(uint32_t count,
+ RenderBundleBase* const* renderBundles) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ const AttachmentState* attachmentState = GetAttachmentState();
+ bool depthReadOnlyInPass = IsDepthReadOnly();
+ bool stencilReadOnlyInPass = IsStencilReadOnly();
+ for (uint32_t i = 0; i < count; ++i) {
+ DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
+
+ // TODO(dawn:563): Give more detail about why the states are incompatible.
+ DAWN_INVALID_IF(
+ attachmentState != renderBundles[i]->GetAttachmentState(),
+ "Attachment state of renderBundles[%i] (%s) is not compatible with "
+ "attachment state of %s.",
+ i, renderBundles[i], this);
+
+ bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
+ DAWN_INVALID_IF(
+ depthReadOnlyInPass && !depthReadOnlyInBundle,
+ "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
+ "with DepthReadOnly (%u) of %s.",
+ depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass, this);
+
+ bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
+ DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
+ "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
+ "compatible with StencilReadOnly (%u) of %s.",
+ stencilReadOnlyInBundle, i, renderBundles[i],
+ stencilReadOnlyInPass, this);
+ }
+ }
+
+ mCommandBufferState = CommandBufferStateTracker{};
+
+ ExecuteBundlesCmd* cmd =
+ allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
+ cmd->count = count;
+
+ Ref<RenderBundleBase>* bundles =
+ allocator->AllocateData<Ref<RenderBundleBase>>(count);
+ for (uint32_t i = 0; i < count; ++i) {
+ bundles[i] = renderBundles[i];
+
+ const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
+ for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+ mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
+ }
+
+ for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+ mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
+ usages.textureUsages[i]);
+ }
+
+ if (IsValidationEnabled()) {
+ mIndirectDrawMetadata.AddBundle(renderBundles[i]);
+ }
+ }
+
+ return {};
+ },
+ "encoding %s.ExecuteBundles(%u, ...).", this, count);
+ }
+
+ void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
+ "The occlusionQuerySet in RenderPassDescriptor is not set.");
+
+ // The type of querySet has been validated by ValidateRenderPassDescriptor
+
+ DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
+ "Query index (%u) exceeds the number of queries (%u) in %s.",
+ queryIndex, mOcclusionQuerySet->GetQueryCount(),
+ mOcclusionQuerySet.Get());
+
+ DAWN_INVALID_IF(mOcclusionQueryActive,
+ "An occlusion query (%u) in %s is already active.",
+ mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+
+ DAWN_TRY_CONTEXT(
+ ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the occlusion query index (%u) in %s", queryIndex,
+ mOcclusionQuerySet.Get());
+ }
+
+ // Record the current query index for endOcclusionQuery.
+ mCurrentOcclusionQueryIndex = queryIndex;
+ mOcclusionQueryActive = true;
+
+ BeginOcclusionQueryCmd* cmd =
+ allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ },
+ "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
+ }
+
+ void RenderPassEncoder::APIEndOcclusionQuery() {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
+ }
+
+ TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+
+ mOcclusionQueryActive = false;
+
+ EndOcclusionQueryCmd* cmd =
+ allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = mCurrentOcclusionQueryIndex;
+
+ return {};
+ },
+ "encoding %s.EndOcclusionQuery().", this);
+ }
+
+ void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ DAWN_TRY_CONTEXT(
+ ValidateQueryIndexOverwrite(querySet, queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the timestamp query index (%u) of %s", queryIndex, querySet);
+ }
+
+ TrackQueryAvailability(querySet, queryIndex);
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ },
+ "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h
new file mode 100644
index 00000000000..d9ac1c2838b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPassEncoder.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERPASSENCODER_H_
+#define DAWNNATIVE_RENDERPASSENCODER_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/RenderEncoderBase.h"
+
+namespace dawn::native {
+
+ class RenderBundleBase;
+
+ class RenderPassEncoder final : public RenderEncoderBase {
+ public:
+ RenderPassEncoder(DeviceBase* device,
+ const RenderPassDescriptor* descriptor,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ RenderPassResourceUsageTracker usageTracker,
+ Ref<AttachmentState> attachmentState,
+ QuerySetBase* occlusionQuerySet,
+ uint32_t renderTargetWidth,
+ uint32_t renderTargetHeight,
+ bool depthReadOnly,
+ bool stencilReadOnly);
+
+ static RenderPassEncoder* MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
+
+ ObjectType GetType() const override;
+
+ void APIEnd();
+ void APIEndPass(); // TODO(dawn:1286): Remove after deprecation period.
+
+ void APISetStencilReference(uint32_t reference);
+ void APISetBlendConstant(const Color* color);
+ void APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth);
+ void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+ void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+
+ void APIBeginOcclusionQuery(uint32_t queryIndex);
+ void APIEndOcclusionQuery();
+
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ protected:
+ RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
+
+ private:
+ void DestroyImpl() override;
+
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
+ // For render and compute passes, the encoding context is borrowed from the command encoder.
+ // Keep a reference to the encoder to make sure the context isn't freed.
+ Ref<CommandEncoder> mCommandEncoder;
+
+ uint32_t mRenderTargetWidth;
+ uint32_t mRenderTargetHeight;
+
+ // The resources for occlusion query
+ Ref<QuerySetBase> mOcclusionQuerySet;
+ uint32_t mCurrentOcclusionQueryIndex = 0;
+ bool mOcclusionQueryActive = false;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RENDERPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp
new file mode 100644
index 00000000000..8cf13b94c15
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.cpp
@@ -0,0 +1,1080 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/InternalPipelineStore.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+#include "dawn/native/VertexFormat.h"
+
+#include <cmath>
+#include <sstream>
+
+namespace dawn::native {
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ VertexFormatBaseType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case VertexFormatBaseType::Float:
+ s->Append("Float");
+ break;
+ case VertexFormatBaseType::Uint:
+ s->Append("Uint");
+ break;
+ case VertexFormatBaseType::Sint:
+ s->Append("Sint");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterStageComponentType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterStageComponentType::Float:
+ s->Append("Float");
+ break;
+ case InterStageComponentType::Uint:
+ s->Append("Uint");
+ break;
+ case InterStageComponentType::Sint:
+ s->Append("Sint");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationType::Perspective:
+ s->Append("Perspective");
+ break;
+ case InterpolationType::Linear:
+ s->Append("Linear");
+ break;
+ case InterpolationType::Flat:
+ s->Append("Flat");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationSampling value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationSampling::None:
+ s->Append("None");
+ break;
+ case InterpolationSampling::Center:
+ s->Append("Center");
+ break;
+ case InterpolationSampling::Centroid:
+ s->Append("Centroid");
+ break;
+ case InterpolationSampling::Sample:
+ s->Append("Sample");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ // Helper functions
+ namespace {
+ MaybeError ValidateVertexAttribute(
+ DeviceBase* device,
+ const VertexAttribute* attribute,
+ const EntryPointMetadata& metadata,
+ uint64_t vertexBufferStride,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+ DAWN_TRY(ValidateVertexFormat(attribute->format));
+ const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
+
+ DAWN_INVALID_IF(
+ attribute->shaderLocation >= kMaxVertexAttributes,
+ "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
+ "(%u).",
+ attribute->shaderLocation, kMaxVertexAttributes);
+
+ VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
+
+ // No underflow is possible because the max vertex format size is smaller than
+ // kMaxVertexBufferArrayStride.
+ ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
+ DAWN_INVALID_IF(
+ attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
+ "buffer stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize,
+ kMaxVertexBufferArrayStride);
+
+ // No overflow is possible because the offset is already validated to be less
+ // than kMaxVertexBufferArrayStride.
+ ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
+ DAWN_INVALID_IF(
+ vertexBufferStride > 0 &&
+ attribute->offset + formatInfo.byteSize > vertexBufferStride,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
+ "stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
+
+ DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
+ "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
+ std::min(4u, formatInfo.byteSize));
+
+ DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
+ formatInfo.baseType != metadata.vertexInputBaseTypes[location],
+ "Attribute base type (%s) does not match the "
+ "shader's base type (%s) in location (%u).",
+ formatInfo.baseType, metadata.vertexInputBaseTypes[location],
+ attribute->shaderLocation);
+
+ DAWN_INVALID_IF((*attributesSetMask)[location],
+ "Attribute shader location (%u) is used more than once.",
+ attribute->shaderLocation);
+
+ attributesSetMask->set(location);
+ return {};
+ }
+
+ MaybeError ValidateVertexBufferLayout(
+ DeviceBase* device,
+ const VertexBufferLayout* buffer,
+ const EntryPointMetadata& metadata,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+ DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
+ DAWN_INVALID_IF(
+ buffer->arrayStride > kMaxVertexBufferArrayStride,
+ "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
+ buffer->arrayStride, kMaxVertexBufferArrayStride);
+
+ DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
+ "Vertex buffer arrayStride (%u) is not a multiple of 4.",
+ buffer->arrayStride);
+
+ for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
+ DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
+ buffer->arrayStride, attributesSetMask),
+ "validating attributes[%u].", i);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateVertexState(DeviceBase* device,
+ const VertexState* descriptor,
+ const PipelineLayoutBase* layout) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_INVALID_IF(
+ descriptor->bufferCount > kMaxVertexBuffers,
+ "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
+ descriptor->bufferCount, kMaxVertexBuffers);
+
+ DAWN_TRY_CONTEXT(
+ ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants, layout,
+ SingleShaderStage::Vertex),
+ "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
+ const EntryPointMetadata& vertexMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
+ uint32_t totalAttributesNum = 0;
+ for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
+ DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
+ vertexMetadata, &attributesSetMask),
+ "validating buffers[%u].", i);
+ totalAttributesNum += descriptor->buffers[i].attributeCount;
+ }
+
+ // Every vertex attribute has a member called shaderLocation, and there are some
+ // requirements for shaderLocation: 1) >=0, 2) values are different across different
+ // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
+ // attribute number never exceed kMaxVertexAttributes.
+ ASSERT(totalAttributesNum <= kMaxVertexAttributes);
+
+ // TODO(dawn:563): Specify which inputs were not used in error message.
+ DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
+ "Pipeline vertex stage uses vertex buffers not in the vertex state");
+
+ return {};
+ }
+
+ MaybeError ValidatePrimitiveState(const DeviceBase* device,
+ const PrimitiveState* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::PrimitiveDepthClampingState));
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(descriptor->nextInChain, &clampInfo);
+ if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
+ return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
+ }
+ DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
+ DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
+ DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
+ DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+
+ // Pipeline descriptors must have stripIndexFormat == undefined if they are using
+ // non-strip topologies.
+ if (!IsStripPrimitiveTopology(descriptor->topology)) {
+ DAWN_INVALID_IF(
+ descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
+ "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
+ "topology (%s).",
+ descriptor->stripIndexFormat, descriptor->topology);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateDepthStencilState(const DeviceBase* device,
+ const DepthStencilState* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+ "Depth stencil format (%s) is not depth-stencil renderable.",
+ descriptor->format);
+
+ DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
+ std::isnan(descriptor->depthBiasClamp),
+ "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
+ descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
+
+ DAWN_INVALID_IF(
+ !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
+ descriptor->depthWriteEnabled),
+ "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
+ "not %s or depthWriteEnabled (%u) is true.",
+ descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
+ descriptor->depthWriteEnabled);
+
+ DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
+ "Depth stencil format (%s) doesn't have stencil aspect while stencil "
+ "test or stencil write is enabled.",
+ descriptor->format);
+
+ return {};
+ }
+
+ MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
+ "Multisample count (%u) is not supported.", descriptor->count);
+
+ DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
+ "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
+ descriptor->count);
+
+ return {};
+ }
+
+ MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
+ if (blendComponent.operation == wgpu::BlendOperation::Min ||
+ blendComponent.operation == wgpu::BlendOperation::Max) {
+ DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
+ blendComponent.dstFactor != wgpu::BlendFactor::One,
+ "Blend factor is not %s when blend operation is %s.",
+ wgpu::BlendFactor::One, blendComponent.operation);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
+ DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
+ DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+ DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
+ DAWN_TRY(ValidateBlendComponent(descriptor->color));
+
+ return {};
+ }
+
+ bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+ return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+ blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+ blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+ }
+
+ MaybeError ValidateColorTargetState(
+ DeviceBase* device,
+ const ColorTargetState* descriptor,
+ bool fragmentWritten,
+ const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ if (descriptor->blend) {
+ DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
+ "validating blend state.");
+ }
+
+ DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+ "Color format (%s) is not color renderable.", descriptor->format);
+
+ DAWN_INVALID_IF(
+ descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
+ SampleTypeBit::Float),
+ "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
+
+ if (fragmentWritten) {
+ DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
+ format->GetAspectInfo(Aspect::Color).baseType,
+ "Color format (%s) base type (%s) doesn't match the fragment "
+ "module output type (%s).",
+ descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
+ fragmentOutputVariable.baseType);
+
+ DAWN_INVALID_IF(
+ fragmentOutputVariable.componentCount < format->componentCount,
+ "The fragment stage has fewer output components (%u) than the color format "
+ "(%s) component count (%u).",
+ fragmentOutputVariable.componentCount, descriptor->format,
+ format->componentCount);
+
+ if (descriptor->blend) {
+ if (fragmentOutputVariable.componentCount < 4u) {
+ // No alpha channel output
+ // Make sure there's no alpha involved in the blending operation
+ DAWN_INVALID_IF(
+ BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
+ BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
+ "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
+ "but it is missing from fragment output.",
+ descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
+ }
+ }
+ } else {
+ DAWN_INVALID_IF(
+ descriptor->writeMask != wgpu::ColorWriteMask::None,
+ "Color target has no corresponding fragment stage output but writeMask (%s) is "
+ "not zero.",
+ descriptor->writeMask);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateFragmentState(DeviceBase* device,
+ const FragmentState* descriptor,
+ const PipelineLayoutBase* layout) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_TRY_CONTEXT(
+ ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants, layout,
+ SingleShaderStage::Fragment),
+ "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
+
+ DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
+ "Number of targets (%u) exceeds the maximum (%u).",
+ descriptor->targetCount, kMaxColorAttachments);
+
+ const EntryPointMetadata& fragmentMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
+ DAWN_TRY_CONTEXT(
+ ValidateColorTargetState(device, &descriptor->targets[static_cast<uint8_t>(i)],
+ fragmentMetadata.fragmentOutputsWritten[i],
+ fragmentMetadata.fragmentOutputVariables[i]),
+ "validating targets[%u].", static_cast<uint8_t>(i));
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateInterStageMatching(DeviceBase* device,
+ const VertexState& vertexState,
+ const FragmentState& fragmentState) {
+ const EntryPointMetadata& vertexMetadata =
+ vertexState.module->GetEntryPoint(vertexState.entryPoint);
+ const EntryPointMetadata& fragmentMetadata =
+ fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
+
+ // TODO(dawn:563): Can this message give more details?
+ DAWN_INVALID_IF(
+ vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
+ "One or more fragment inputs and vertex outputs are not one-to-one matching");
+
+ // TODO(dawn:802): Validate interpolation types and interpolition sampling types
+ for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
+ const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
+ const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
+ DAWN_INVALID_IF(
+ vertexOutputInfo.baseType != fragmentInputInfo.baseType,
+ "The base type (%s) of the vertex output at location %u is different from the "
+ "base type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
+ "The component count (%u) of the vertex output at location %u is different "
+ "from the component count (%u) of the fragment input at location %u.",
+ vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
+ "The interpolation type (%s) of the vertex output at location %u is different "
+ "from the interpolation type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationSampling !=
+ fragmentInputInfo.interpolationSampling,
+ "The interpolation sampling (%s) of the vertex output at location %u is "
+ "different from the interpolation sampling (%s) of the fragment input at "
+ "location %u.",
+ vertexOutputInfo.interpolationSampling, i,
+ fragmentInputInfo.interpolationSampling, i);
+ }
+
+ return {};
+ }
+ } // anonymous namespace
+
+ // Helper functions
+ size_t IndexFormatSize(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return sizeof(uint16_t);
+ case wgpu::IndexFormat::Uint32:
+ return sizeof(uint32_t);
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
+ primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
+ }
+
+ MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+ }
+
+ DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
+ "validating vertex state.");
+
+ DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
+ "validating primitive state.");
+
+ if (descriptor->depthStencil) {
+ DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
+ "validating depthStencil state.");
+ }
+
+ DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
+ "validating multisample state.");
+
+ if (descriptor->fragment != nullptr) {
+ DAWN_TRY_CONTEXT(
+ ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
+ "validating fragment state.");
+
+ DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
+ "Must have at least one color or depthStencil target.");
+
+ DAWN_TRY(
+ ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
+ }
+
+ return {};
+ }
+
+ std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor) {
+ std::vector<StageAndDescriptor> stages;
+ stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
+ descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
+ descriptor->vertex.constants});
+ if (descriptor->fragment != nullptr) {
+ stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
+ descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
+ descriptor->fragment->constants});
+ } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+ // The dummy fragment shader module should already be initialized
+ DAWN_ASSERT(store->dummyFragmentShader != nullptr);
+ ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
+ stages.push_back(
+ {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
+ }
+ return stages;
+ }
+
+ bool StencilTestEnabled(const DepthStencilState* depthStencil) {
+ return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
+ depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
+ depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
+ }
+
+ // RenderPipelineBase
+
+ RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor)
+ : PipelineBase(device,
+ descriptor->layout,
+ descriptor->label,
+ GetRenderStagesAndSetDummyShader(device, descriptor)),
+ mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
+ mVertexBufferCount = descriptor->vertex.bufferCount;
+ const VertexBufferLayout* buffers = descriptor->vertex.buffers;
+ for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
+ if (buffers[slot].attributeCount == 0) {
+ continue;
+ }
+
+ VertexBufferSlot typedSlot(slot);
+
+ mVertexBufferSlotsUsed.set(typedSlot);
+ mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
+ mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
+ mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
+ switch (buffers[slot].stepMode) {
+ case wgpu::VertexStepMode::Vertex:
+ mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
+ break;
+ case wgpu::VertexStepMode::Instance:
+ mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
+ break;
+ default:
+ DAWN_UNREACHABLE();
+ }
+
+ for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
+ VertexAttributeLocation location = VertexAttributeLocation(
+ static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
+ mAttributeLocationsUsed.set(location);
+ mAttributeInfos[location].shaderLocation = location;
+ mAttributeInfos[location].vertexBufferSlot = typedSlot;
+ mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
+ mAttributeInfos[location].format = buffers[slot].attributes[i].format;
+ // Compute the access boundary of this attribute by adding attribute format size to
+ // attribute offset. Although offset is in uint64_t, such sum must be no larger than
+ // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
+ // validation of creating render pipeline. Therefore, calculating in uint16_t will
+ // cause no overflow.
+ DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
+ uint16_t accessBoundary =
+ uint16_t(buffers[slot].attributes[i].offset) +
+ uint16_t(GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize);
+ mVertexBufferInfos[typedSlot].usedBytesInStride =
+ std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
+ }
+ }
+
+ mPrimitive = descriptor->primitive;
+ const PrimitiveDepthClampingState* clampInfo = nullptr;
+ FindInChain(mPrimitive.nextInChain, &clampInfo);
+ if (clampInfo) {
+ mClampDepth = clampInfo->clampDepth;
+ }
+ mMultisample = descriptor->multisample;
+
+ if (mAttachmentState->HasDepthStencilAttachment()) {
+ mDepthStencil = *descriptor->depthStencil;
+ mWritesDepth = mDepthStencil.depthWriteEnabled;
+ if (mDepthStencil.stencilWriteMask) {
+ if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
+ (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
+ (mPrimitive.cullMode != wgpu::CullMode::Back &&
+ (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
+ mWritesStencil = true;
+ }
+ }
+ } else {
+ // These default values below are useful for backends to fill information.
+ // The values indicate that depth and stencil test are disabled when backends
+ // set their own depth stencil states/descriptors according to the values in
+ // mDepthStencil.
+ mDepthStencil.format = wgpu::TextureFormat::Undefined;
+ mDepthStencil.depthWriteEnabled = false;
+ mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilReadMask = 0xff;
+ mDepthStencil.stencilWriteMask = 0xff;
+ mDepthStencil.depthBias = 0;
+ mDepthStencil.depthBiasSlopeScale = 0.0f;
+ mDepthStencil.depthBiasClamp = 0.0f;
+ }
+
+ for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+ // Vertex-only render pipeline have no color attachment. For a render pipeline with
+ // color attachments, there must be a valid FragmentState.
+ ASSERT(descriptor->fragment != nullptr);
+ const ColorTargetState* target =
+ &descriptor->fragment->targets[static_cast<uint8_t>(i)];
+ mTargets[i] = *target;
+
+ if (target->blend != nullptr) {
+ mTargetBlend[i] = *target->blend;
+ mTargets[i].blend = &mTargetBlend[i];
+ }
+ }
+
+ SetContentHash(ComputeContentHash());
+ TrackInDevice();
+ }
+
+ RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
+ TrackInDevice();
+ }
+
+ RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : PipelineBase(device, tag) {
+ }
+
+ RenderPipelineBase::~RenderPipelineBase() = default;
+
+ void RenderPipelineBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheRenderPipeline(this);
+ }
+
+ // Remove reference to the attachment state so that we don't have lingering references to
+ // it preventing it from being uncached in the device.
+ mAttachmentState = nullptr;
+ }
+
+ // static
+ RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
+ class ErrorRenderPipeline final : public RenderPipelineBase {
+ public:
+ ErrorRenderPipeline(DeviceBase* device)
+ : RenderPipelineBase(device, ObjectBase::kError) {
+ }
+
+ MaybeError Initialize() override {
+ UNREACHABLE();
+ return {};
+ }
+ };
+
+ return new ErrorRenderPipeline(device);
+ }
+
+ ObjectType RenderPipelineBase::GetType() const {
+ return ObjectType::RenderPipeline;
+ }
+
+ const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+ RenderPipelineBase::GetAttributeLocationsUsed() const {
+ ASSERT(!IsError());
+ return mAttributeLocationsUsed;
+ }
+
+ const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
+ VertexAttributeLocation location) const {
+ ASSERT(!IsError());
+ ASSERT(mAttributeLocationsUsed[location]);
+ return mAttributeInfos[location];
+ }
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ RenderPipelineBase::GetVertexBufferSlotsUsed() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsed;
+ }
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsedAsVertexBuffer;
+ }
+
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
+ ASSERT(!IsError());
+ return mVertexBufferSlotsUsedAsInstanceBuffer;
+ }
+
+ const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
+ ASSERT(!IsError());
+ ASSERT(mVertexBufferSlotsUsed[slot]);
+ return mVertexBufferInfos[slot];
+ }
+
+ uint32_t RenderPipelineBase::GetVertexBufferCount() const {
+ ASSERT(!IsError());
+ return mVertexBufferCount;
+ }
+
+ const ColorTargetState* RenderPipelineBase::GetColorTargetState(
+ ColorAttachmentIndex attachmentSlot) const {
+ ASSERT(!IsError());
+ ASSERT(attachmentSlot < mTargets.size());
+ return &mTargets[attachmentSlot];
+ }
+
+ const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
+ ASSERT(!IsError());
+ return &mDepthStencil;
+ }
+
+ wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
+ ASSERT(!IsError());
+ return mPrimitive.topology;
+ }
+
+ wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
+ ASSERT(!IsError());
+ return mPrimitive.stripIndexFormat;
+ }
+
+ wgpu::CullMode RenderPipelineBase::GetCullMode() const {
+ ASSERT(!IsError());
+ return mPrimitive.cullMode;
+ }
+
+ wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
+ ASSERT(!IsError());
+ return mPrimitive.frontFace;
+ }
+
+ bool RenderPipelineBase::IsDepthBiasEnabled() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
+ }
+
+ int32_t RenderPipelineBase::GetDepthBias() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBias;
+ }
+
+ float RenderPipelineBase::GetDepthBiasSlopeScale() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBiasSlopeScale;
+ }
+
+ float RenderPipelineBase::GetDepthBiasClamp() const {
+ ASSERT(!IsError());
+ return mDepthStencil.depthBiasClamp;
+ }
+
+ bool RenderPipelineBase::ShouldClampDepth() const {
+ ASSERT(!IsError());
+ return mClampDepth;
+ }
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+ RenderPipelineBase::GetColorAttachmentsMask() const {
+ ASSERT(!IsError());
+ return mAttachmentState->GetColorAttachmentsMask();
+ }
+
+ bool RenderPipelineBase::HasDepthStencilAttachment() const {
+ ASSERT(!IsError());
+ return mAttachmentState->HasDepthStencilAttachment();
+ }
+
+ wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
+ ColorAttachmentIndex attachment) const {
+ ASSERT(!IsError());
+ return mTargets[attachment].format;
+ }
+
+ wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
+ ASSERT(!IsError());
+ ASSERT(mAttachmentState->HasDepthStencilAttachment());
+ return mDepthStencil.format;
+ }
+
+ uint32_t RenderPipelineBase::GetSampleCount() const {
+ ASSERT(!IsError());
+ return mAttachmentState->GetSampleCount();
+ }
+
+ uint32_t RenderPipelineBase::GetSampleMask() const {
+ ASSERT(!IsError());
+ return mMultisample.mask;
+ }
+
+ bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
+ ASSERT(!IsError());
+ return mMultisample.alphaToCoverageEnabled;
+ }
+
+ const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
+ ASSERT(!IsError());
+
+ return mAttachmentState.Get();
+ }
+
+ bool RenderPipelineBase::WritesDepth() const {
+ ASSERT(!IsError());
+
+ return mWritesDepth;
+ }
+
+ bool RenderPipelineBase::WritesStencil() const {
+ ASSERT(!IsError());
+
+ return mWritesStencil;
+ }
+
+ size_t RenderPipelineBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+
+ // Record modules and layout
+ recorder.Record(PipelineBase::ComputeContentHash());
+
+ // Hierarchically record the attachment state.
+ // It contains the attachments set, texture formats, and sample count.
+ recorder.Record(mAttachmentState->GetContentHash());
+
+ // Record attachments
+ for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+ const ColorTargetState& desc = *GetColorTargetState(i);
+ recorder.Record(desc.writeMask);
+ if (desc.blend != nullptr) {
+ recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
+ desc.blend->color.dstFactor);
+ recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
+ desc.blend->alpha.dstFactor);
+ }
+ }
+
+ if (mAttachmentState->HasDepthStencilAttachment()) {
+ const DepthStencilState& desc = mDepthStencil;
+ recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
+ recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
+ recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
+ desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
+ recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
+ desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
+ recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
+ }
+
+ // Record vertex state
+ recorder.Record(mAttributeLocationsUsed);
+ for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
+ const VertexAttributeInfo& desc = GetAttribute(location);
+ recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
+ }
+
+ recorder.Record(mVertexBufferSlotsUsed);
+ for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& desc = GetVertexBuffer(slot);
+ recorder.Record(desc.arrayStride, desc.stepMode);
+ }
+
+ // Record primitive state
+ recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
+ mPrimitive.cullMode, mClampDepth);
+
+ // Record multisample state
+ // Sample count hashed as part of the attachment state
+ recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
+
+ return recorder.GetContentHash();
+ }
+
+ bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
+ const RenderPipelineBase* b) const {
+ // Check the layout and shader stages.
+ if (!PipelineBase::EqualForCache(a, b)) {
+ return false;
+ }
+
+ // Check the attachment state.
+ // It contains the attachments set, texture formats, and sample count.
+ if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
+ return false;
+ }
+
+ if (a->mAttachmentState.Get() != nullptr) {
+ for (ColorAttachmentIndex i :
+ IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
+ const ColorTargetState& descA = *a->GetColorTargetState(i);
+ const ColorTargetState& descB = *b->GetColorTargetState(i);
+ if (descA.writeMask != descB.writeMask) {
+ return false;
+ }
+ if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
+ return false;
+ }
+ if (descA.blend != nullptr) {
+ if (descA.blend->color.operation != descB.blend->color.operation ||
+ descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
+ descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
+ return false;
+ }
+ if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
+ descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
+ descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
+ return false;
+ }
+ }
+ }
+
+ // Check depth/stencil state
+ if (a->mAttachmentState->HasDepthStencilAttachment()) {
+ const DepthStencilState& stateA = a->mDepthStencil;
+ const DepthStencilState& stateB = b->mDepthStencil;
+
+ ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateA.depthBiasClamp));
+ ASSERT(!std::isnan(stateB.depthBiasClamp));
+
+ if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
+ stateA.depthCompare != stateB.depthCompare ||
+ stateA.depthBias != stateB.depthBias ||
+ stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
+ stateA.depthBiasClamp != stateB.depthBiasClamp) {
+ return false;
+ }
+ if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
+ stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
+ stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
+ stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
+ return false;
+ }
+ if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
+ stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
+ stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
+ stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
+ return false;
+ }
+ if (stateA.stencilReadMask != stateB.stencilReadMask ||
+ stateA.stencilWriteMask != stateB.stencilWriteMask) {
+ return false;
+ }
+ }
+ }
+
+ // Check vertex state
+ if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
+ return false;
+ }
+
+ for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
+ const VertexAttributeInfo& descA = a->GetAttribute(loc);
+ const VertexAttributeInfo& descB = b->GetAttribute(loc);
+ if (descA.shaderLocation != descB.shaderLocation ||
+ descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
+ descA.format != descB.format) {
+ return false;
+ }
+ }
+
+ if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+ return false;
+ }
+
+ for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
+ const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
+ if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
+ return false;
+ }
+ }
+
+ // Check primitive state
+ {
+ const PrimitiveState& stateA = a->mPrimitive;
+ const PrimitiveState& stateB = b->mPrimitive;
+ if (stateA.topology != stateB.topology ||
+ stateA.stripIndexFormat != stateB.stripIndexFormat ||
+ stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
+ a->mClampDepth != b->mClampDepth) {
+ return false;
+ }
+ }
+
+ // Check multisample state
+ {
+ const MultisampleState& stateA = a->mMultisample;
+ const MultisampleState& stateB = b->mMultisample;
+ // Sample count already checked as part of the attachment state.
+ if (stateA.mask != stateB.mask ||
+ stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h
new file mode 100644
index 00000000000..da6cdd2380d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RenderPipeline.h
@@ -0,0 +1,144 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RENDERPIPELINE_H_
+#define DAWNNATIVE_RENDERPIPELINE_H_
+
+#include "dawn/common/TypedInteger.h"
+#include "dawn/native/AttachmentState.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor);
+
+ std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor);
+
+ size_t IndexFormatSize(wgpu::IndexFormat format);
+
+ bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
+
+ bool StencilTestEnabled(const DepthStencilState* depthStencil);
+
+ struct VertexAttributeInfo {
+ wgpu::VertexFormat format;
+ uint64_t offset;
+ VertexAttributeLocation shaderLocation;
+ VertexBufferSlot vertexBufferSlot;
+ };
+
+ struct VertexBufferInfo {
+ uint64_t arrayStride;
+ wgpu::VertexStepMode stepMode;
+ uint16_t usedBytesInStride;
+ };
+
+ class RenderPipelineBase : public PipelineBase {
+ public:
+ RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+ ~RenderPipelineBase() override;
+
+ static RenderPipelineBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+ GetAttributeLocationsUsed() const;
+ const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ GetVertexBufferSlotsUsedAsVertexBuffer() const;
+ const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+ GetVertexBufferSlotsUsedAsInstanceBuffer() const;
+ const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
+ uint32_t GetVertexBufferCount() const;
+
+ const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
+ const DepthStencilState* GetDepthStencilState() const;
+ wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+ wgpu::IndexFormat GetStripIndexFormat() const;
+ wgpu::CullMode GetCullMode() const;
+ wgpu::FrontFace GetFrontFace() const;
+ bool IsDepthBiasEnabled() const;
+ int32_t GetDepthBias() const;
+ float GetDepthBiasSlopeScale() const;
+ float GetDepthBiasClamp() const;
+ bool ShouldClampDepth() const;
+
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+ bool HasDepthStencilAttachment() const;
+ wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
+ uint32_t GetSampleCount() const;
+ uint32_t GetSampleMask() const;
+ bool IsAlphaToCoverageEnabled() const;
+ bool WritesDepth() const;
+ bool WritesStencil() const;
+
+ const AttachmentState* GetAttachmentState() const;
+
+ // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
+ };
+
+ protected:
+ // Constructor used only for mocking and testing.
+ RenderPipelineBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ private:
+ RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // Vertex state
+ uint32_t mVertexBufferCount;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
+ ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
+ mAttributeInfos;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
+ ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
+
+ // Attachments
+ Ref<AttachmentState> mAttachmentState;
+ ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
+ ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
+
+ // Other state
+ PrimitiveState mPrimitive;
+ DepthStencilState mDepthStencil;
+ MultisampleState mMultisample;
+ bool mClampDepth = false;
+ bool mWritesDepth = false;
+ bool mWritesStencil = false;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RENDERPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h b/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h
new file mode 100644
index 00000000000..cb45c88280d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceHeap.h
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEHEAP_H_
+#define DAWNNATIVE_RESOURCEHEAP_H_
+
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+
+ // Wrapper for a resource backed by a heap.
+ class ResourceHeapBase {
+ public:
+ ResourceHeapBase() = default;
+ virtual ~ResourceHeapBase() = default;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RESOURCEHEAP_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h b/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h
new file mode 100644
index 00000000000..3c861543b87
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceHeapAllocator.h
@@ -0,0 +1,37 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
+#define DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceHeap.h"
+
+#include <memory>
+
+namespace dawn::native {
+
+    // Interface for backend allocators that create memory heaps resources can be suballocated in.
+ class ResourceHeapAllocator {
+ public:
+ virtual ~ResourceHeapAllocator() = default;
+
+ virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) = 0;
+ virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp
new file mode 100644
index 00000000000..8848c18a6b9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.cpp
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/common/Assert.h"
+
+namespace dawn::native {
+
+ ResourceMemoryAllocation::ResourceMemoryAllocation()
+ : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
+ }
+
+ ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ResourceHeapBase* resourceHeap,
+ uint8_t* mappedPointer)
+ : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
+ }
+
+ ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+ return mResourceHeap;
+ }
+
+ uint64_t ResourceMemoryAllocation::GetOffset() const {
+ ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+ return mOffset;
+ }
+
+ AllocationInfo ResourceMemoryAllocation::GetInfo() const {
+ return mInfo;
+ }
+
+ uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
+ return mMappedPointer;
+ }
+
+ void ResourceMemoryAllocation::Invalidate() {
+ mResourceHeap = nullptr;
+ mInfo = {};
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h
new file mode 100644
index 00000000000..307d90a1dbd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ResourceMemoryAllocation.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
+#define DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
+
+#include <cstdint>
+
+namespace dawn::native {
+
+ class ResourceHeapBase;
+
+ // Allocation method determines how memory was sub-divided.
+ // Used by the device to get the allocator that was responsible for the allocation.
+ enum class AllocationMethod {
+
+ // Memory not sub-divided.
+ kDirect,
+
+ // Memory sub-divided using one or more blocks of various sizes.
+ kSubAllocated,
+
+ // Memory was allocated outside of Dawn.
+ kExternal,
+
+ // Memory not allocated or freed.
+ kInvalid
+ };
+
+ // Metadata that describes how the allocation was allocated.
+ struct AllocationInfo {
+ // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
+ // The block offset is within the entire allocator memory range and only required by the
+ // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
+ // allocation offset is always local to the memory.
+ uint64_t mBlockOffset = 0;
+
+ AllocationMethod mMethod = AllocationMethod::kInvalid;
+ };
+
+ // Handle into a resource heap pool.
+ class ResourceMemoryAllocation {
+ public:
+ ResourceMemoryAllocation();
+ ResourceMemoryAllocation(const AllocationInfo& info,
+ uint64_t offset,
+ ResourceHeapBase* resourceHeap,
+ uint8_t* mappedPointer = nullptr);
+ virtual ~ResourceMemoryAllocation() = default;
+
+ ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
+ ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
+
+ ResourceHeapBase* GetResourceHeap() const;
+ uint64_t GetOffset() const;
+ uint8_t* GetMappedPointer() const;
+ AllocationInfo GetInfo() const;
+
+ virtual void Invalidate();
+
+ private:
+ AllocationInfo mInfo;
+ uint64_t mOffset;
+ ResourceHeapBase* mResourceHeap;
+ uint8_t* mMappedPointer;
+ };
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp
new file mode 100644
index 00000000000..e1dc7aeb67a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.cpp
@@ -0,0 +1,121 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/RingBufferAllocator.h"
+
+// Note: Current RingBufferAllocator implementation uses two indices (start and end) to implement a
+// circular queue. However, this approach defines a full queue when one element is still unused.
+//
+// For example, [E,E,E,E] would be equivalent to [U,U,U,U].
+// ^ ^
+// S=E=1 S=E=1
+//
+// The latter case is eliminated by counting used bytes >= capacity. This definition prevents
+// using (the last) byte and requires an extra variable to count used bytes.
+// only two indices that keep increasing (unbounded) but can be still indexed using bit masks.
+// However, this 1) requires the size to always be a power-of-two and 2) removes tests that check
+// used bytes.
+namespace dawn::native {
+
+ RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+ }
+
+ void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
+ // Reclaim memory from previously recorded blocks.
+ for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
+ mUsedStartOffset = request.endOffset;
+ mUsedSize -= request.size;
+ }
+
+ // Dequeue previously recorded requests.
+ mInflightRequests.ClearUpTo(lastCompletedSerial);
+ }
+
+ uint64_t RingBufferAllocator::GetSize() const {
+ return mMaxBlockSize;
+ }
+
+ uint64_t RingBufferAllocator::GetUsedSize() const {
+ return mUsedSize;
+ }
+
+ bool RingBufferAllocator::Empty() const {
+ return mInflightRequests.Empty();
+ }
+
+ // Sub-allocate the ring-buffer by requesting a chunk of the specified size.
+ // This is a serial-based resource scheme, the life-span of resources (and the allocations) get
+ // tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
+ // completed up to a given serial. Each sub-allocation request is tracked in the serial offset
+ // queue, which identifies an existing (or new) frames-worth of resources. Internally, the
+ // ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
+ // in FIFO order as older frames would free resources before newer ones.
+ uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
+ // Check if the buffer is full by comparing the used size.
+ // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
+ // subsequent sub-alloc could fail where the used size was previously adjusted to include
+ // the wasted.
+ if (mUsedSize >= mMaxBlockSize) {
+ return kInvalidOffset;
+ }
+
+ // Ensure adding allocationSize does not overflow.
+ const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
+ if (allocationSize > remainingSize) {
+ return kInvalidOffset;
+ }
+
+ uint64_t startOffset = kInvalidOffset;
+
+ // Check if the buffer is NOT split (i.e sub-alloc on ends)
+ if (mUsedStartOffset <= mUsedEndOffset) {
+ // Order is important (try to sub-alloc at end first).
+ // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
+ // wrapped).
+ if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
+ startOffset = mUsedEndOffset;
+ mUsedEndOffset += allocationSize;
+ mUsedSize += allocationSize;
+ mCurrentRequestSize += allocationSize;
+ } else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
+ // Count the space at the end so that a subsequent
+                // sub-alloc cannot succeed when the buffer is full.
+ const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
+
+ startOffset = 0;
+ mUsedEndOffset = allocationSize;
+ mUsedSize += requestSize;
+ mCurrentRequestSize += requestSize;
+ }
+ } else if (mUsedEndOffset + allocationSize <=
+ mUsedStartOffset) { // Otherwise, buffer is split where sub-alloc must be
+ // in-between.
+ startOffset = mUsedEndOffset;
+ mUsedEndOffset += allocationSize;
+ mUsedSize += allocationSize;
+ mCurrentRequestSize += allocationSize;
+ }
+
+ if (startOffset != kInvalidOffset) {
+ Request request;
+ request.endOffset = mUsedEndOffset;
+ request.size = mCurrentRequestSize;
+
+ mInflightRequests.Enqueue(std::move(request), serial);
+ mCurrentRequestSize = 0; // reset
+ }
+
+ return startOffset;
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h
new file mode 100644
index 00000000000..8049470f4c5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/RingBufferAllocator.h
@@ -0,0 +1,63 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_RINGBUFFERALLOCATOR_H_
+#define DAWNNATIVE_RINGBUFFERALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/IntegerTypes.h"
+
+#include <limits>
+#include <memory>
+
+// RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
+namespace dawn::native {
+
+ class RingBufferAllocator {
+ public:
+ RingBufferAllocator() = default;
+ RingBufferAllocator(uint64_t maxSize);
+ ~RingBufferAllocator() = default;
+ RingBufferAllocator(const RingBufferAllocator&) = default;
+ RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
+
+ uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
+ void Deallocate(ExecutionSerial lastCompletedSerial);
+
+ uint64_t GetSize() const;
+ bool Empty() const;
+ uint64_t GetUsedSize() const;
+
+ static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+
+ private:
+ struct Request {
+ uint64_t endOffset;
+ uint64_t size;
+ };
+
+ SerialQueue<ExecutionSerial, Request>
+ mInflightRequests; // Queue of the recorded sub-alloc requests
+ // (e.g. frame of resources).
+
+ uint64_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
+ uint64_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
+ uint64_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
+ uint64_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
+ uint64_t mCurrentRequestSize =
+ 0; // Size of the sub-alloc requests (in bytes) of the current serial.
+ };
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Sampler.cpp b/chromium/third_party/dawn/src/dawn/native/Sampler.cpp
new file mode 100644
index 00000000000..ffd8a724cb0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Sampler.cpp
@@ -0,0 +1,153 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+#include <cmath>
+
+namespace dawn::native {
+
+ MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+ DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
+ "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
+ descriptor->lodMaxClamp);
+
+        DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
+                        "LOD clamp bounds [%f, %f] contain a negative number.",
+                        descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+ DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
+ "LOD min clamp (%f) is larger than the max clamp (%f).",
+ descriptor->lodMinClamp, descriptor->lodMaxClamp);
+
+ if (descriptor->maxAnisotropy > 1) {
+ DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
+ descriptor->magFilter != wgpu::FilterMode::Linear ||
+ descriptor->mipmapFilter != wgpu::FilterMode::Linear,
+ "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
+ "while using anisotropic filter (maxAnisotropy is %f)",
+                            descriptor->minFilter, descriptor->magFilter, descriptor->mipmapFilter,
+ wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
+ } else if (descriptor->maxAnisotropy == 0u) {
+ return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
+ descriptor->maxAnisotropy);
+ }
+
+ DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
+ DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
+ DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
+ DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
+
+ // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
+ // SamplerDescriptor where it is a special value that means the sampler is not a
+ // comparison-sampler.
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ DAWN_TRY(ValidateCompareFunction(descriptor->compare));
+ }
+
+ return {};
+ }
+
+ // SamplerBase
+
+ SamplerBase::SamplerBase(DeviceBase* device,
+ const SamplerDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag)
+ : ApiObjectBase(device, descriptor->label),
+ mAddressModeU(descriptor->addressModeU),
+ mAddressModeV(descriptor->addressModeV),
+ mAddressModeW(descriptor->addressModeW),
+ mMagFilter(descriptor->magFilter),
+ mMinFilter(descriptor->minFilter),
+ mMipmapFilter(descriptor->mipmapFilter),
+ mLodMinClamp(descriptor->lodMinClamp),
+ mLodMaxClamp(descriptor->lodMaxClamp),
+ mCompareFunction(descriptor->compare),
+ mMaxAnisotropy(descriptor->maxAnisotropy) {
+ }
+
+ SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
+ : SamplerBase(device, descriptor, kUntrackedByDevice) {
+ TrackInDevice();
+ }
+
+ SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ SamplerBase::~SamplerBase() = default;
+
+ void SamplerBase::DestroyImpl() {
+ if (IsCachedReference()) {
+ // Do not uncache the actual cached object if we are a blueprint.
+ GetDevice()->UncacheSampler(this);
+ }
+ }
+
+ // static
+ SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
+ return new SamplerBase(device, ObjectBase::kError);
+ }
+
+ ObjectType SamplerBase::GetType() const {
+ return ObjectType::Sampler;
+ }
+
+ bool SamplerBase::IsComparison() const {
+ return mCompareFunction != wgpu::CompareFunction::Undefined;
+ }
+
+ bool SamplerBase::IsFiltering() const {
+ return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
+ mMipmapFilter == wgpu::FilterMode::Linear;
+ }
+
+ size_t SamplerBase::ComputeContentHash() {
+ ObjectContentHasher recorder;
+ recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
+ mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction,
+ mMaxAnisotropy);
+ return recorder.GetContentHash();
+ }
+
+ bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
+ if (a == b) {
+ return true;
+ }
+
+ ASSERT(!std::isnan(a->mLodMinClamp));
+ ASSERT(!std::isnan(b->mLodMinClamp));
+ ASSERT(!std::isnan(a->mLodMaxClamp));
+ ASSERT(!std::isnan(b->mLodMaxClamp));
+
+ return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
+ a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
+ a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
+ a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
+ a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Sampler.h b/chromium/third_party/dawn/src/dawn/native/Sampler.h
new file mode 100644
index 00000000000..e21b52ce7a2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Sampler.h
@@ -0,0 +1,80 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SAMPLER_H_
+#define DAWNNATIVE_SAMPLER_H_
+
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
+
+ class SamplerBase : public ApiObjectBase, public CachedObject {
+ public:
+ SamplerBase(DeviceBase* device,
+ const SamplerDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
+ ~SamplerBase() override;
+
+ static SamplerBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ bool IsComparison() const;
+ bool IsFiltering() const;
+
+ // Functions necessary for the unordered_set<SamplerBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const SamplerBase* a, const SamplerBase* b) const;
+ };
+
+ uint16_t GetMaxAnisotropy() const {
+ return mMaxAnisotropy;
+ }
+
+ protected:
+ // Constructor used only for mocking and testing.
+ SamplerBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ private:
+ SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
+ wgpu::AddressMode mAddressModeU;
+ wgpu::AddressMode mAddressModeV;
+ wgpu::AddressMode mAddressModeW;
+ wgpu::FilterMode mMagFilter;
+ wgpu::FilterMode mMinFilter;
+ wgpu::FilterMode mMipmapFilter;
+ float mLodMinClamp;
+ float mLodMaxClamp;
+ wgpu::CompareFunction mCompareFunction;
+ uint16_t mMaxAnisotropy;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SAMPLER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp
new file mode 100644
index 00000000000..be536836ade
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.cpp
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ScratchBuffer.h"
+
+#include "dawn/native/Device.h"
+
+namespace dawn::native {
+
+ ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
+ : mDevice(device), mUsage(usage) {
+ }
+
+ ScratchBuffer::~ScratchBuffer() = default;
+
+ void ScratchBuffer::Reset() {
+ mBuffer = nullptr;
+ }
+
+ MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
+ if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
+ BufferDescriptor descriptor;
+ descriptor.size = capacity;
+ descriptor.usage = mUsage;
+ DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
+ mBuffer->SetIsDataInitialized();
+ }
+ return {};
+ }
+
+ BufferBase* ScratchBuffer::GetBuffer() const {
+ ASSERT(mBuffer.Get() != nullptr);
+ return mBuffer.Get();
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h
new file mode 100644
index 00000000000..7845022b1e8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ScratchBuffer.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SCRATCHBUFFER_H_
+#define DAWNNATIVE_SCRATCHBUFFER_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Buffer.h"
+
+#include <cstdint>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
+ // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
+    // be careful not to expose uninitialized bytes to client shaders.
+ class ScratchBuffer {
+ public:
+ // Note that this object does not retain a reference to `device`, so `device` MUST outlive
+ // this object.
+ ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
+ ~ScratchBuffer();
+
+ // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
+ // fresh buffer.
+ void Reset();
+
+ // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
+ // `capacity` bytes of storage.
+ MaybeError EnsureCapacity(uint64_t capacity);
+
+ BufferBase* GetBuffer() const;
+
+ private:
+ DeviceBase* const mDevice;
+ const wgpu::BufferUsage mUsage;
+ Ref<BufferBase> mBuffer;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SCRATCHBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp
new file mode 100644
index 00000000000..3b46d156ba3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ShaderModule.cpp
@@ -0,0 +1,1333 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/ShaderModule.h"
+
+#include "absl/strings/str_format.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectContentHasher.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/TintUtils.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native {
+
+ namespace {
+
+ tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return tint::transform::VertexFormat::kUint8x2;
+ case wgpu::VertexFormat::Uint8x4:
+ return tint::transform::VertexFormat::kUint8x4;
+ case wgpu::VertexFormat::Sint8x2:
+ return tint::transform::VertexFormat::kSint8x2;
+ case wgpu::VertexFormat::Sint8x4:
+ return tint::transform::VertexFormat::kSint8x4;
+ case wgpu::VertexFormat::Unorm8x2:
+ return tint::transform::VertexFormat::kUnorm8x2;
+ case wgpu::VertexFormat::Unorm8x4:
+ return tint::transform::VertexFormat::kUnorm8x4;
+ case wgpu::VertexFormat::Snorm8x2:
+ return tint::transform::VertexFormat::kSnorm8x2;
+ case wgpu::VertexFormat::Snorm8x4:
+ return tint::transform::VertexFormat::kSnorm8x4;
+ case wgpu::VertexFormat::Uint16x2:
+ return tint::transform::VertexFormat::kUint16x2;
+ case wgpu::VertexFormat::Uint16x4:
+ return tint::transform::VertexFormat::kUint16x4;
+ case wgpu::VertexFormat::Sint16x2:
+ return tint::transform::VertexFormat::kSint16x2;
+ case wgpu::VertexFormat::Sint16x4:
+ return tint::transform::VertexFormat::kSint16x4;
+ case wgpu::VertexFormat::Unorm16x2:
+ return tint::transform::VertexFormat::kUnorm16x2;
+ case wgpu::VertexFormat::Unorm16x4:
+ return tint::transform::VertexFormat::kUnorm16x4;
+ case wgpu::VertexFormat::Snorm16x2:
+ return tint::transform::VertexFormat::kSnorm16x2;
+ case wgpu::VertexFormat::Snorm16x4:
+ return tint::transform::VertexFormat::kSnorm16x4;
+ case wgpu::VertexFormat::Float16x2:
+ return tint::transform::VertexFormat::kFloat16x2;
+ case wgpu::VertexFormat::Float16x4:
+ return tint::transform::VertexFormat::kFloat16x4;
+ case wgpu::VertexFormat::Float32:
+ return tint::transform::VertexFormat::kFloat32;
+ case wgpu::VertexFormat::Float32x2:
+ return tint::transform::VertexFormat::kFloat32x2;
+ case wgpu::VertexFormat::Float32x3:
+ return tint::transform::VertexFormat::kFloat32x3;
+ case wgpu::VertexFormat::Float32x4:
+ return tint::transform::VertexFormat::kFloat32x4;
+ case wgpu::VertexFormat::Uint32:
+ return tint::transform::VertexFormat::kUint32;
+ case wgpu::VertexFormat::Uint32x2:
+ return tint::transform::VertexFormat::kUint32x2;
+ case wgpu::VertexFormat::Uint32x3:
+ return tint::transform::VertexFormat::kUint32x3;
+ case wgpu::VertexFormat::Uint32x4:
+ return tint::transform::VertexFormat::kUint32x4;
+ case wgpu::VertexFormat::Sint32:
+ return tint::transform::VertexFormat::kSint32;
+ case wgpu::VertexFormat::Sint32x2:
+ return tint::transform::VertexFormat::kSint32x2;
+ case wgpu::VertexFormat::Sint32x3:
+ return tint::transform::VertexFormat::kSint32x3;
+ case wgpu::VertexFormat::Sint32x4:
+ return tint::transform::VertexFormat::kSint32x4;
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
+ switch (mode) {
+ case wgpu::VertexStepMode::Vertex:
+ return tint::transform::VertexStepMode::kVertex;
+ case wgpu::VertexStepMode::Instance:
+ return tint::transform::VertexStepMode::kInstance;
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
+ tint::ast::PipelineStage stage) {
+ switch (stage) {
+ case tint::ast::PipelineStage::kVertex:
+ return SingleShaderStage::Vertex;
+ case tint::ast::PipelineStage::kFragment:
+ return SingleShaderStage::Fragment;
+ case tint::ast::PipelineStage::kCompute:
+ return SingleShaderStage::Compute;
+ case tint::ast::PipelineStage::kNone:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ BindingInfoType TintResourceTypeToBindingInfoType(
+ tint::inspector::ResourceBinding::ResourceType type) {
+ switch (type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return BindingInfoType::Buffer;
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ return BindingInfoType::Sampler;
+ case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+ return BindingInfoType::Texture;
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return BindingInfoType::StorageTexture;
+ case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+ return BindingInfoType::ExternalTexture;
+
+ default:
+ UNREACHABLE();
+ return BindingInfoType::Buffer;
+ }
+ }
+
+ wgpu::TextureFormat TintImageFormatToTextureFormat(
+ tint::inspector::ResourceBinding::TexelFormat format) {
+ switch (format) {
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
+ return wgpu::TextureFormat::R32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
+ return wgpu::TextureFormat::R32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
+ return wgpu::TextureFormat::R32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
+ return wgpu::TextureFormat::RGBA8Snorm;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
+ return wgpu::TextureFormat::RGBA8Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
+ return wgpu::TextureFormat::RGBA8Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
+ return wgpu::TextureFormat::RG32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
+ return wgpu::TextureFormat::RG32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
+ return wgpu::TextureFormat::RG32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
+ return wgpu::TextureFormat::RGBA16Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
+ return wgpu::TextureFormat::RGBA16Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
+ return wgpu::TextureFormat::RGBA16Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
+ return wgpu::TextureFormat::RGBA32Uint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
+ return wgpu::TextureFormat::RGBA32Sint;
+ case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
+ return wgpu::TextureFormat::RGBA32Float;
+ case tint::inspector::ResourceBinding::TexelFormat::kNone:
+ return wgpu::TextureFormat::Undefined;
+
+ default:
+ UNREACHABLE();
+ return wgpu::TextureFormat::Undefined;
+ }
+ }
+
+ wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
+ tint::inspector::ResourceBinding::TextureDimension dim) {
+ switch (dim) {
+ case tint::inspector::ResourceBinding::TextureDimension::k1d:
+ return wgpu::TextureViewDimension::e1D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2d:
+ return wgpu::TextureViewDimension::e2D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+ return wgpu::TextureViewDimension::e2DArray;
+ case tint::inspector::ResourceBinding::TextureDimension::k3d:
+ return wgpu::TextureViewDimension::e3D;
+ case tint::inspector::ResourceBinding::TextureDimension::kCube:
+ return wgpu::TextureViewDimension::Cube;
+ case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+ return wgpu::TextureViewDimension::CubeArray;
+ case tint::inspector::ResourceBinding::TextureDimension::kNone:
+ return wgpu::TextureViewDimension::Undefined;
+ }
+ UNREACHABLE();
+ }
+
+ SampleTypeBit TintSampledKindToSampleTypeBit(
+ tint::inspector::ResourceBinding::SampledKind s) {
+ switch (s) {
+ case tint::inspector::ResourceBinding::SampledKind::kSInt:
+ return SampleTypeBit::Sint;
+ case tint::inspector::ResourceBinding::SampledKind::kUInt:
+ return SampleTypeBit::Uint;
+ case tint::inspector::ResourceBinding::SampledKind::kFloat:
+ return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+ case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+ return SampleTypeBit::None;
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return wgpu::TextureComponentType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return wgpu::TextureComponentType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return wgpu::TextureComponentType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return VertexFormatBaseType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return VertexFormatBaseType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return VertexFormatBaseType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ return wgpu::BufferBindingType::Uniform;
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ return wgpu::BufferBindingType::Storage;
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return wgpu::BufferBindingType::ReadOnlyStorage;
+ default:
+ return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return wgpu::StorageTextureAccess::WriteOnly;
+ default:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert non-storage texture resource type");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return InterStageComponentType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return InterStageComponentType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return InterStageComponentType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' component type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
+ tint::inspector::CompositionType type) {
+ switch (type) {
+ case tint::inspector::CompositionType::kScalar:
+ return 1u;
+ case tint::inspector::CompositionType::kVec2:
+ return 2u;
+ case tint::inspector::CompositionType::kVec3:
+ return 3u;
+ case tint::inspector::CompositionType::kVec4:
+ return 4u;
+ case tint::inspector::CompositionType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+                        "Attempted to convert 'Unknown' composition type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
+ tint::inspector::InterpolationType type) {
+ switch (type) {
+ case tint::inspector::InterpolationType::kPerspective:
+ return InterpolationType::Perspective;
+ case tint::inspector::InterpolationType::kLinear:
+ return InterpolationType::Linear;
+ case tint::inspector::InterpolationType::kFlat:
+ return InterpolationType::Flat;
+ case tint::inspector::InterpolationType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' interpolation type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
+ tint::inspector::InterpolationSampling type) {
+ switch (type) {
+ case tint::inspector::InterpolationSampling::kNone:
+ return InterpolationSampling::None;
+ case tint::inspector::InterpolationSampling::kCenter:
+ return InterpolationSampling::Center;
+ case tint::inspector::InterpolationSampling::kCentroid:
+ return InterpolationSampling::Centroid;
+ case tint::inspector::InterpolationSampling::kSample:
+ return InterpolationSampling::Sample;
+ case tint::inspector::InterpolationSampling::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' interpolation sampling type from Tint");
+ }
+ UNREACHABLE();
+ }
+
+ EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
+ tint::inspector::OverridableConstant::Type type) {
+ switch (type) {
+ case tint::inspector::OverridableConstant::Type::kBool:
+ return EntryPointMetadata::OverridableConstant::Type::Boolean;
+ case tint::inspector::OverridableConstant::Type::kFloat32:
+ return EntryPointMetadata::OverridableConstant::Type::Float32;
+ case tint::inspector::OverridableConstant::Type::kInt32:
+ return EntryPointMetadata::OverridableConstant::Type::Int32;
+ case tint::inspector::OverridableConstant::Type::kUint32:
+ return EntryPointMetadata::OverridableConstant::Type::Uint32;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
+ OwnedCompilationMessages* outMessages) {
+ tint::Program program = tint::reader::wgsl::Parse(file);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
+ if (!program.IsValid()) {
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
+ program.Diagnostics().str(), file->content.data);
+ }
+
+ return std::move(program);
+ }
+
+ ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
+ OwnedCompilationMessages* outMessages) {
+ tint::Program program = tint::reader::spirv::Parse(spirv);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
+ if (!program.IsValid()) {
+ return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
+ program.Diagnostics().str());
+ }
+
+ return std::move(program);
+ }
+
+ std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
+ const BindGroupLayoutBase* layout) {
+ std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
+ uint32_t packedIdx = 0;
+
+ for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+ if (bindingInfo.buffer.minBindingSize != 0) {
+ // Skip bindings that have minimum buffer size set in the layout
+ continue;
+ }
+
+ ASSERT(packedIdx < requiredBufferSizes.size());
+ const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
+ if (shaderInfo != shaderBindings.end()) {
+ requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
+ } else {
+ // We have to include buffers if they are included in the bind group's
+ // packed vector. We don't actually need to check these at draw time, so
+ // if this is a problem in the future we can optimize it further.
+ requiredBufferSizes[packedIdx] = 0;
+ }
+ ++packedIdx;
+ }
+
+ return requiredBufferSizes;
+ }
+
+ MaybeError ValidateCompatibilityOfSingleBindingWithLayout(
+ const DeviceBase* device,
+ const BindGroupLayoutBase* layout,
+ SingleShaderStage entryPointStage,
+ BindingNumber bindingNumber,
+ const ShaderBindingInfo& shaderInfo) {
+ const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
+
+ // An external texture binding found in the shader will later be expanded into multiple
+ // bindings at compile time. This expansion will have already happened in the bgl - so
+ // the shader and bgl will always mismatch at this point. Expansion info is contained in
+ // the bgl object, so we can still verify the bgl used to have an external texture in
+ // the slot corresponding to the shader reflection.
+ if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
+ // If an external texture binding used to exist in the bgl, it will be found as a
+ // key in the ExternalTextureBindingExpansions map.
+ ExternalTextureBindingExpansionMap expansions =
+ layout->GetExternalTextureBindingExpansionMap();
+ std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+ expansions.find(bindingNumber);
+ // TODO(dawn:563): Provide info about the binding types.
+ DAWN_INVALID_IF(it == expansions.end(),
+ "Binding type in the shader (texture_external) doesn't match the "
+ "type in the layout.");
+
+ return {};
+ }
+
+ const auto& bindingIt = layoutBindings.find(bindingNumber);
+ DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.",
+ layout);
+
+ BindingIndex bindingIndex(bindingIt->second);
+ const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+
+ // TODO(dawn:563): Provide info about the binding types.
+ DAWN_INVALID_IF(
+ layoutInfo.bindingType != shaderInfo.bindingType,
+ "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
+ "in the layout.");
+
+ ExternalTextureBindingExpansionMap expansions =
+ layout->GetExternalTextureBindingExpansionMap();
+ DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
+ "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
+ "match the type in the layout.");
+
+ // TODO(dawn:563): Provide info about the visibility.
+ DAWN_INVALID_IF(
+ (layoutInfo.visibility & StageBit(entryPointStage)) == 0,
+ "Entry point's stage is not in the binding visibility in the layout (%s)",
+ layoutInfo.visibility);
+
+ switch (layoutInfo.bindingType) {
+ case BindingInfoType::Texture: {
+ DAWN_INVALID_IF(
+ layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
+                        "The layout's binding multisampled flag (%u) doesn't match the shader's "
+                        "multisampled flag (%u)",
+ layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
+
+ // TODO(dawn:563): Provide info about the sample types.
+ DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
+ shaderInfo.texture.compatibleSampleTypes) == 0,
+ "The sample type in the shader is not compatible with the "
+ "sample type of the layout.");
+
+ DAWN_INVALID_IF(
+ layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
+ "The shader's binding dimension (%s) doesn't match the shader's binding "
+ "dimension (%s).",
+ layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+ ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+
+ DAWN_INVALID_IF(
+ layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
+ "The layout's binding access (%s) isn't compatible with the shader's "
+ "binding access (%s).",
+ layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
+
+ DAWN_INVALID_IF(
+ layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
+ "The layout's binding format (%s) doesn't match the shader's binding "
+ "format (%s).",
+ layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
+
+ DAWN_INVALID_IF(layoutInfo.storageTexture.viewDimension !=
+ shaderInfo.storageTexture.viewDimension,
+ "The layout's binding dimension (%s) doesn't match the "
+ "shader's binding dimension (%s).",
+ layoutInfo.storageTexture.viewDimension,
+ shaderInfo.storageTexture.viewDimension);
+ break;
+ }
+
+ case BindingInfoType::Buffer: {
+ // Binding mismatch between shader and bind group is invalid. For example, a
+ // writable binding in the shader with a readonly storage buffer in the bind
+ // group layout is invalid. However, a readonly binding in the shader with a
+ // writable storage buffer in the bind group layout is valid, a storage
+ // binding in the shader with an internal storage buffer in the bind group
+ // layout is also valid.
+ bool validBindingConversion =
+ (layoutInfo.buffer.type == wgpu::BufferBindingType::Storage &&
+ shaderInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage) ||
+ (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
+ shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
+
+ DAWN_INVALID_IF(
+ layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
+ "The buffer type in the shader (%s) is not compatible with the type in the "
+ "layout (%s).",
+ shaderInfo.buffer.type, layoutInfo.buffer.type);
+
+ DAWN_INVALID_IF(
+ layoutInfo.buffer.minBindingSize != 0 &&
+ shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
+ "The shader uses more bytes of the buffer (%u) than the layout's "
+ "minBindingSize (%u).",
+ shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
+ break;
+ }
+
+ case BindingInfoType::Sampler:
+ DAWN_INVALID_IF(
+ (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
+ shaderInfo.sampler.isComparison,
+ "The sampler type in the shader (comparison: %u) doesn't match the type in "
+ "the layout (comparison: %u).",
+ shaderInfo.sampler.isComparison,
+ layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
+ break;
+
+ case BindingInfoType::ExternalTexture: {
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ return {};
+ }
+    // Validates that every binding the entry point declares for bind group `group` is
+    // compatible with the given BindGroupLayout. Stops at the first incompatible binding,
+    // wrapping the error with the offending (group, binding) pair for diagnostics.
+    MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
+                                                        BindGroupIndex group,
+                                                        const EntryPointMetadata& entryPoint,
+                                                        const BindGroupLayoutBase* layout) {
+        // Iterate over all bindings used by this group in the shader, and find the
+        // corresponding binding in the BindGroupLayout, if it exists.
+        for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
+            DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
+                                 device, layout, entryPoint.stage, bindingId, bindingInfo),
+                             "validating that the entry-point's declaration for [[group(%u), "
+                             "binding(%u)]] matches %s",
+                             static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId),
+                             layout);
+        }
+
+        return {};
+    }
+
+    // Runs the Tint inspector over a valid `program` and builds one EntryPointMetadata per
+    // entry point: stage, overridable constants, compute workgroup size, vertex inputs,
+    // inter-stage variables, fragment outputs, and resource bindings. Every reflected value
+    // is validated against the device's limits; the first violation aborts reflection.
+    ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
+        const DeviceBase* device,
+        const tint::Program* program) {
+        ASSERT(program->IsValid());
+
+        const CombinedLimits& limits = device->GetLimits();
+
+        EntryPointMetadataTable result;
+
+        tint::inspector::Inspector inspector(program);
+        auto entryPoints = inspector.GetEntryPoints();
+        DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
+                        inspector.error());
+
+        // TODO(dawn:563): use DAWN_TRY_CONTEXT to output the name of the entry point we're
+        // reflecting.
+        // Locations are 0-based, so the maximum location is the variable count minus one.
+        constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
+        for (auto& entryPoint : entryPoints) {
+            ASSERT(result.count(entryPoint.name) == 0);
+
+            auto metadata = std::make_unique<EntryPointMetadata>();
+
+            // Reflect pipeline-overridable constants: record numeric ID, type, and default
+            // value, keyed by numeric ID when one is specified, otherwise by name.
+            if (!entryPoint.overridable_constants.empty()) {
+                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                                "Pipeline overridable constants are disallowed because they "
+                                "are partially implemented.");
+
+                const auto& name2Id = inspector.GetConstantNameToIdMap();
+                const auto& id2Scalar = inspector.GetConstantIDs();
+
+                for (auto& c : entryPoint.overridable_constants) {
+                    uint32_t id = name2Id.at(c.name);
+                    OverridableConstantScalar defaultValue;
+                    if (c.is_initialized) {
+                        // if it is initialized, the scalar must exist
+                        const auto& scalar = id2Scalar.at(id);
+                        if (scalar.IsBool()) {
+                            defaultValue.b = scalar.AsBool();
+                        } else if (scalar.IsU32()) {
+                            defaultValue.u32 = scalar.AsU32();
+                        } else if (scalar.IsI32()) {
+                            defaultValue.i32 = scalar.AsI32();
+                        } else if (scalar.IsFloat()) {
+                            defaultValue.f32 = scalar.AsFloat();
+                        } else {
+                            UNREACHABLE();
+                        }
+                    }
+                    EntryPointMetadata::OverridableConstant constant = {
+                        id, FromTintOverridableConstantType(c.type), c.is_initialized,
+                        defaultValue};
+
+                    std::string identifier =
+                        c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
+                    metadata->overridableConstants[identifier] = constant;
+
+                    // Track which constants still need a value at pipeline-creation time.
+                    if (!c.is_initialized) {
+                        auto [_, inserted] =
+                            metadata->uninitializedOverridableConstants.emplace(
+                                std::move(identifier));
+                        // The insertion should have taken place
+                        ASSERT(inserted);
+                    } else {
+                        auto [_, inserted] = metadata->initializedOverridableConstants.emplace(
+                            std::move(identifier));
+                        // The insertion should have taken place
+                        ASSERT(inserted);
+                    }
+                }
+            }
+
+            DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
+
+            // Compute stage: validate the workgroup size and workgroup storage use against
+            // per-dimension, total-invocation, and storage-size limits.
+            if (metadata->stage == SingleShaderStage::Compute) {
+                DAWN_INVALID_IF(
+                    entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
+                        entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
+                        entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
+                    "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
+                    "maximum allowed (%u, %u, %u).",
+                    entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
+                    entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
+                    limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
+
+                // Dimensions have already been validated against their individual limits above.
+                // Cast to uint64_t to avoid overflow in this multiplication.
+                uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
+                                          entryPoint.workgroup_size_y *
+                                          entryPoint.workgroup_size_z;
+                DAWN_INVALID_IF(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
+                                "The total number of workgroup invocations (%u) exceeds the "
+                                "maximum allowed (%u).",
+                                numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
+
+                const size_t workgroupStorageSize =
+                    inspector.GetWorkgroupStorageSize(entryPoint.name);
+                DAWN_INVALID_IF(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
+                                "The total use of workgroup storage (%u bytes) is larger than "
+                                "the maximum allowed (%u bytes).",
+                                workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
+
+                metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
+                metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
+                metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
+
+                metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+            }
+
+            // Vertex stage: record used vertex attribute locations and inter-stage outputs,
+            // and bound the total inter-stage component count.
+            if (metadata->stage == SingleShaderStage::Vertex) {
+                for (const auto& inputVar : entryPoint.input_variables) {
+                    DAWN_INVALID_IF(
+                        !inputVar.has_location_decoration,
+                        "Vertex input variable \"%s\" doesn't have a location decoration.",
+                        inputVar.name);
+
+                    uint32_t unsanitizedLocation = inputVar.location_decoration;
+                    DAWN_INVALID_IF(unsanitizedLocation >= kMaxVertexAttributes,
+                                    "Vertex input variable \"%s\" has a location (%u) that "
+                                    "exceeds the maximum (%u)",
+                                    inputVar.name, unsanitizedLocation, kMaxVertexAttributes);
+                    VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
+
+                    DAWN_TRY_ASSIGN(
+                        metadata->vertexInputBaseTypes[location],
+                        TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
+                    metadata->usedVertexInputs.set(location);
+                }
+
+                // [[position]] must be declared in a vertex shader but is not exposed as an
+                // output variable by Tint so we directly add its components to the total.
+                uint32_t totalInterStageShaderComponents = 4;
+                for (const auto& outputVar : entryPoint.output_variables) {
+                    DAWN_INVALID_IF(
+                        !outputVar.has_location_decoration,
+                        "Vertex ouput variable \"%s\" doesn't have a location decoration.",
+                        outputVar.name);
+
+                    uint32_t location = outputVar.location_decoration;
+                    DAWN_INVALID_IF(location > kMaxInterStageShaderLocation,
+                                    "Vertex output variable \"%s\" has a location (%u) that "
+                                    "exceeds the maximum (%u).",
+                                    outputVar.name, location, kMaxInterStageShaderLocation);
+
+                    metadata->usedInterStageVariables.set(location);
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].baseType,
+                        TintComponentTypeToInterStageComponentType(outputVar.component_type));
+                    DAWN_TRY_ASSIGN(metadata->interStageVariables[location].componentCount,
+                                    TintCompositionTypeToInterStageComponentCount(
+                                        outputVar.composition_type));
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].interpolationType,
+                        TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].interpolationSampling,
+                        TintInterpolationSamplingToInterpolationSamplingType(
+                            outputVar.interpolation_sampling));
+
+                    totalInterStageShaderComponents +=
+                        metadata->interStageVariables[location].componentCount;
+                }
+
+                DAWN_INVALID_IF(
+                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                    "Total vertex output components count (%u) exceeds the maximum (%u).",
+                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+            }
+
+            // Fragment stage: record inter-stage inputs (including builtin inputs, which
+            // consume components) and color-attachment outputs.
+            if (metadata->stage == SingleShaderStage::Fragment) {
+                uint32_t totalInterStageShaderComponents = 0;
+                for (const auto& inputVar : entryPoint.input_variables) {
+                    DAWN_INVALID_IF(
+                        !inputVar.has_location_decoration,
+                        "Fragment input variable \"%s\" doesn't have a location decoration.",
+                        inputVar.name);
+
+                    uint32_t location = inputVar.location_decoration;
+                    DAWN_INVALID_IF(location > kMaxInterStageShaderLocation,
+                                    "Fragment input variable \"%s\" has a location (%u) that "
+                                    "exceeds the maximum (%u).",
+                                    inputVar.name, location, kMaxInterStageShaderLocation);
+
+                    metadata->usedInterStageVariables.set(location);
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].baseType,
+                        TintComponentTypeToInterStageComponentType(inputVar.component_type));
+                    DAWN_TRY_ASSIGN(metadata->interStageVariables[location].componentCount,
+                                    TintCompositionTypeToInterStageComponentCount(
+                                        inputVar.composition_type));
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].interpolationType,
+                        TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
+                    DAWN_TRY_ASSIGN(
+                        metadata->interStageVariables[location].interpolationSampling,
+                        TintInterpolationSamplingToInterpolationSamplingType(
+                            inputVar.interpolation_sampling));
+
+                    totalInterStageShaderComponents +=
+                        metadata->interStageVariables[location].componentCount;
+                }
+
+                // Builtin fragment inputs also count against the inter-stage component budget:
+                // front_facing / sample_mask / sample_index use 1 component, position uses 4.
+                if (entryPoint.front_facing_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.input_sample_mask_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.sample_index_used) {
+                    totalInterStageShaderComponents += 1;
+                }
+                if (entryPoint.input_position_used) {
+                    totalInterStageShaderComponents += 4;
+                }
+
+                DAWN_INVALID_IF(
+                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                    "Total fragment input components count (%u) exceeds the maximum (%u).",
+                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+
+                for (const auto& outputVar : entryPoint.output_variables) {
+                    DAWN_INVALID_IF(
+                        !outputVar.has_location_decoration,
+                        "Fragment input variable \"%s\" doesn't have a location decoration.",
+                        outputVar.name);
+
+                    uint32_t unsanitizedAttachment = outputVar.location_decoration;
+                    DAWN_INVALID_IF(unsanitizedAttachment >= kMaxColorAttachments,
+                                    "Fragment output variable \"%s\" has a location (%u) that "
+                                    "exceeds the maximum (%u).",
+                                    outputVar.name, unsanitizedAttachment,
+                                    kMaxColorAttachments);
+                    ColorAttachmentIndex attachment(
+                        static_cast<uint8_t>(unsanitizedAttachment));
+
+                    DAWN_TRY_ASSIGN(
+                        metadata->fragmentOutputVariables[attachment].baseType,
+                        TintComponentTypeToTextureComponentType(outputVar.component_type));
+                    uint32_t componentCount;
+                    DAWN_TRY_ASSIGN(componentCount,
+                                    TintCompositionTypeToInterStageComponentCount(
+                                        outputVar.composition_type));
+                    // componentCount should be no larger than 4u
+                    ASSERT(componentCount <= 4u);
+                    metadata->fragmentOutputVariables[attachment].componentCount =
+                        componentCount;
+                    metadata->fragmentOutputsWritten.set(attachment);
+                }
+            }
+
+            // Reflect each resource binding used by the entry point into metadata->bindings,
+            // translating Tint's resource description into Dawn's ShaderBindingInfo.
+            for (const tint::inspector::ResourceBinding& resource :
+                 inspector.GetResourceBindings(entryPoint.name)) {
+                DAWN_INVALID_IF(resource.bind_group >= kMaxBindGroups,
+                                "The entry-point uses a binding with a group decoration (%u) "
+                                "that exceeds the maximum (%u).",
+                                resource.bind_group, kMaxBindGroups);
+
+                BindingNumber bindingNumber(resource.binding);
+                BindGroupIndex bindGroupIndex(resource.bind_group);
+
+                DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+                                "Binding number (%u) exceeds the maximum binding number (%u).",
+                                uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+
+                const auto& [binding, inserted] = metadata->bindings[bindGroupIndex].emplace(
+                    bindingNumber, ShaderBindingInfo{});
+                DAWN_INVALID_IF(
+                    !inserted,
+                    "Entry-point has a duplicate binding for (group:%u, binding:%u).",
+                    resource.binding, resource.bind_group);
+
+                ShaderBindingInfo* info = &binding->second;
+                info->bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+
+                switch (info->bindingType) {
+                    case BindingInfoType::Buffer:
+                        info->buffer.minBindingSize = resource.size_no_padding;
+                        DAWN_TRY_ASSIGN(info->buffer.type, TintResourceTypeToBufferBindingType(
+                                                               resource.resource_type));
+                        break;
+                    case BindingInfoType::Sampler:
+                        switch (resource.resource_type) {
+                            case tint::inspector::ResourceBinding::ResourceType::kSampler:
+                                info->sampler.isComparison = false;
+                                break;
+                            case tint::inspector::ResourceBinding::ResourceType::
+                                kComparisonSampler:
+                                info->sampler.isComparison = true;
+                                break;
+                            default:
+                                UNREACHABLE();
+                        }
+                        break;
+                    case BindingInfoType::Texture:
+                        info->texture.viewDimension =
+                            TintTextureDimensionToTextureViewDimension(resource.dim);
+                        // Depth textures are only compatible with the Depth sample type;
+                        // other textures derive their sample types from the sampled kind.
+                        if (resource.resource_type ==
+                                tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
+                            resource.resource_type ==
+                                tint::inspector::ResourceBinding::ResourceType::
+                                    kDepthMultisampledTexture) {
+                            info->texture.compatibleSampleTypes = SampleTypeBit::Depth;
+                        } else {
+                            info->texture.compatibleSampleTypes =
+                                TintSampledKindToSampleTypeBit(resource.sampled_kind);
+                        }
+                        info->texture.multisampled =
+                            resource.resource_type == tint::inspector::ResourceBinding::
+                                                          ResourceType::kMultisampledTexture ||
+                            resource.resource_type ==
+                                tint::inspector::ResourceBinding::ResourceType::
+                                    kDepthMultisampledTexture;
+
+                        break;
+                    case BindingInfoType::StorageTexture:
+                        DAWN_TRY_ASSIGN(
+                            info->storageTexture.access,
+                            TintResourceTypeToStorageTextureAccess(resource.resource_type));
+                        info->storageTexture.format =
+                            TintImageFormatToTextureFormat(resource.image_format);
+                        info->storageTexture.viewDimension =
+                            TintTextureDimensionToTextureViewDimension(resource.dim);
+
+                        break;
+                    case BindingInfoType::ExternalTexture:
+                        break;
+                    default:
+                        return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
+                }
+            }
+
+            // Record which (sampler, texture) binding points are statically used together;
+            // this feeds the filtering-sampler compatibility check at pipeline creation.
+            std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
+                inspector.GetSamplerTextureUses(entryPoint.name);
+            metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
+            std::transform(
+                samplerTextureUses.begin(), samplerTextureUses.end(),
+                std::back_inserter(metadata->samplerTexturePairs),
+                [](const tint::inspector::SamplerTexturePair& pair) {
+                    EntryPointMetadata::SamplerTexturePair result;
+                    result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
+                                      BindingNumber(pair.sampler_binding_point.binding)};
+                    result.texture = {BindGroupIndex(pair.texture_binding_point.group),
+                                      BindingNumber(pair.texture_binding_point.binding)};
+                    return result;
+                });
+
+            result[entryPoint.name] = std::move(metadata);
+        }
+        return std::move(result);
+    }
+ } // anonymous namespace
+
+    // ShaderModuleParseResult is move-only plumbing that carries the parsed tint::Program
+    // (and its source file) from descriptor validation into ShaderModuleBase::InitializeBase.
+    ShaderModuleParseResult::ShaderModuleParseResult() = default;
+    ShaderModuleParseResult::~ShaderModuleParseResult() = default;
+
+    ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
+
+    ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
+        default;
+
+    // Returns true once a tint::Program has been successfully parsed into this result.
+    bool ShaderModuleParseResult::HasParsedShader() const {
+        return tintProgram != nullptr;
+    }
+
+    // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
+    // long as tint diagnostics are inspected / printed.
+    class TintSource {
+      public:
+        // Forwards all arguments to the tint::Source::File constructor.
+        template <typename... ARGS>
+        TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
+        }
+
+        tint::Source::File file;
+    };
+
+    // Validates a ShaderModuleDescriptor's chained SPIR-V or WGSL sub-descriptor and parses
+    // the shader into `parseResult`. Tint diagnostics produced during parsing are appended
+    // to `outMessages` when it is non-null.
+    MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+                                              const ShaderModuleDescriptor* descriptor,
+                                              ShaderModuleParseResult* parseResult,
+                                              OwnedCompilationMessages* outMessages) {
+        ASSERT(parseResult != nullptr);
+
+        const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+        DAWN_INVALID_IF(chainedDescriptor == nullptr,
+                        "Shader module descriptor missing chained descriptor");
+
+        // For now only a single SPIRV or WGSL subdescriptor is allowed.
+        DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                     wgpu::SType::ShaderModuleWGSLDescriptor));
+
+        ScopedTintICEHandler scopedICEHandler(device);
+
+        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+        FindInChain(chainedDescriptor, &spirvDesc);
+        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+        FindInChain(chainedDescriptor, &wgslDesc);
+
+        // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
+        // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
+        // newWgslCode must outlive newWgslDesc.source, which points into it.
+        ShaderModuleWGSLDescriptor newWgslDesc;
+        std::string newWgslCode;
+        if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
+            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+
+            tint::writer::wgsl::Options options;
+            auto result = tint::writer::wgsl::Generate(&program, options);
+            DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
+
+            newWgslCode = std::move(result.wgsl);
+            newWgslDesc.source = newWgslCode.c_str();
+
+            spirvDesc = nullptr;
+            wgslDesc = &newWgslDesc;
+        }
+
+        if (spirvDesc) {
+            DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv),
+                            "SPIR-V is disallowed.");
+
+            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+        } else if (wgslDesc) {
+            // The TintSource keeps the WGSL text alive for later diagnostic printing.
+            auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
+
+            if (device->IsToggleEnabled(Toggle::DumpShaders)) {
+                std::ostringstream dumpedMsg;
+                dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
+                device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+            }
+
+            tint::Program program;
+            DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
+            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+            parseResult->tintSource = std::move(tintSource);
+        }
+
+        return {};
+    }
+
+    // Computes, per bind group of `layout`, the minimum buffer sizes the entry point
+    // requires, so that bind groups can be validated against shader usage at draw time.
+    RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+                                                            const PipelineLayoutBase* layout) {
+        RequiredBufferSizes bufferSizes;
+        // Only groups present in the pipeline layout are considered.
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
+                                                            layout->GetBindGroupLayout(group));
+        }
+
+        return bufferSizes;
+    }
+
+    // Runs a Tint transform (or transform manager) over `program` with `inputs`, collecting
+    // diagnostics into `outMessages` and transform outputs into `outputs` (both optional).
+    // Fails if the transformed program is invalid.
+    ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+                                               const tint::Program* program,
+                                               const tint::transform::DataMap& inputs,
+                                               tint::transform::DataMap* outputs,
+                                               OwnedCompilationMessages* outMessages) {
+        tint::transform::Output output = transform->Run(program, inputs);
+        if (outMessages != nullptr) {
+            outMessages->AddMessages(output.program.Diagnostics());
+        }
+        DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
+                        output.program.Diagnostics().str());
+        if (outputs != nullptr) {
+            *outputs = std::move(output.data);
+        }
+        return std::move(output.program);
+    }
+
+    // Builds the tint VertexPulling transform configuration from the render pipeline's
+    // vertex state (buffer layouts and attributes) and adds it to `transformInputs`.
+    void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+                                         const std::string& entryPoint,
+                                         BindGroupIndex pullingBufferBindingSet,
+                                         tint::transform::DataMap* transformInputs) {
+        tint::transform::VertexPulling::Config cfg;
+        cfg.entry_point_name = entryPoint;
+        cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
+
+        // Mirror each used vertex buffer's stride and step mode into the Tint config.
+        cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
+        for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
+            tint::transform::VertexBufferLayoutDescriptor* tintInfo =
+                &cfg.vertex_state[static_cast<uint8_t>(slot)];
+
+            tintInfo->array_stride = dawnInfo.arrayStride;
+            tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
+        }
+
+        // Attach each used attribute to the vertex buffer slot it reads from.
+        for (VertexAttributeLocation location :
+             IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
+            tint::transform::VertexAttributeDescriptor tintInfo;
+            tintInfo.format = ToTintVertexFormat(dawnInfo.format);
+            tintInfo.offset = dawnInfo.offset;
+            tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
+
+            uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
+            cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
+        }
+
+        transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+    }
+
+    // Validates that the entry point is compatible with the whole pipeline layout:
+    //  1) each group's bindings match the corresponding BindGroupLayout,
+    //  2) no bindings are used in groups the layout doesn't define,
+    //  3) filtering samplers are never statically paired with unfilterable-float textures.
+    MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+                                                       const EntryPointMetadata& entryPoint,
+                                                       const PipelineLayoutBase* layout) {
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
+                                 device, group, entryPoint, layout->GetBindGroupLayout(group)),
+                             "validating the entry-point's compatibility for group %u with %s",
+                             static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
+        }
+
+        // Groups outside the layout's mask must not be referenced by the shader at all.
+        for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
+            DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
+                            "The entry-point uses bindings in group %u but %s doesn't have a "
+                            "BindGroupLayout for this index",
+                            static_cast<uint32_t>(group), layout);
+        }
+
+        // Validate that filtering samplers are not used with unfilterable textures.
+        for (const auto& pair : entryPoint.samplerTexturePairs) {
+            const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
+            const BindingInfo& samplerInfo =
+                samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
+            // Only filtering samplers can break the pairing rule; skip the rest.
+            if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
+                continue;
+            }
+            const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
+            const BindingInfo& textureInfo =
+                textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
+
+            ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
+                   textureInfo.bindingType != BindingInfoType::Sampler &&
+                   textureInfo.bindingType != BindingInfoType::StorageTexture);
+
+            if (textureInfo.bindingType != BindingInfoType::Texture) {
+                continue;
+            }
+
+            // Uint/sint can't be statically used with a sampler, so they any
+            // texture bindings reflected must be float or depth textures. If
+            // the shader uses a float/depth texture but the bind group layout
+            // specifies a uint/sint texture binding,
+            // |ValidateCompatibilityWithBindGroupLayout| will fail since the
+            // sampleType does not match.
+            ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
+                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
+                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
+
+            DAWN_INVALID_IF(
+                textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
+                "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
+                "(group:%u, binding:%u) that's %s",
+                static_cast<uint32_t>(pair.texture.group),
+                static_cast<uint32_t>(pair.texture.binding),
+                wgpu::TextureSampleType::UnfilterableFloat,
+                static_cast<uint32_t>(pair.sampler.group),
+                static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
+        }
+
+        return {};
+    }
+
+ // ShaderModuleBase
+
+    // Untracked constructor: stashes the raw SPIR-V words or WGSL text from the chained
+    // sub-descriptor (used later for content hashing / equality) without registering the
+    // object with the device.
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
+                                       const ShaderModuleDescriptor* descriptor,
+                                       ApiObjectBase::UntrackedByDeviceTag tag)
+        : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
+        // The descriptor was already validated; exactly one of the two chained
+        // sub-descriptors must be present.
+        ASSERT(descriptor->nextInChain != nullptr);
+        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &spirvDesc);
+        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+        FindInChain(descriptor->nextInChain, &wgslDesc);
+        ASSERT(spirvDesc || wgslDesc);
+
+        if (spirvDesc) {
+            mType = Type::Spirv;
+            mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+        } else if (wgslDesc) {
+            mType = Type::Wgsl;
+            mWgsl = std::string(wgslDesc->source);
+        }
+    }
+
+    // Tracked constructor: same as the untracked one, then registers with the device.
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
+        TrackInDevice();
+    }
+
+    // Descriptor-less constructor (label not implemented); registers with the device.
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
+        : ApiObjectBase(device, kLabelNotImplemented) {
+        TrackInDevice();
+    }
+
+    // Error-object constructor: no shader content is stored.
+    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+        : ApiObjectBase(device, tag), mType(Type::Undefined) {
+    }
+
+    ShaderModuleBase::~ShaderModuleBase() = default;
+
+    // Removes the module from the device's cache on destruction, if it was cached.
+    void ShaderModuleBase::DestroyImpl() {
+        if (IsCachedReference()) {
+            // Do not uncache the actual cached object if we are a blueprint.
+            GetDevice()->UncacheShaderModule(this);
+        }
+    }
+
+    // static
+    // Creates the error-tagged shader module returned when creation fails.
+    Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
+        return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
+    }
+
+    ObjectType ShaderModuleBase::GetType() const {
+        return ObjectType::ShaderModule;
+    }
+
+    // Returns whether reflection found an entry point with the given name.
+    bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
+        return mEntryPoints.count(entryPoint) > 0;
+    }
+
+    // Returns the reflected metadata for `entryPoint`; the entry point must exist.
+    const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
+        ASSERT(HasEntryPoint(entryPoint));
+        return *mEntryPoints.at(entryPoint);
+    }
+
+    // Content hash used for the device's shader module cache: covers the source type and
+    // the original SPIR-V / WGSL text, matching the fields compared by EqualityFunc below.
+    size_t ShaderModuleBase::ComputeContentHash() {
+        ObjectContentHasher recorder;
+        recorder.Record(mType);
+        recorder.Record(mOriginalSpirv);
+        recorder.Record(mWgsl);
+        return recorder.GetContentHash();
+    }
+
+    // Deep equality for cache lookups; must stay in sync with ComputeContentHash.
+    bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
+                                                    const ShaderModuleBase* b) const {
+        return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv &&
+               a->mWgsl == b->mWgsl;
+    }
+
+    // Returns the parsed Tint program; only valid after successful initialization.
+    const tint::Program* ShaderModuleBase::GetTintProgram() const {
+        ASSERT(mTintProgram);
+        return mTintProgram.get();
+    }
+
+    // webgpu.h entry point: immediately invokes `callback` with the stored compilation
+    // messages. A null callback is a no-op.
+    void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
+                                                 void* userdata) {
+        if (callback == nullptr) {
+            return;
+        }
+
+        callback(WGPUCompilationInfoRequestStatus_Success,
+                 mCompilationMessages->GetCompilationInfo(), userdata);
+    }
+
+    // Stores the Tint compilation messages on the module (first call only) and emits any
+    // formatted Tint errors/warnings through the device's log.
+    void ShaderModuleBase::InjectCompilationMessages(
+        std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
+        // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
+        // module returned from cache.
+        // InjectCompilationMessages should be called only once for a shader module, after it is
+        // created. However currently InjectCompilationMessages may be called on a shader module
+        // returned from cache rather than newly created, and violate the rule. We just skip the
+        // injection in this case for now, but a proper solution including ensure the cache goes
+        // before the validation is required.
+        if (mCompilationMessages != nullptr) {
+            return;
+        }
+        // Move the compilationMessages into the shader module and emit the tint errors and warnings
+        mCompilationMessages = std::move(compilationMessages);
+
+        // Emit the formatted Tint errors and warnings within the moved compilationMessages
+        const std::vector<std::string>& formattedTintMessages =
+            mCompilationMessages->GetFormattedTintMessages();
+        if (formattedTintMessages.empty()) {
+            return;
+        }
+        // Join all messages with newlines into a single warning log entry.
+        std::ostringstream t;
+        for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
+             pMessage++) {
+            if (pMessage != formattedTintMessages.begin()) {
+                t << std::endl;
+            }
+            t << *pMessage;
+        }
+        this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
+    }
+
+    // Returns the stored compilation messages; may be null before injection.
+    OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
+        return mCompilationMessages.get();
+    }
+
+    // static
+    // Builds the MultiplanarExternalTexture binding map from the layout's external-texture
+    // expansions (plane0 -> {plane1, params}) and, if any exist, registers the transform
+    // and its inputs so Tint rewrites external-texture accesses into multiplanar ones.
+    void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
+                                                       tint::transform::Manager* transformManager,
+                                                       tint::transform::DataMap* transformInputs) {
+        tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+            for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
+                newBindingsMap[{static_cast<uint32_t>(i),
+                                static_cast<uint32_t>(expansion.second.plane0)}] = {
+                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
+                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
+            }
+        }
+
+        // Only add the transform when the layout actually contains external textures.
+        if (!newBindingsMap.empty()) {
+            transformManager->Add<tint::transform::MultiplanarExternalTexture>();
+            transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+                newBindingsMap);
+        }
+    }
+
+    // Takes ownership of the parsed program/source from `parseResult` and runs Tint
+    // reflection to populate the entry-point metadata table.
+    MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
+        mTintProgram = std::move(parseResult->tintProgram);
+        mTintSource = std::move(parseResult->tintSource);
+
+        DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
+        return {};
+    }
+
+    // Hash functor for (PipelineLayoutBase*, entry-point-name) pairs, combining both
+    // members into a single hash for use in unordered containers.
+    size_t PipelineLayoutEntryPointPairHashFunc::operator()(
+        const PipelineLayoutEntryPointPair& pair) const {
+        size_t hash = 0;
+        HashCombine(&hash, pair.first, pair.second);
+        return hash;
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/ShaderModule.h b/chromium/third_party/dawn/src/dawn/native/ShaderModule.h
new file mode 100644
index 00000000000..ea446840d43
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ShaderModule.h
@@ -0,0 +1,308 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SHADERMODULE_H_
+#define DAWNNATIVE_SHADERMODULE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/CachedObject.h"
+#include "dawn/native/CompilationMessages.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <bitset>
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace tint {
+
+ class Program;
+
+ namespace transform {
+ class DataMap;
+ class Manager;
+ class Transform;
+ class VertexPulling;
+ } // namespace transform
+
+} // namespace tint
+
+namespace dawn::native {
+
+ struct EntryPointMetadata;
+
+ // Base component type of an inter-stage variable
+ enum class InterStageComponentType {
+ Sint,
+ Uint,
+ Float,
+ };
+
+ enum class InterpolationType {
+ Perspective,
+ Linear,
+ Flat,
+ };
+
+ enum class InterpolationSampling {
+ None,
+ Center,
+ Centroid,
+ Sample,
+ };
+
+ using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
+ struct PipelineLayoutEntryPointPairHashFunc {
+ size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+ };
+
+ // A map from name to EntryPointMetadata.
+ using EntryPointMetadataTable =
+ std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+
+ // Source for a tint program
+ class TintSource;
+
+ struct ShaderModuleParseResult {
+ ShaderModuleParseResult();
+ ~ShaderModuleParseResult();
+ ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
+ ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
+
+ bool HasParsedShader() const;
+
+ std::unique_ptr<tint::Program> tintProgram;
+ std::unique_ptr<TintSource> tintSource;
+ };
+
+ MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult,
+ OwnedCompilationMessages* outMessages);
+ MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+ const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout);
+
+ RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+ const PipelineLayoutBase* layout);
+ ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+ const tint::Program* program,
+ const tint::transform::DataMap& inputs,
+ tint::transform::DataMap* outputs,
+ OwnedCompilationMessages* messages);
+
+ /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
+ void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+ const std::string& entryPoint,
+ BindGroupIndex pullingBufferBindingSet,
+ tint::transform::DataMap* transformInputs);
+
+ // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
+ // for isComparison instead of a wgpu::SamplerBindingType enum.
+ struct ShaderSamplerBindingInfo {
+ bool isComparison;
+ };
+
+ // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
+ // instead of a single enum.
+ struct ShaderTextureBindingInfo {
+ SampleTypeBit compatibleSampleTypes;
+ wgpu::TextureViewDimension viewDimension;
+ bool multisampled;
+ };
+
+ // Per-binding shader metadata contains some SPIRV specific information in addition to
+ // most of the frontend per-binding information.
+ struct ShaderBindingInfo {
+ // The SPIRV ID of the resource.
+ uint32_t id;
+ uint32_t base_type_id;
+
+ BindingNumber binding;
+ BindingInfoType bindingType;
+
+ BufferBindingLayout buffer;
+ ShaderSamplerBindingInfo sampler;
+ ShaderTextureBindingInfo texture;
+ StorageTextureBindingLayout storageTexture;
+ };
+
+ using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+ using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+
+ // The WebGPU overridable constants only support these scalar types
+ union OverridableConstantScalar {
+ // Use int32_t for boolean to initialize the full 32bit
+ int32_t b;
+ float f32;
+ int32_t i32;
+ uint32_t u32;
+ };
+
+ // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
+ // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
+ // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
+ // ShaderModuleBase.
+ struct EntryPointMetadata {
+ // bindings[G][B] is the reflection data for the binding defined with
+ // [[group=G, binding=B]] in WGSL / SPIRV.
+ BindingInfoArray bindings;
+
+ struct SamplerTexturePair {
+ BindingSlot sampler;
+ BindingSlot texture;
+ };
+ std::vector<SamplerTexturePair> samplerTexturePairs;
+
+ // The set of vertex attributes this entryPoint uses.
+ ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
+ vertexInputBaseTypes;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
+
+ // An array to record the basic types (float, int and uint) of the fragment shader outputs.
+ struct FragmentOutputVariableInfo {
+ wgpu::TextureComponentType baseType;
+ uint8_t componentCount;
+ };
+ ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
+ fragmentOutputVariables;
+ ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
+
+ struct InterStageVariableInfo {
+ InterStageComponentType baseType;
+ uint32_t componentCount;
+ InterpolationType interpolationType;
+ InterpolationSampling interpolationSampling;
+ };
+ // Now that we only support vertex and fragment stages, there can't be both inter-stage
+ // inputs and outputs in one shader stage.
+ std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
+ std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
+
+        // The local workgroup size declared for a compute entry point (or 0s otherwise).
+ Origin3D localWorkgroupSize;
+
+ // The shader stage for this binding.
+ SingleShaderStage stage;
+
+ struct OverridableConstant {
+ uint32_t id;
+ // Match tint::inspector::OverridableConstant::Type
+ // Bool is defined as a macro on linux X11 and cannot compile
+ enum class Type { Boolean, Float32, Uint32, Int32 } type;
+
+            // If the constant doesn't have an initializer in the shader
+ // Then it is required for the pipeline stage to have a constant record to initialize a
+ // value
+ bool isInitialized;
+
+ // Store the default initialized value in shader
+            // This is used by metal backend as the function_constant does not have default values
+ // Initialized when isInitialized == true
+ OverridableConstantScalar defaultValue;
+ };
+
+ using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
+
+ // Map identifier to overridable constant
+ // Identifier is unique: either the variable name or the numeric ID if specified
+ OverridableConstantsMap overridableConstants;
+
+ // Overridable constants that are not initialized in shaders
+ // They need value initialization from pipeline stage or it is a validation error
+ std::unordered_set<std::string> uninitializedOverridableConstants;
+
+ // Store constants with shader initialized values as well
+ // This is used by metal backend to set values with default initializers that are not
+ // overridden
+ std::unordered_set<std::string> initializedOverridableConstants;
+
+ bool usesNumWorkgroups = false;
+ };
+
+ class ShaderModuleBase : public ApiObjectBase, public CachedObject {
+ public:
+ ShaderModuleBase(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ApiObjectBase::UntrackedByDeviceTag tag);
+ ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModuleBase() override;
+
+ static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Return true iff the program has an entrypoint called `entryPoint`.
+ bool HasEntryPoint(const std::string& entryPoint) const;
+
+ // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
+ // must be true.
+ const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
+
+ // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
+ size_t ComputeContentHash() override;
+
+ struct EqualityFunc {
+ bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
+ };
+
+ const tint::Program* GetTintProgram() const;
+
+ void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
+
+ void InjectCompilationMessages(
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages);
+
+ OwnedCompilationMessages* GetCompilationMessages() const;
+
+ protected:
+ // Constructor used only for mocking and testing.
+ ShaderModuleBase(DeviceBase* device);
+ void DestroyImpl() override;
+
+ MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
+
+ static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
+ tint::transform::Manager* transformManager,
+ tint::transform::DataMap* transformInputs);
+
+ private:
+ ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ // The original data in the descriptor for caching.
+ enum class Type { Undefined, Spirv, Wgsl };
+ Type mType;
+ std::vector<uint32_t> mOriginalSpirv;
+ std::string mWgsl;
+
+ EntryPointMetadataTable mEntryPoints;
+ std::unique_ptr<tint::Program> mTintProgram;
+ std::unique_ptr<TintSource> mTintSource; // Keep the tint::Source::File alive
+
+ std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SHADERMODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp
new file mode 100644
index 00000000000..72eb8c15c51
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.cpp
@@ -0,0 +1,74 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/SpirvValidation.h"
+
+#include "dawn/native/Device.h"
+
+#include <spirv-tools/libspirv.hpp>
+#include <sstream>
+
+namespace dawn::native {
+
+ MaybeError ValidateSpirv(DeviceBase* device,
+ const std::vector<uint32_t>& spirv,
+ bool dumpSpirv) {
+ spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
+ spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
+ const spv_position_t& position,
+ const char* message) {
+ WGPULoggingType wgpuLogLevel;
+ switch (level) {
+ case SPV_MSG_FATAL:
+ case SPV_MSG_INTERNAL_ERROR:
+ case SPV_MSG_ERROR:
+ wgpuLogLevel = WGPULoggingType_Error;
+ break;
+ case SPV_MSG_WARNING:
+ wgpuLogLevel = WGPULoggingType_Warning;
+ break;
+ case SPV_MSG_INFO:
+ wgpuLogLevel = WGPULoggingType_Info;
+ break;
+ default:
+ wgpuLogLevel = WGPULoggingType_Error;
+ break;
+ }
+
+ std::ostringstream ss;
+ ss << "SPIRV line " << position.index << ": " << message << std::endl;
+ device->EmitLog(wgpuLogLevel, ss.str().c_str());
+ });
+
+ const bool valid = spirvTools.Validate(spirv);
+ if (dumpSpirv || !valid) {
+ std::ostringstream dumpedMsg;
+ std::string disassembly;
+ if (spirvTools.Disassemble(
+ spirv, &disassembly,
+ SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
+ dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
+ } else {
+ dumpedMsg << "/* Failed to disassemble generated SPIRV */";
+ }
+ device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
+
+ DAWN_INVALID_IF(!valid,
+ "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
+
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h
new file mode 100644
index 00000000000..984ebcdefc3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/SpirvValidation.h
@@ -0,0 +1,27 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Error.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ MaybeError ValidateSpirv(DeviceBase* device,
+ const std::vector<uint32_t>& spirv,
+ bool dumpSpirv);
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp
new file mode 100644
index 00000000000..a6c258ca5aa
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.cpp
@@ -0,0 +1,29 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/StagingBuffer.h"
+
+namespace dawn::native {
+
+ StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {
+ }
+
+ size_t StagingBufferBase::GetSize() const {
+ return mBufferSize;
+ }
+
+ void* StagingBufferBase::GetMappedPointer() const {
+ return mMappedPointer;
+ }
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h
new file mode 100644
index 00000000000..0ebb1c4a429
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/StagingBuffer.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFER_H_
+#define DAWNNATIVE_STAGINGBUFFER_H_
+
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+
+ class StagingBufferBase {
+ public:
+ StagingBufferBase(size_t size);
+ virtual ~StagingBufferBase() = default;
+
+ virtual MaybeError Initialize() = 0;
+
+ void* GetMappedPointer() const;
+ size_t GetSize() const;
+
+ protected:
+ void* mMappedPointer = nullptr;
+
+ private:
+ const size_t mBufferSize;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_STAGINGBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Subresource.cpp b/chromium/third_party/dawn/src/dawn/native/Subresource.cpp
new file mode 100644
index 00000000000..a83142bfcac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Subresource.cpp
@@ -0,0 +1,132 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Subresource.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+
+namespace dawn::native {
+
+ Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
+ Aspect aspectMask = ConvertAspect(format, aspect);
+ ASSERT(HasOneBit(aspectMask));
+ return aspectMask;
+ }
+
+ Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
+ Aspect aspectMask = SelectFormatAspects(format, aspect);
+ ASSERT(aspectMask != Aspect::None);
+ return aspectMask;
+ }
+
+ Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
+ // Color view |format| must be treated as the same plane |aspect|.
+ if (format.aspects == Aspect::Color) {
+ switch (aspect) {
+ case wgpu::TextureAspect::Plane0Only:
+ return Aspect::Plane0;
+ case wgpu::TextureAspect::Plane1Only:
+ return Aspect::Plane1;
+ default:
+ break;
+ }
+ }
+ return ConvertAspect(format, aspect);
+ }
+
+ Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+ switch (aspect) {
+ case wgpu::TextureAspect::All:
+ return format.aspects;
+ case wgpu::TextureAspect::DepthOnly:
+ return format.aspects & Aspect::Depth;
+ case wgpu::TextureAspect::StencilOnly:
+ return format.aspects & Aspect::Stencil;
+ case wgpu::TextureAspect::Plane0Only:
+ return format.aspects & Aspect::Plane0;
+ case wgpu::TextureAspect::Plane1Only:
+ return format.aspects & Aspect::Plane1;
+ }
+ UNREACHABLE();
+ }
+
+ uint8_t GetAspectIndex(Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ switch (aspect) {
+ case Aspect::Color:
+ case Aspect::Depth:
+ case Aspect::Plane0:
+ case Aspect::CombinedDepthStencil:
+ return 0;
+ case Aspect::Plane1:
+ case Aspect::Stencil:
+ return 1;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ uint8_t GetAspectCount(Aspect aspects) {
+ // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
+ // Note that we can't do a switch because compilers complain that Depth | Stencil is not
+ // a valid enum value.
+ if (aspects == Aspect::Color || aspects == Aspect::Depth ||
+ aspects == Aspect::CombinedDepthStencil) {
+ return 1;
+ } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
+ return 2;
+ } else {
+ ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
+ return 2;
+ }
+ }
+
+ SubresourceRange::SubresourceRange(Aspect aspects,
+ FirstAndCountRange<uint32_t> arrayLayerParam,
+ FirstAndCountRange<uint32_t> mipLevelParams)
+ : aspects(aspects),
+ baseArrayLayer(arrayLayerParam.first),
+ layerCount(arrayLayerParam.count),
+ baseMipLevel(mipLevelParams.first),
+ levelCount(mipLevelParams.count) {
+ }
+
+ SubresourceRange::SubresourceRange()
+ : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {
+ }
+
+ // static
+ SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects) {
+ return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+ }
+
+ // static
+ SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
+ uint32_t baseArrayLayer,
+ uint32_t baseMipLevel) {
+ ASSERT(HasOneBit(aspect));
+ return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+ }
+
+ // static
+ SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
+ uint32_t layerCount,
+ uint32_t levelCount) {
+ return {aspects, {0, layerCount}, {0, levelCount}};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Subresource.h b/chromium/third_party/dawn/src/dawn/native/Subresource.h
new file mode 100644
index 00000000000..63795e5f7e9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Subresource.h
@@ -0,0 +1,112 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SUBRESOURCE_H_
+#define DAWNNATIVE_SUBRESOURCE_H_
+
+#include "dawn/native/EnumClassBitmasks.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ // Note: Subresource indices are computed by iterating the aspects in increasing order.
+    // D3D12 uses these directly, so the order must match D3D12's indices.
+ // - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
+ enum class Aspect : uint8_t {
+ None = 0x0,
+ Color = 0x1,
+ Depth = 0x2,
+ Stencil = 0x4,
+
+ // Aspects used to select individual planes in a multi-planar format.
+ Plane0 = 0x8,
+ Plane1 = 0x10,
+
+        // An aspect that represents the combination of both the depth and stencil aspects. It
+ // can be ignored outside of the Vulkan backend.
+ CombinedDepthStencil = 0x20,
+ };
+
+ template <>
+ struct EnumBitmaskSize<Aspect> {
+ static constexpr unsigned value = 6;
+ };
+
+ // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+ // does not exist in the format.
+ // Also ASSERTs if "All" is selected and results in more than one aspect.
+ Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
+
+ // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+ // does not exist in the format.
+ Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
+
+ // Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
+ // Note that this can return Aspect::None if the Format doesn't have any of the
+ // selected aspects.
+ Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
+
+ // Convert TextureAspect to the aspect which corresponds to the view format. This
+ // special cases per plane view formats before calling ConvertAspect.
+ Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
+
+    // Helper struct to make it clear what the parameters of a range mean.
+ template <typename T>
+ struct FirstAndCountRange {
+ T first;
+ T count;
+ };
+
+ struct SubresourceRange {
+ SubresourceRange(Aspect aspects,
+ FirstAndCountRange<uint32_t> arrayLayerParam,
+ FirstAndCountRange<uint32_t> mipLevelParams);
+ SubresourceRange();
+
+ Aspect aspects;
+ uint32_t baseArrayLayer;
+ uint32_t layerCount;
+ uint32_t baseMipLevel;
+ uint32_t levelCount;
+
+ static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects);
+ static SubresourceRange MakeSingle(Aspect aspect,
+ uint32_t baseArrayLayer,
+ uint32_t baseMipLevel);
+
+ static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
+ };
+
+ // Helper function to use aspects as linear indices in arrays.
+ uint8_t GetAspectIndex(Aspect aspect);
+ uint8_t GetAspectCount(Aspect aspects);
+
+ // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
+ // the per plane index does not exceed the known maximum plane count.
+ static constexpr uint32_t kMaxPlanesPerFormat = 3;
+
+} // namespace dawn::native
+
+namespace dawn {
+
+ template <>
+ struct IsDawnBitmask<dawn::native::Aspect> {
+ static constexpr bool enable = true;
+ };
+
+} // namespace dawn
+
+#endif // DAWNNATIVE_SUBRESOURCE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h b/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h
new file mode 100644
index 00000000000..345f994ceac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/SubresourceStorage.h
@@ -0,0 +1,555 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SUBRESOURCESTORAGE_H_
+#define DAWNNATIVE_SUBRESOURCESTORAGE_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/TypeTraits.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Subresource.h"
+
+#include <array>
+#include <limits>
+#include <memory>
+#include <vector>
+
+namespace dawn::native {
+
+ // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
+ // value of type T except that it tries to compress similar subresources so that algorithms
+ // can act on a whole range of subresources at once if they have the same state.
+ //
+ // For example a very common case to optimize for is the tracking of the usage of texture
+ // subresources inside a render pass: the vast majority of texture views will select the whole
+ // texture while a small minority will select a sub-range. We want to optimize the common case
+ // by setting and checking a single "usage" value when a full subresource is used but at the
+ // same time allow per-subresource data when needed.
+ //
+ // Another example is barrier tracking per-subresource in the backends: it will often happen
+ // that during texture upload each mip level will have a different "barrier state". However
+ // when the texture is fully uploaded and after it is used for sampling (with a full view) for
+ // the first time, the barrier state will likely be the same across all the subresources.
+    // That's why some form of "recompression" of subresource state must be possible.
+ //
+ // In order to keep the implementation details private and to avoid iterator-hell, this
+ // container uses a more functional approach of calling a closure on the interesting ranges.
+ // This is for example how to look at the state of all subresources.
+ //
+ // subresources.Iterate([](const SubresourceRange& range, const T& data) {
+ // // Do something with the knowledge that all the subresources in `range` have value
+ // // `data`.
+ // });
+ //
+ // SubresourceStorage internally tracks compression state per aspect and then per layer of each
+ // aspect. This means that a 2-aspect texture can have the following compression state:
+ //
+ // - Aspect 0 is fully compressed.
+ // - Aspect 1 is partially compressed:
+ // - Aspect 1 layer 3 is decompressed.
+ // - Aspect 1 layer 0-2 and 4-42 are compressed.
+ //
+    // A useful model to reason about SubresourceStorage is to represent it as a tree:
+ //
+ // - SubresourceStorage is the root.
+ // |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
+ // any children because the data is constant across all of the subtree.
+ // |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
+ // its node doesn't have any children because the data is constant across all of the
+ // subtree.
+    //   |-> Nodes 3 deep represent individual mip levels (for uncompressed layers).
+ //
+ // The concept of recompression is the removal of all child nodes of a non-leaf node when the
+ // data is constant across them. Decompression is the addition of child nodes to a leaf node
+ // and copying of its data to all its children.
+ //
+ // The choice of having secondary compression for array layers is to optimize for the cases
+ // where transfer operations are used to update specific layers of texture with render or
+ // transfer operations, while the rest is untouched. It seems much less likely that there
+ // would be operations that touch all Nth mips of a 2D array texture without touching the
+ // others.
+ //
+ // There are several hot code paths that create new SubresourceStorage like the tracking of
+ // resource usage per-pass. We don't want to allocate a container for the decompressed data
+ // unless we have to because it would dramatically lower performance. Instead
+ // SubresourceStorage contains an inline array that contains the per-aspect compressed data
+    // and only allocates per-subresource storage on aspect decompression.
+ //
+ // T must be a copyable type that supports equality comparison with ==.
+ //
+ // The implementation of functions in this file can have a lot of control flow and corner cases
+ // so each modification should come with extensive tests and ensure 100% code coverage of the
+ // modified functions. See instructions at
+ // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
+ // to run the test with code coverage. A command line that worked in the past (with the right
+ // GN args for the out/coverage directory in a Chromium checkout) is:
+ //
+ /*
+ python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
+ "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
+ third_party/dawn/src/dawn/native
+ */
+ //
+ // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
+ // if recompression can happen or not in Update() and Merge()
+ template <typename T>
+ class SubresourceStorage {
+ public:
+ static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
+ static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
+
+ // Creates the storage with the given "dimensions" and all subresources starting with the
+ // initial value.
+ SubresourceStorage(Aspect aspects,
+ uint32_t arrayLayerCount,
+ uint32_t mipLevelCount,
+ T initialValue = {});
+
+ // Returns the data for a single subresource. Note that the reference returned might be the
+ // same for multiple subresources.
+ const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
+
+        // Given an iterateFunc that's a function or function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, const T& data) and returns void,
+ // calls it with aggregate ranges if possible, such that each subresource is part of
+ // exactly one of the ranges iterateFunc is called with (and obviously data is the value
+ // stored for that subresource). For example:
+ //
+ // subresources.Iterate([&](const SubresourceRange& range, const T& data) {
+ // // Do something with range and data.
+ // });
+ template <typename F>
+ void Iterate(F&& iterateFunc) const;
+
+        // Given an updateFunc that's a function or function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, T* data) and returns void,
+        // calls it with ranges that in aggregate form `range` and passes for each of the
+ // sub-ranges a pointer to modify the value for that sub-range. For example:
+ //
+ // subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
+ // *data |= wgpu::TextureUsage::Stuff;
+ // });
+ //
+ // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
+ // your code is likely to break when compression happens. Range should only be used for
+ // side effects like using it to compute a Vulkan pipeline barrier.
+ template <typename F>
+ void Update(const SubresourceRange& range, F&& updateFunc);
+
+ // Given a mergeFunc that's a function or a function-like object that can be called with
+ // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
+        // returns void, calls it with ranges that in aggregate form the full resources and passes
+ // for each of the sub-ranges a pointer to modify the value for that sub-range and the
+ // corresponding value from other for that sub-range. For example:
+ //
+ // subresources.Merge(otherUsages,
+ // [](const SubresourceRange&, T* data, const T& otherData) {
+ // *data |= otherData;
+ // });
+ //
+ // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
+ // your code is likely to break when compression happens. Range should only be used for
+ // side effects like using it to compute a Vulkan pipeline barrier.
+ template <typename U, typename F>
+ void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
+
+ // Other operations to consider:
+ //
+ // - UpdateTo(Range, T) that updates the range to a constant value.
+
+ // Methods to query the internal state of SubresourceStorage for testing.
+ Aspect GetAspectsForTesting() const;
+ uint32_t GetArrayLayerCountForTesting() const;
+ uint32_t GetMipLevelCountForTesting() const;
+ bool IsAspectCompressedForTesting(Aspect aspect) const;
+ bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
+
+ private:
+ template <typename U>
+ friend class SubresourceStorage;
+
+ void DecompressAspect(uint32_t aspectIndex);
+ void RecompressAspect(uint32_t aspectIndex);
+
+ void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
+ void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
+
+ SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
+
+ // LayerCompressed should never be called when the aspect is compressed otherwise it would
+ // need to check that mLayerCompressed is not null before indexing it.
+ bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
+ bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
+
+ // Return references to the data for a compressed plane / layer or subresource.
+ // Each variant should be called exactly under the correct compression level.
+ T& DataInline(uint32_t aspectIndex);
+ T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
+ const T& DataInline(uint32_t aspectIndex) const;
+ const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
+
+ Aspect mAspects;
+ uint8_t mMipLevelCount;
+ uint16_t mArrayLayerCount;
+
+        // Invariant: if an aspect is marked compressed, then all its layers are marked as
+ // compressed.
+ static constexpr size_t kMaxAspects = 2;
+ std::array<bool, kMaxAspects> mAspectCompressed;
+ std::array<T, kMaxAspects> mInlineAspectData;
+
+ // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
+ std::unique_ptr<bool[]> mLayerCompressed;
+
+ // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
+ // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
+        // the data for a compressed layer of an aspect is in the slot for (aspect, layer, 0).
+ std::unique_ptr<T[]> mData;
+ };
+
+ template <typename T>
+ SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
+ uint32_t arrayLayerCount,
+ uint32_t mipLevelCount,
+ T initialValue)
+ : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
+ ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
+ ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
+
+ uint32_t aspectCount = GetAspectCount(aspects);
+ ASSERT(aspectCount <= kMaxAspects);
+
+ for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
+ mAspectCompressed[aspectIndex] = true;
+ DataInline(aspectIndex) = initialValue;
+ }
+ }
+
+ template <typename T>
+ template <typename F>
+ void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+ bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
+ bool fullAspects =
+ range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
+
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
+
+ // Call the updateFunc once for the whole aspect if possible or decompress and fallback
+ // to per-layer handling.
+ if (mAspectCompressed[aspectIndex]) {
+ if (fullAspects) {
+ SubresourceRange updateRange =
+ SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+ updateFunc(updateRange, &DataInline(aspectIndex));
+ continue;
+ }
+ DecompressAspect(aspectIndex);
+ }
+
+ uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
+ for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
+ // Call the updateFunc once for the whole layer if possible or decompress and
+ // fallback to per-level handling.
+ if (LayerCompressed(aspectIndex, layer)) {
+ if (fullLayers) {
+ SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
+ updateFunc(updateRange, &Data(aspectIndex, layer));
+ continue;
+ }
+ DecompressLayer(aspectIndex, layer);
+ }
+
+ // Worst case: call updateFunc per level.
+ uint32_t levelEnd = range.baseMipLevel + range.levelCount;
+ for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
+ SubresourceRange updateRange =
+ SubresourceRange::MakeSingle(aspect, layer, level);
+ updateFunc(updateRange, &Data(aspectIndex, layer, level));
+ }
+
+ // If the range has fullLayers then it is likely we can recompress after the calls
+ // to updateFunc (this branch is skipped if updateFunc was called for the whole
+ // layer).
+ if (fullLayers) {
+ RecompressLayer(aspectIndex, layer);
+ }
+ }
+
+ // If the range has fullAspects then it is likely we can recompress after the calls to
+ // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
+ if (fullAspects) {
+ RecompressAspect(aspectIndex);
+ }
+ }
+ }
+
+ template <typename T>
+ template <typename U, typename F>
+ void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+ ASSERT(mAspects == other.mAspects);
+ ASSERT(mArrayLayerCount == other.mArrayLayerCount);
+ ASSERT(mMipLevelCount == other.mMipLevelCount);
+
+ for (Aspect aspect : IterateEnumMask(mAspects)) {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
+
+ // If the other storage's aspect is compressed we don't need to decompress anything
+ // in `this` and can just iterate through it, merging with `other`'s constant value for
+ // the aspect. For code simplicity this can be done with a call to Update().
+ if (other.mAspectCompressed[aspectIndex]) {
+ const U& otherData = other.DataInline(aspectIndex);
+ Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
+ [&](const SubresourceRange& subrange, T* data) {
+ mergeFunc(subrange, data, otherData);
+ });
+ continue;
+ }
+
+ // Other doesn't have the aspect compressed so we must do at least per-layer merging.
+ if (mAspectCompressed[aspectIndex]) {
+ DecompressAspect(aspectIndex);
+ }
+
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ // Similarly to above, use a fast path if other's layer is compressed.
+ if (other.LayerCompressed(aspectIndex, layer)) {
+ const U& otherData = other.Data(aspectIndex, layer);
+ Update(GetFullLayerRange(aspect, layer),
+ [&](const SubresourceRange& subrange, T* data) {
+ mergeFunc(subrange, data, otherData);
+ });
+ continue;
+ }
+
+ // Sad case, other is decompressed for this layer, do per-level merging.
+ if (LayerCompressed(aspectIndex, layer)) {
+ DecompressLayer(aspectIndex, layer);
+ }
+
+ for (uint32_t level = 0; level < mMipLevelCount; level++) {
+ SubresourceRange updateRange =
+ SubresourceRange::MakeSingle(aspect, layer, level);
+ mergeFunc(updateRange, &Data(aspectIndex, layer, level),
+ other.Data(aspectIndex, layer, level));
+ }
+
+ RecompressLayer(aspectIndex, layer);
+ }
+
+ RecompressAspect(aspectIndex);
+ }
+ }
+
+ template <typename T>
+ template <typename F>
+ void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+ for (Aspect aspect : IterateEnumMask(mAspects)) {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
+
+ // Fastest path, call iterateFunc on the whole aspect at once.
+ if (mAspectCompressed[aspectIndex]) {
+ SubresourceRange range =
+ SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+ iterateFunc(range, DataInline(aspectIndex));
+ continue;
+ }
+
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ // Fast path, call iterateFunc on the whole array layer at once.
+ if (LayerCompressed(aspectIndex, layer)) {
+ SubresourceRange range = GetFullLayerRange(aspect, layer);
+ iterateFunc(range, Data(aspectIndex, layer));
+ continue;
+ }
+
+ // Slow path, call iterateFunc for each mip level.
+ for (uint32_t level = 0; level < mMipLevelCount; level++) {
+ SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+ iterateFunc(range, Data(aspectIndex, layer, level));
+ }
+ }
+ }
+ }
+
+ template <typename T>
+ const T& SubresourceStorage<T>::Get(Aspect aspect,
+ uint32_t arrayLayer,
+ uint32_t mipLevel) const {
+ uint32_t aspectIndex = GetAspectIndex(aspect);
+ ASSERT(aspectIndex < GetAspectCount(mAspects));
+ ASSERT(arrayLayer < mArrayLayerCount);
+ ASSERT(mipLevel < mMipLevelCount);
+
+ // Fastest path, the aspect is compressed!
+ if (mAspectCompressed[aspectIndex]) {
+ return DataInline(aspectIndex);
+ }
+
+ // Fast path, the array layer is compressed.
+ if (LayerCompressed(aspectIndex, arrayLayer)) {
+ return Data(aspectIndex, arrayLayer);
+ }
+
+ return Data(aspectIndex, arrayLayer, mipLevel);
+ }
+
+ template <typename T>
+ Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
+ return mAspects;
+ }
+
+ template <typename T>
+ uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
+ return mArrayLayerCount;
+ }
+
+ template <typename T>
+ uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
+ return mMipLevelCount;
+ }
+
+ template <typename T>
+ bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
+ return mAspectCompressed[GetAspectIndex(aspect)];
+ }
+
+ template <typename T>
+ bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
+ return mAspectCompressed[GetAspectIndex(aspect)] ||
+ mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
+ }
+
+ template <typename T>
+ void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ const T& aspectData = DataInline(aspectIndex);
+ mAspectCompressed[aspectIndex] = false;
+
+ // Extra allocations are only needed when aspects are decompressed. Create them lazily.
+ if (mData == nullptr) {
+ ASSERT(mLayerCompressed == nullptr);
+
+ uint32_t aspectCount = GetAspectCount(mAspects);
+ mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
+ mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
+
+ for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
+ layerIndex++) {
+ mLayerCompressed[layerIndex] = true;
+ }
+ }
+
+ ASSERT(LayerCompressed(aspectIndex, 0));
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ Data(aspectIndex, layer) = aspectData;
+ ASSERT(LayerCompressed(aspectIndex, layer));
+ }
+ }
+
+ template <typename T>
+ void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ // All layers of the aspect must be compressed for the aspect to possibly recompress.
+ for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+ if (!LayerCompressed(aspectIndex, layer)) {
+ return;
+ }
+ }
+
+ T layer0Data = Data(aspectIndex, 0);
+ for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
+ if (!(Data(aspectIndex, layer) == layer0Data)) {
+ return;
+ }
+ }
+
+ mAspectCompressed[aspectIndex] = true;
+ DataInline(aspectIndex) = layer0Data;
+ }
+
+ template <typename T>
+ void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ const T& layerData = Data(aspectIndex, layer);
+ LayerCompressed(aspectIndex, layer) = false;
+
+ // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
+ // allows starting the iteration at level 1.
+ for (uint32_t level = 1; level < mMipLevelCount; level++) {
+ Data(aspectIndex, layer, level) = layerData;
+ }
+ }
+
+ template <typename T>
+ void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(!LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ const T& level0Data = Data(aspectIndex, layer, 0);
+
+ for (uint32_t level = 1; level < mMipLevelCount; level++) {
+ if (!(Data(aspectIndex, layer, level) == level0Data)) {
+ return;
+ }
+ }
+
+ LayerCompressed(aspectIndex, layer) = true;
+ }
+
+ template <typename T>
+ SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
+ return {aspect, {layer, 1}, {0, mMipLevelCount}};
+ }
+
+ template <typename T>
+ bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+ }
+
+ template <typename T>
+ bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+ }
+
+ template <typename T>
+ T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ return mInlineAspectData[aspectIndex];
+ }
+ template <typename T>
+ T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
+ ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+ }
+ template <typename T>
+ const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
+ ASSERT(mAspectCompressed[aspectIndex]);
+ return mInlineAspectData[aspectIndex];
+ }
+ template <typename T>
+ const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
+ uint32_t layer,
+ uint32_t level) const {
+ ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+ ASSERT(!mAspectCompressed[aspectIndex]);
+ return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+ }
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SUBRESOURCESTORAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface.cpp b/chromium/third_party/dawn/src/dawn/native/Surface.cpp
new file mode 100644
index 00000000000..bede05f09ee
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Surface.cpp
@@ -0,0 +1,246 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Surface.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/SwapChain.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include <windows.ui.core.h>
+# include <windows.ui.xaml.controls.h>
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+# include "dawn/common/xlib_with_undefs.h"
+#endif // defined(DAWN_USE_X11)
+
+namespace dawn::native {
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ Surface::Type value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case Surface::Type::MetalLayer:
+ s->Append("MetalLayer");
+ break;
+ case Surface::Type::WindowsHWND:
+ s->Append("WindowsHWND");
+ break;
+ case Surface::Type::WindowsCoreWindow:
+ s->Append("WindowsCoreWindow");
+ break;
+ case Surface::Type::WindowsSwapChainPanel:
+ s->Append("WindowsSwapChainPanel");
+ break;
+ case Surface::Type::Xlib:
+ s->Append("Xlib");
+ break;
+ }
+ return {true};
+ }
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ bool InheritsFromCAMetalLayer(void* obj);
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+ MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
+ "Surface cannot be created with %s. nextInChain is not specified.",
+ descriptor);
+
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::SurfaceDescriptorFromMetalLayer,
+ wgpu::SType::SurfaceDescriptorFromWindowsHWND,
+ wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
+ wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
+ wgpu::SType::SurfaceDescriptorFromXlibWindow));
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ if (metalDesc) {
+ // Check that the layer is a CAMetalLayer (or a derived class).
+ DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
+ "Layer must be a CAMetalLayer");
+ return {};
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# if defined(DAWN_PLATFORM_WIN32)
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ if (hwndDesc) {
+ DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
+ return {};
+ }
+# endif // defined(DAWN_PLATFORM_WIN32)
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ if (coreWindowDesc) {
+ // Validate the coreWindow by query for ICoreWindow interface
+ ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
+ DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
+ FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
+ ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
+ "Invalid CoreWindow");
+ return {};
+ }
+ const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+ if (swapChainPanelDesc) {
+ // Validate the swapChainPanel by querying for ISwapChainPanel interface
+ ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
+ DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
+ FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
+ ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
+ "Invalid SwapChainPanel");
+ return {};
+ }
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &xDesc);
+ if (xDesc) {
+ // Check the validity of the window by calling a getter function on the window that
+            // returns a status code. If the window is bad the call returns a status of zero. We
+ // need to set a temporary X11 error handler while doing this because the default
+ // X11 error handler exits the program on any error.
+ XErrorHandler oldErrorHandler =
+ XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+ XWindowAttributes attributes;
+ int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
+ xDesc->window, &attributes);
+ XSetErrorHandler(oldErrorHandler);
+
+ DAWN_INVALID_IF(status == 0, "Invalid X Window");
+ return {};
+ }
+#endif // defined(DAWN_USE_X11)
+
+ return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)",
+ descriptor->nextInChain->sType);
+ }
+
+ Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
+ : mInstance(instance) {
+ ASSERT(descriptor->nextInChain != nullptr);
+ const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+ const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+ const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+ const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &metalDesc);
+ FindInChain(descriptor->nextInChain, &hwndDesc);
+ FindInChain(descriptor->nextInChain, &coreWindowDesc);
+ FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+ FindInChain(descriptor->nextInChain, &xDesc);
+ ASSERT(metalDesc || hwndDesc || xDesc);
+ if (metalDesc) {
+ mType = Type::MetalLayer;
+ mMetalLayer = metalDesc->layer;
+ } else if (hwndDesc) {
+ mType = Type::WindowsHWND;
+ mHInstance = hwndDesc->hinstance;
+ mHWND = hwndDesc->hwnd;
+ } else if (coreWindowDesc) {
+#if defined(DAWN_PLATFORM_WINDOWS)
+ mType = Type::WindowsCoreWindow;
+ mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+ } else if (swapChainPanelDesc) {
+#if defined(DAWN_PLATFORM_WINDOWS)
+ mType = Type::WindowsSwapChainPanel;
+ mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+ } else if (xDesc) {
+ mType = Type::Xlib;
+ mXDisplay = xDesc->display;
+ mXWindow = xDesc->window;
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ Surface::~Surface() {
+ if (mSwapChain != nullptr) {
+ mSwapChain->DetachFromSurface();
+ mSwapChain = nullptr;
+ }
+ }
+
+ NewSwapChainBase* Surface::GetAttachedSwapChain() {
+ return mSwapChain.Get();
+ }
+
+ void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
+ mSwapChain = swapChain;
+ }
+
+ InstanceBase* Surface::GetInstance() {
+ return mInstance.Get();
+ }
+
+ Surface::Type Surface::GetType() const {
+ return mType;
+ }
+
+ void* Surface::GetMetalLayer() const {
+ ASSERT(mType == Type::MetalLayer);
+ return mMetalLayer;
+ }
+
+ void* Surface::GetHInstance() const {
+ ASSERT(mType == Type::WindowsHWND);
+ return mHInstance;
+ }
+ void* Surface::GetHWND() const {
+ ASSERT(mType == Type::WindowsHWND);
+ return mHWND;
+ }
+
+ IUnknown* Surface::GetCoreWindow() const {
+ ASSERT(mType == Type::WindowsCoreWindow);
+#if defined(DAWN_PLATFORM_WINDOWS)
+ return mCoreWindow.Get();
+#else
+ return nullptr;
+#endif
+ }
+
+ IUnknown* Surface::GetSwapChainPanel() const {
+ ASSERT(mType == Type::WindowsSwapChainPanel);
+#if defined(DAWN_PLATFORM_WINDOWS)
+ return mSwapChainPanel.Get();
+#else
+ return nullptr;
+#endif
+ }
+
+ void* Surface::GetXDisplay() const {
+ ASSERT(mType == Type::Xlib);
+ return mXDisplay;
+ }
+ uint32_t Surface::GetXWindow() const {
+ ASSERT(mType == Type::Xlib);
+ return mXWindow;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface.h b/chromium/third_party/dawn/src/dawn/native/Surface.h
new file mode 100644
index 00000000000..c6320af71ce
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Surface.h
@@ -0,0 +1,110 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SURFACE_H_
+#define DAWNNATIVE_SURFACE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/Platform.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include "dawn/native/d3d12/d3d12_platform.h"
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+// Forward declare IUnknown
+// GetCoreWindow needs to return an IUnknown pointer
+// non-windows platforms don't have this type
+struct IUnknown;
+
+namespace dawn::native {
+
+ MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor);
+
+ // A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
+ // aren't used because they would cause compilation errors on other OSes (or require
+ // ObjectiveC).
+ // The surface is also used to store the current swapchain so that we can detach it when it is
+ // replaced.
+ class Surface final : public RefCounted {
+ public:
+ Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+
+ void SetAttachedSwapChain(NewSwapChainBase* swapChain);
+ NewSwapChainBase* GetAttachedSwapChain();
+
+ // These are valid to call on all Surfaces.
+ enum class Type { MetalLayer, WindowsHWND, WindowsCoreWindow, WindowsSwapChainPanel, Xlib };
+ Type GetType() const;
+ InstanceBase* GetInstance();
+
+ // Valid to call if the type is MetalLayer
+ void* GetMetalLayer() const;
+
+ // Valid to call if the type is WindowsHWND
+ void* GetHInstance() const;
+ void* GetHWND() const;
+
+ // Valid to call if the type is WindowsCoreWindow
+ IUnknown* GetCoreWindow() const;
+
+ // Valid to call if the type is WindowsSwapChainPanel
+ IUnknown* GetSwapChainPanel() const;
+
+ // Valid to call if the type is WindowsXlib
+ void* GetXDisplay() const;
+ uint32_t GetXWindow() const;
+
+ private:
+ ~Surface() override;
+
+ Ref<InstanceBase> mInstance;
+ Type mType;
+
+ // The swapchain will set this to null when it is destroyed.
+ Ref<NewSwapChainBase> mSwapChain;
+
+ // MetalLayer
+ void* mMetalLayer = nullptr;
+
+ // WindowsHwnd
+ void* mHInstance = nullptr;
+ void* mHWND = nullptr;
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ // WindowsCoreWindow
+ ComPtr<IUnknown> mCoreWindow;
+
+ // WindowsSwapChainPanel
+ ComPtr<IUnknown> mSwapChainPanel;
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+ // Xlib
+ void* mXDisplay = nullptr;
+ uint32_t mXWindow = 0;
+ };
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ Surface::Type value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SURFACE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm b/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm
new file mode 100644
index 00000000000..ecb5d884aa6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Surface_metal.mm
@@ -0,0 +1,30 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+# error "Surface_metal.mm requires the Metal backend to be enabled."
+#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace dawn::native {
+
+ bool InheritsFromCAMetalLayer(void* obj) {
+ id<NSObject> object = static_cast<id>(obj);
+ return [object isKindOfClass:[CAMetalLayer class]];
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp
new file mode 100644
index 00000000000..6b4c331a307
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/SwapChain.cpp
@@ -0,0 +1,412 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+
+ namespace {
+
+ class ErrorSwapChain final : public SwapChainBase {
+ public:
+ ErrorSwapChain(DeviceBase* device) : SwapChainBase(device, ObjectBase::kError) {
+ }
+
+ private:
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override {
+ GetDevice()->ConsumedError(
+ DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ }
+
+ TextureViewBase* APIGetCurrentTextureView() override {
+ GetDevice()->ConsumedError(
+ DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ void APIPresent() override {
+ GetDevice()->ConsumedError(
+ DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+ }
+ };
+
+ } // anonymous namespace
+
+ MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ if (descriptor->implementation != 0) {
+ DAWN_INVALID_IF(surface != nullptr,
+ "Exactly one of surface or implementation must be set");
+
+ DawnSwapChainImplementation* impl =
+ reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+
+ DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
+ !impl->GetNextTexture || !impl->Present,
+ "Implementation is incomplete");
+
+ } else {
+ DAWN_INVALID_IF(surface == nullptr,
+ "At least one of surface or implementation must be set");
+
+ DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
+
+ // TODO(crbug.com/dawn/160): Lift this restriction once
+ // wgpu::Instance::GetPreferredSurfaceFormat is implemented.
+ DAWN_INVALID_IF(descriptor->format != wgpu::TextureFormat::BGRA8Unorm,
+ "Format (%s) is not %s, which is (currently) the only accepted format.",
+ descriptor->format, wgpu::TextureFormat::BGRA8Unorm);
+
+ DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
+ "Usage (%s) is not %s, which is (currently) the only accepted usage.",
+ descriptor->usage, wgpu::TextureUsage::RenderAttachment);
+
+ DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
+ "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
+ descriptor->height);
+
+ DAWN_INVALID_IF(
+ descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
+ descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
+ "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
+ "size (width: %u, height: %u).",
+ descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
+ device->GetLimits().v1.maxTextureDimension2D);
+ }
+
+ return {};
+ }
+
+ TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
+ TextureDescriptor desc;
+ desc.usage = swapChain->GetUsage();
+ desc.dimension = wgpu::TextureDimension::e2D;
+ desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
+ desc.format = swapChain->GetFormat();
+ desc.mipLevelCount = 1;
+ desc.sampleCount = 1;
+
+ return desc;
+ }
+
+ // SwapChainBase
+
+ SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+ TrackInDevice();
+ }
+
+ SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
+ }
+
+ SwapChainBase::~SwapChainBase() {
+ }
+
+ void SwapChainBase::DestroyImpl() {
+ }
+
+ // static
+ SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
+ return new ErrorSwapChain(device);
+ }
+
+ ObjectType SwapChainBase::GetType() const {
+ return ObjectType::SwapChain;
+ }
+
+ // OldSwapChainBase
+
+ OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mImplementation(
+ *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+ }
+
+ OldSwapChainBase::~OldSwapChainBase() {
+ if (!IsError()) {
+ const auto& im = GetImplementation();
+ im.Destroy(im.userData);
+ }
+ }
+
+ void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
+ if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
+ return;
+ }
+ ASSERT(!IsError());
+
+ allowedUsage |= wgpu::TextureUsage::Present;
+
+ mFormat = format;
+ mAllowedUsage = allowedUsage;
+ mWidth = width;
+ mHeight = height;
+ mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
+ static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+ }
+
+ TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
+ if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+ return TextureViewBase::MakeError(GetDevice());
+ }
+ ASSERT(!IsError());
+
+ // Return the same current texture view until Present is called.
+ if (mCurrentTextureView != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference so add it even when
+ // reuse the existing texture view.
+ mCurrentTextureView->Reference();
+ return mCurrentTextureView.Get();
+ }
+
+ // Create the backing texture and the view.
+ TextureDescriptor descriptor;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
+ descriptor.size.width = mWidth;
+ descriptor.size.height = mHeight;
+ descriptor.size.depthOrArrayLayers = 1;
+ descriptor.sampleCount = 1;
+ descriptor.format = mFormat;
+ descriptor.mipLevelCount = 1;
+ descriptor.usage = mAllowedUsage;
+
+ // Get the texture but remove the external refcount because it is never passed outside
+ // of dawn_native
+ mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
+
+ mCurrentTextureView = mCurrentTexture->APICreateView();
+ return mCurrentTextureView.Get();
+ }
+
+ void OldSwapChainBase::APIPresent() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
+ return;
+ }
+ ASSERT(!IsError());
+
+ if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
+ return;
+ }
+
+ mImplementation.Present(mImplementation.userData);
+
+ mCurrentTexture = nullptr;
+ mCurrentTextureView = nullptr;
+ }
+
+ const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
+ ASSERT(!IsError());
+ return mImplementation;
+ }
+
+ MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_TRY(ValidateTextureUsage(allowedUsage));
+ DAWN_TRY(ValidateTextureFormat(format));
+
+ DAWN_INVALID_IF(width == 0 || height == 0,
+ "Configuration size (width: %u, height: %u) for %s is empty.", width,
+ height, this);
+
+ return {};
+ }
+
+ MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ // If width is 0, it implies swap chain has never been configured
+ DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.",
+ this);
+
+ return {};
+ }
+
+ MaybeError OldSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_INVALID_IF(
+ mCurrentTextureView == nullptr,
+ "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
+ this);
+
+ return {};
+ }
+
+ // Implementation of NewSwapChainBase
+
+ NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
+ Surface* surface,
+ const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mAttached(false),
+ mWidth(descriptor->width),
+ mHeight(descriptor->height),
+ mFormat(descriptor->format),
+ mUsage(descriptor->usage),
+ mPresentMode(descriptor->presentMode),
+ mSurface(surface) {
+ }
+
+ NewSwapChainBase::~NewSwapChainBase() {
+ if (mCurrentTextureView != nullptr) {
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
+ }
+
+ ASSERT(!mAttached);
+ }
+
+ void NewSwapChainBase::DetachFromSurface() {
+ if (mAttached) {
+ DetachFromSurfaceImpl();
+ mSurface = nullptr;
+ mAttached = false;
+ }
+ }
+
+ void NewSwapChainBase::SetIsAttached() {
+ mAttached = true;
+ }
+
+ void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
+ GetDevice()->ConsumedError(
+ DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
+ }
+
+ TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
+ if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ if (mCurrentTextureView != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference so add it even when
+ // reusing the existing texture view.
+ mCurrentTextureView->Reference();
+ return mCurrentTextureView.Get();
+ }
+
+ TextureViewBase* view = nullptr;
+ if (GetDevice()->ConsumedError(GetCurrentTextureViewImpl(), &view)) {
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ // Check that the return texture view matches exactly what was given for this descriptor.
+ ASSERT(view->GetTexture()->GetFormat().format == mFormat);
+ ASSERT(IsSubset(mUsage, view->GetTexture()->GetUsage()));
+ ASSERT(view->GetLevelCount() == 1);
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetDimension() == wgpu::TextureViewDimension::e2D);
+ ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).width == mWidth);
+ ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).height ==
+ mHeight);
+
+ mCurrentTextureView = view;
+ return view;
+ }
+
+ void NewSwapChainBase::APIPresent() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
+ return;
+ }
+
+ if (GetDevice()->ConsumedError(PresentImpl())) {
+ return;
+ }
+
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
+ mCurrentTextureView = nullptr;
+ }
+
+ uint32_t NewSwapChainBase::GetWidth() const {
+ return mWidth;
+ }
+
+ uint32_t NewSwapChainBase::GetHeight() const {
+ return mHeight;
+ }
+
+ wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
+ return mFormat;
+ }
+
+ wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
+ return mUsage;
+ }
+
+ wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
+ return mPresentMode;
+ }
+
+ Surface* NewSwapChainBase::GetSurface() const {
+ return mSurface;
+ }
+
+ bool NewSwapChainBase::IsAttached() const {
+ return mAttached;
+ }
+
+ wgpu::BackendType NewSwapChainBase::GetBackendType() const {
+ return GetDevice()->GetAdapter()->GetBackendType();
+ }
+
+ MaybeError NewSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
+
+ DAWN_INVALID_IF(
+ mCurrentTextureView == nullptr,
+ "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
+ this);
+
+ return {};
+ }
+
+ MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+
+ return {};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/SwapChain.h b/chromium/third_party/dawn/src/dawn/native/SwapChain.h
new file mode 100644
index 00000000000..b19c0b6bcca
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/SwapChain.h
@@ -0,0 +1,169 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SWAPCHAIN_H_
+#define DAWNNATIVE_SWAPCHAIN_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+ MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
+ const SwapChainDescriptor* descriptor);
+
+ TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
+
+ class SwapChainBase : public ApiObjectBase {
+ public:
+ SwapChainBase(DeviceBase* device);
+
+ static SwapChainBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ // Dawn API
+ virtual void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) = 0;
+ virtual TextureViewBase* APIGetCurrentTextureView() = 0;
+ virtual void APIPresent() = 0;
+
+ protected:
+ SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ ~SwapChainBase() override;
+ void DestroyImpl() override;
+ };
+
+ // The base class for implementation-based SwapChains that are deprecated.
+ class OldSwapChainBase : public SwapChainBase {
+ public:
+ OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
+
+ // Dawn API
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
+
+ protected:
+ ~OldSwapChainBase() override;
+ const DawnSwapChainImplementation& GetImplementation();
+ virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
+ virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
+
+ private:
+ MaybeError ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) const;
+ MaybeError ValidateGetCurrentTextureView() const;
+ MaybeError ValidatePresent() const;
+
+ DawnSwapChainImplementation mImplementation = {};
+ wgpu::TextureFormat mFormat = {};
+ wgpu::TextureUsage mAllowedUsage;
+ uint32_t mWidth = 0;
+ uint32_t mHeight = 0;
+ Ref<TextureBase> mCurrentTexture;
+ Ref<TextureViewBase> mCurrentTextureView;
+ };
+
+ // The base class for surface-based SwapChains that aren't ready yet.
+ class NewSwapChainBase : public SwapChainBase {
+ public:
+ NewSwapChainBase(DeviceBase* device,
+ Surface* surface,
+ const SwapChainDescriptor* descriptor);
+
+ // This is called when the swapchain is detached when one of the following happens:
+ //
+ // - The surface it is attached to is being destroyed.
+ // - The swapchain is being replaced by another one on the surface.
+ //
+ // Note that the surface has a Ref on the last swapchain that was used on it so the
+ // SwapChain destructor will only be called after one of the things above happens.
+ //
+ // The call for the detaching previous swapchain should be called inside the backend
+ // implementation of SwapChains. This is to allow them to acquire any resources before
+ // calling detach to make a seamless transition from the previous swapchain.
+ //
+ // Likewise the call for the swapchain being destroyed must be done in the backend's
+ // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
+ // destructor.
+ void DetachFromSurface();
+
+ void SetIsAttached();
+
+ // Dawn API
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
+
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ wgpu::TextureFormat GetFormat() const;
+ wgpu::TextureUsage GetUsage() const;
+ wgpu::PresentMode GetPresentMode() const;
+ Surface* GetSurface() const;
+ bool IsAttached() const;
+ wgpu::BackendType GetBackendType() const;
+
+ protected:
+ ~NewSwapChainBase() override;
+
+ private:
+ bool mAttached;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ wgpu::TextureFormat mFormat;
+ wgpu::TextureUsage mUsage;
+ wgpu::PresentMode mPresentMode;
+
+ // This is a weak reference to the surface. If the surface is destroyed it will call
+ // DetachFromSurface and mSurface will be updated to nullptr.
+ Surface* mSurface = nullptr;
+ Ref<TextureViewBase> mCurrentTextureView;
+
+ MaybeError ValidatePresent() const;
+ MaybeError ValidateGetCurrentTextureView() const;
+
+ // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
+ // manner, starting with GetCurrentTextureViewImpl.
+
+ // The returned texture view must match the swapchain descriptor exactly.
+ virtual ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() = 0;
+ // The call to present must destroy the current view's texture so further access to it are
+ // invalid.
+ virtual MaybeError PresentImpl() = 0;
+
+ // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
+ // called no other virtual method can be called.
+ virtual void DetachFromSurfaceImpl() = 0;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_SWAPCHAIN_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Texture.cpp b/chromium/third_party/dawn/src/dawn/native/Texture.cpp
new file mode 100644
index 00000000000..5f81a7f39ad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Texture.cpp
@@ -0,0 +1,781 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/Texture.h"
+
+#include <algorithm>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/Adapter.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/ObjectType_autogen.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/ValidationUtils_autogen.h"
+
+namespace dawn::native {
+ namespace {
+ // WebGPU currently does not have texture format reinterpretation. If it does, the
+ // code to check for it might go here.
+ MaybeError ValidateTextureViewFormatCompatibility(const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ if (texture->GetFormat().format != descriptor->format) {
+ if (descriptor->aspect != wgpu::TextureAspect::All &&
+ texture->GetFormat().GetAspectInfo(descriptor->aspect).format ==
+ descriptor->format) {
+ return {};
+ }
+
+ return DAWN_VALIDATION_ERROR(
+ "The format of texture view is not compatible to the original texture");
+ }
+
+ return {};
+ }
+
+ bool IsTextureViewDimensionCompatibleWithTextureDimension(
+ wgpu::TextureViewDimension textureViewDimension,
+ wgpu::TextureDimension textureDimension) {
+ switch (textureViewDimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return textureDimension == wgpu::TextureDimension::e2D;
+
+ case wgpu::TextureViewDimension::e3D:
+ return textureDimension == wgpu::TextureDimension::e3D;
+
+ case wgpu::TextureViewDimension::e1D:
+ return textureDimension == wgpu::TextureDimension::e1D;
+
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ bool IsArrayLayerValidForTextureViewDimension(
+ wgpu::TextureViewDimension textureViewDimension,
+ uint32_t textureViewArrayLayer) {
+ switch (textureViewDimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e3D:
+ return textureViewArrayLayer == 1u;
+ case wgpu::TextureViewDimension::e2DArray:
+ return true;
+ case wgpu::TextureViewDimension::Cube:
+ return textureViewArrayLayer == 6u;
+ case wgpu::TextureViewDimension::CubeArray:
+ return textureViewArrayLayer % 6 == 0;
+ case wgpu::TextureViewDimension::e1D:
+ return textureViewArrayLayer == 1u;
+
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
+ wgpu::TextureUsage usage,
+ const Format* format) {
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+ "The sample count (%u) of the texture is not supported.",
+ descriptor->sampleCount);
+
+ if (descriptor->sampleCount > 1) {
+ DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
+ "The mip level count (%u) of a multisampled texture is not 1.",
+ descriptor->mipLevelCount);
+
+ // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
+ // Multisampled 2D array texture is not supported because on Metal it requires the
+ // version of macOS be greater than 10.14.
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "The dimension (%s) of a multisampled texture is not 2D.",
+ descriptor->dimension);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
+ "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
+ descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(!format->supportsMultisample,
+ "The texture format (%s) does not support multisampling.",
+ format->format);
+
+ // Compressed formats are not renderable. They cannot support multisample.
+ ASSERT(!format->isCompressed);
+
+ DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
+ "The sample count (%u) of a storage textures is not 1.",
+ descriptor->sampleCount);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateTextureViewDimensionCompatibility(
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_INVALID_IF(
+ !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
+ descriptor->arrayLayerCount),
+ "The dimension (%s) of the texture view is not compatible with the layer count "
+ "(%u) of %s.",
+ descriptor->dimension, descriptor->arrayLayerCount, texture);
+
+ DAWN_INVALID_IF(
+ !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
+ texture->GetDimension()),
+ "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
+ "of %s.",
+ descriptor->dimension, texture->GetDimension(), texture);
+
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ DAWN_INVALID_IF(
+ texture->GetSize().width != texture->GetSize().height,
+ "A %s texture view is not compatible with %s because the texture's width "
+ "(%u) and height (%u) are not equal.",
+ descriptor->dimension, texture, texture->GetSize().width,
+ texture->GetSize().height);
+ break;
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
+ break;
+
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateTextureSize(const DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ const Format* format) {
+ ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
+ descriptor->size.depthOrArrayLayers != 0);
+ const CombinedLimits& limits = device->GetLimits();
+ Extent3D maxExtent;
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e1D:
+ maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
+ break;
+ case wgpu::TextureDimension::e2D:
+ maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
+ limits.v1.maxTextureArrayLayers};
+ break;
+ case wgpu::TextureDimension::e3D:
+ maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
+ limits.v1.maxTextureDimension3D};
+ break;
+ }
+ DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
+ descriptor->size.height > maxExtent.height ||
+ descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
+ "Texture size (%s) exceeded maximum texture size (%s).",
+ &descriptor->size, &maxExtent);
+
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e1D:
+ DAWN_INVALID_IF(
+ descriptor->mipLevelCount != 1,
+ "Texture mip level count (%u) is more than 1 when its dimension is %s.",
+ descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
+ break;
+ case wgpu::TextureDimension::e2D: {
+ uint32_t maxMippedDimension =
+ std::max(descriptor->size.width, descriptor->size.height);
+ DAWN_INVALID_IF(
+ Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+ "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+ descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ uint32_t maxMippedDimension = std::max(
+ descriptor->size.width,
+ std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
+ DAWN_INVALID_IF(
+ Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+ "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+ descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+ break;
+ }
+ }
+
+ if (format->isCompressed) {
+ const TexelBlockInfo& blockInfo =
+ format->GetAspectInfo(wgpu::TextureAspect::All).block;
+ DAWN_INVALID_IF(
+ descriptor->size.width % blockInfo.width != 0 ||
+ descriptor->size.height % blockInfo.height != 0,
+ "The size (%s) of the texture is not a multiple of the block width (%u) and "
+ "height (%u) of the texture format (%s).",
+ &descriptor->size, blockInfo.width, blockInfo.height, format->format);
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
+ wgpu::TextureUsage usage,
+ const Format* format) {
+ DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
+
+ DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
+
+ constexpr wgpu::TextureUsage kValidCompressedUsages =
+ wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+ wgpu::TextureUsage::CopyDst;
+ DAWN_INVALID_IF(
+ format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
+ "The texture usage (%s) is incompatible with the compressed texture format (%s).",
+ usage, format->format);
+
+ DAWN_INVALID_IF(
+ !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
+ "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
+ "format (%s).",
+ usage, wgpu::TextureUsage::RenderAttachment, format->format);
+
+ DAWN_INVALID_IF(
+ !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
+ "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
+ usage, wgpu::TextureUsage::StorageBinding, format->format);
+
+ // Only allows simple readonly texture usages.
+ constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
+ wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
+ DAWN_INVALID_IF(
+ format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+ "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
+ format->format);
+
+ return {};
+ }
+
+ } // anonymous namespace
+
+ MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+ const TextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+ wgpu::SType::DawnTextureInternalUsageDescriptor));
+
+ const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+ DAWN_INVALID_IF(
+ internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
+ "The dawn-internal-usages feature is not enabled");
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+
+ wgpu::TextureUsage usage = descriptor->usage;
+ if (internalUsageDesc != nullptr) {
+ usage |= internalUsageDesc->internalUsage;
+ }
+
+ DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
+ DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
+ DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
+
+ DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
+ descriptor->size.depthOrArrayLayers == 0 ||
+ descriptor->mipLevelCount == 0,
+ "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(
+ descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
+ "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
+ descriptor->dimension, format->format);
+
+ // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
+ // doesn't support depth/stencil formats on 3D textures.
+ DAWN_INVALID_IF(
+ descriptor->dimension != wgpu::TextureDimension::e2D &&
+ (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
+ "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
+ descriptor->dimension, format->format);
+
+ DAWN_TRY(ValidateTextureSize(device, descriptor, format));
+
+ // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
+ // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
+ descriptor->mipLevelCount > 1 &&
+ device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
+ "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
+ "disabled on Metal.");
+
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisableR8RG8Mipmaps) && descriptor->mipLevelCount > 1 &&
+ (descriptor->format == wgpu::TextureFormat::R8Unorm ||
+ descriptor->format == wgpu::TextureFormat::RG8Unorm),
+ "https://crbug.com/dawn/1071: r8unorm and rg8unorm textures with more than one mip "
+ "level are disabled on Metal.");
+
+ return {};
+ }
+
+ MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ // Parent texture should have been already validated.
+ ASSERT(texture);
+ ASSERT(!texture->IsError());
+
+ DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
+ DAWN_TRY(ValidateTextureFormat(descriptor->format));
+ DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+
+ DAWN_INVALID_IF(
+ SelectFormatAspects(texture->GetFormat(), descriptor->aspect) == Aspect::None,
+ "Texture format (%s) does not have the texture view's selected aspect (%s).",
+ texture->GetFormat().format, descriptor->aspect);
+
+ DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
+ "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
+ descriptor->arrayLayerCount, descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
+ uint64_t(texture->GetArrayLayers()),
+ "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
+ "texture's array layer count (%u).",
+ descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
+ uint64_t(texture->GetNumMipLevels()),
+ "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
+ "texture's mip level count (%u).",
+ descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
+
+ DAWN_TRY(ValidateTextureViewFormatCompatibility(texture, descriptor));
+ DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
+
+ return {};
+ }
+
+ TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ ASSERT(texture);
+
+ TextureViewDescriptor desc = {};
+ if (descriptor) {
+ desc = *descriptor;
+ }
+
+ // The default value for the view dimension depends on the texture's dimension with a
+ // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
+ if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ desc.dimension = wgpu::TextureViewDimension::e1D;
+ break;
+
+ case wgpu::TextureDimension::e2D:
+ desc.dimension = wgpu::TextureViewDimension::e2D;
+ break;
+
+ case wgpu::TextureDimension::e3D:
+ desc.dimension = wgpu::TextureViewDimension::e3D;
+ break;
+ }
+ }
+
+ if (desc.format == wgpu::TextureFormat::Undefined) {
+ // TODO(dawn:682): Use GetAspectInfo(aspect).
+ desc.format = texture->GetFormat().format;
+ }
+ if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
+ switch (desc.dimension) {
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e3D:
+ desc.arrayLayerCount = 1;
+ break;
+ case wgpu::TextureViewDimension::Cube:
+ desc.arrayLayerCount = 6;
+ break;
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::CubeArray:
+ desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
+ break;
+ default:
+ // We don't put UNREACHABLE() here because we validate enums only after this
+ // function sets default values. Otherwise, the UNREACHABLE() will be hit.
+ break;
+ }
+ }
+
+ if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
+ desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
+ }
+ return desc;
+ }
+
+ // WebGPU only supports sample counts of 1 and 4. We could expand to more based on
+ // platform support, but it would probably be a feature.
+ bool IsValidSampleCount(uint32_t sampleCount) {
+ switch (sampleCount) {
+ case 1:
+ case 4:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ // TextureBase
+
+ TextureBase::TextureBase(DeviceBase* device,
+ const TextureDescriptor* descriptor,
+ TextureState state)
+ : ApiObjectBase(device, descriptor->label),
+ mDimension(descriptor->dimension),
+ mFormat(device->GetValidInternalFormat(descriptor->format)),
+ mSize(descriptor->size),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount),
+ mUsage(descriptor->usage),
+ mInternalUsage(mUsage),
+ mState(state) {
+ uint32_t subresourceCount =
+ mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
+ mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
+
+ const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+ FindInChain(descriptor->nextInChain, &internalUsageDesc);
+ if (internalUsageDesc != nullptr) {
+ mInternalUsage |= internalUsageDesc->internalUsage;
+ }
+ TrackInDevice();
+ }
+
+ static Format kUnusedFormat;
+
+ TextureBase::TextureBase(DeviceBase* device, TextureState state)
+ : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
+ TrackInDevice();
+ }
+
+ TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
+ }
+
+ void TextureBase::DestroyImpl() {
+ mState = TextureState::Destroyed;
+ }
+
+ // static
+ TextureBase* TextureBase::MakeError(DeviceBase* device) {
+ return new TextureBase(device, ObjectBase::kError);
+ }
+
+ ObjectType TextureBase::GetType() const {
+ return ObjectType::Texture;
+ }
+
+ wgpu::TextureDimension TextureBase::GetDimension() const {
+ ASSERT(!IsError());
+ return mDimension;
+ }
+
+ const Format& TextureBase::GetFormat() const {
+ ASSERT(!IsError());
+ return mFormat;
+ }
+ const Extent3D& TextureBase::GetSize() const {
+ ASSERT(!IsError());
+ return mSize;
+ }
+ uint32_t TextureBase::GetWidth() const {
+ ASSERT(!IsError());
+ return mSize.width;
+ }
+ uint32_t TextureBase::GetHeight() const {
+ ASSERT(!IsError());
+ return mSize.height;
+ }
+ uint32_t TextureBase::GetDepth() const {
+ ASSERT(!IsError());
+ ASSERT(mDimension == wgpu::TextureDimension::e3D);
+ return mSize.depthOrArrayLayers;
+ }
+ uint32_t TextureBase::GetArrayLayers() const {
+ ASSERT(!IsError());
+ if (mDimension == wgpu::TextureDimension::e3D) {
+ return 1;
+ }
+ return mSize.depthOrArrayLayers;
+ }
+ uint32_t TextureBase::GetNumMipLevels() const {
+ ASSERT(!IsError());
+ return mMipLevelCount;
+ }
+ SubresourceRange TextureBase::GetAllSubresources() const {
+ ASSERT(!IsError());
+ return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
+ }
+ uint32_t TextureBase::GetSampleCount() const {
+ ASSERT(!IsError());
+ return mSampleCount;
+ }
+ uint32_t TextureBase::GetSubresourceCount() const {
+ ASSERT(!IsError());
+ return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
+ }
+ wgpu::TextureUsage TextureBase::GetUsage() const {
+ ASSERT(!IsError());
+ return mUsage;
+ }
+ wgpu::TextureUsage TextureBase::GetInternalUsage() const {
+ ASSERT(!IsError());
+ return mInternalUsage;
+ }
+
+ TextureBase::TextureState TextureBase::GetTextureState() const {
+ ASSERT(!IsError());
+ return mState;
+ }
+
+ uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
+ uint32_t arraySlice,
+ Aspect aspect) const {
+ ASSERT(HasOneBit(aspect));
+ return mipLevel +
+ GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
+ }
+
+ bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
+ ASSERT(!IsError());
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
+ const SubresourceRange& range) {
+ ASSERT(!IsError());
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+ }
+ }
+ }
+ }
+
+ MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
+ this);
+ return {};
+ }
+
+ bool TextureBase::IsMultisampledTexture() const {
+ ASSERT(!IsError());
+ return mSampleCount > 1;
+ }
+
+ Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
+ Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
+ if (mDimension == wgpu::TextureDimension::e1D) {
+ return extent;
+ }
+
+ extent.height = std::max(mSize.height >> level, 1u);
+ if (mDimension == wgpu::TextureDimension::e2D) {
+ return extent;
+ }
+
+ extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+ return extent;
+ }
+
+ Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
+ Extent3D extent = GetMipLevelVirtualSize(level);
+
+ // Compressed Textures will have paddings if their width or height is not a multiple of
+ // 4 at non-zero mipmap levels.
+ if (mFormat.isCompressed && level != 0) {
+ // If |level| is non-zero, then each dimension of |extent| is at most half of
+ // the max texture dimension. Computations here which add the block width/height
+ // to the extent cannot overflow.
+ const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
+ extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
+ extent.height =
+ (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
+ }
+
+ return extent;
+ }
+
+ Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const {
+ const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
+ ASSERT(origin.x <= virtualSizeAtLevel.width);
+ ASSERT(origin.y <= virtualSizeAtLevel.height);
+ uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
+ ? (virtualSizeAtLevel.width - origin.x)
+ : extent.width;
+ uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
+ ? (virtualSizeAtLevel.height - origin.y)
+ : extent.height;
+ return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
+ }
+
+ TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+
+ Ref<TextureViewBase> result;
+ if (device->ConsumedError(device->CreateTextureView(this, descriptor), &result,
+ "calling %s.CreateView(%s).", this, descriptor)) {
+ return TextureViewBase::MakeError(device);
+ }
+ return result.Detach();
+ }
+
+ void TextureBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
+ return;
+ }
+ ASSERT(!IsError());
+ Destroy();
+ }
+
+ MaybeError TextureBase::ValidateDestroy() const {
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ return {};
+ }
+
+ // TextureViewBase
+
+ TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
+ : ApiObjectBase(texture->GetDevice(), descriptor->label),
+ mTexture(texture),
+ mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
+ mDimension(descriptor->dimension),
+ mRange({ConvertViewAspect(mFormat, descriptor->aspect),
+ {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
+ {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
+ TrackInDevice();
+ }
+
+ TextureViewBase::TextureViewBase(TextureBase* texture)
+ : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
+ mTexture(texture),
+ mFormat(kUnusedFormat) {
+ TrackInDevice();
+ }
+
+ TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
+ }
+
+ void TextureViewBase::DestroyImpl() {
+ }
+
+ // static
+ TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
+ return new TextureViewBase(device, ObjectBase::kError);
+ }
+
+ ObjectType TextureViewBase::GetType() const {
+ return ObjectType::TextureView;
+ }
+
+ const TextureBase* TextureViewBase::GetTexture() const {
+ ASSERT(!IsError());
+ return mTexture.Get();
+ }
+
+ TextureBase* TextureViewBase::GetTexture() {
+ ASSERT(!IsError());
+ return mTexture.Get();
+ }
+
+ Aspect TextureViewBase::GetAspects() const {
+ ASSERT(!IsError());
+ return mRange.aspects;
+ }
+
+ const Format& TextureViewBase::GetFormat() const {
+ ASSERT(!IsError());
+ return mFormat;
+ }
+
+ wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
+ ASSERT(!IsError());
+ return mDimension;
+ }
+
+ uint32_t TextureViewBase::GetBaseMipLevel() const {
+ ASSERT(!IsError());
+ return mRange.baseMipLevel;
+ }
+
+ uint32_t TextureViewBase::GetLevelCount() const {
+ ASSERT(!IsError());
+ return mRange.levelCount;
+ }
+
+ uint32_t TextureViewBase::GetBaseArrayLayer() const {
+ ASSERT(!IsError());
+ return mRange.baseArrayLayer;
+ }
+
+ uint32_t TextureViewBase::GetLayerCount() const {
+ ASSERT(!IsError());
+ return mRange.layerCount;
+ }
+
+ const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
+ ASSERT(!IsError());
+ return mRange;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Texture.h b/chromium/third_party/dawn/src/dawn/native/Texture.h
new file mode 100644
index 00000000000..54ed018a585
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Texture.h
@@ -0,0 +1,157 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TEXTURE_H_
+#define DAWNNATIVE_TEXTURE_H_
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/Forward.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Subresource.h"
+
+#include "dawn/native/dawn_platform.h"
+
+#include <vector>
+
+namespace dawn::native {
+
+ MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+ const TextureDescriptor* descriptor);
+ MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+ TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
+ const TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+ bool IsValidSampleCount(uint32_t sampleCount);
+
+ static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
+ wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
+ kReadOnlyRenderAttachment;
+
+ class TextureBase : public ApiObjectBase {
+ public:
+ enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
+ enum class ClearValue { Zero, NonZero };
+ TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
+
+ static TextureBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ wgpu::TextureDimension GetDimension() const;
+ const Format& GetFormat() const;
+ const Extent3D& GetSize() const;
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ uint32_t GetDepth() const;
+ uint32_t GetArrayLayers() const;
+ uint32_t GetNumMipLevels() const;
+ SubresourceRange GetAllSubresources() const;
+ uint32_t GetSampleCount() const;
+ uint32_t GetSubresourceCount() const;
+
+ // |GetUsage| returns the usage with which the texture was created using the base WebGPU
+ // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
+ // returns the union of base usage and the usages added by the extension.
+ wgpu::TextureUsage GetUsage() const;
+ wgpu::TextureUsage GetInternalUsage() const;
+
+ TextureState GetTextureState() const;
+ uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
+ bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
+ void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
+
+ MaybeError ValidateCanUseInSubmitNow() const;
+
+ bool IsMultisampledTexture() const;
+
+ // For a texture with non-block-compressed texture format, its physical size is always equal
+ // to its virtual size. For a texture with block compressed texture format, the physical
+ // size is the one with paddings if necessary, which is always a multiple of the block size
+ // and used in texture copying. The virtual size is the one without paddings, which is not
+ // required to be a multiple of the block size and used in texture sampling.
+ Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
+ Extent3D GetMipLevelVirtualSize(uint32_t level) const;
+ Extent3D ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const;
+
+ // Dawn API
+ TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
+ void APIDestroy();
+
+ protected:
+ // Constructor used only for mocking and testing.
+ TextureBase(DeviceBase* device, TextureState state);
+ void DestroyImpl() override;
+
+ private:
+ TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ MaybeError ValidateDestroy() const;
+ wgpu::TextureDimension mDimension;
+ const Format& mFormat;
+ Extent3D mSize;
+ uint32_t mMipLevelCount;
+ uint32_t mSampleCount;
+ wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
+ wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
+ TextureState mState;
+
+ // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
+ std::vector<bool> mIsSubresourceContentInitializedAtIndex;
+ };
+
+ class TextureViewBase : public ApiObjectBase {
+ public:
+ TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+ static TextureViewBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
+ const TextureBase* GetTexture() const;
+ TextureBase* GetTexture();
+
+ Aspect GetAspects() const;
+ const Format& GetFormat() const;
+ wgpu::TextureViewDimension GetDimension() const;
+ uint32_t GetBaseMipLevel() const;
+ uint32_t GetLevelCount() const;
+ uint32_t GetBaseArrayLayer() const;
+ uint32_t GetLayerCount() const;
+ const SubresourceRange& GetSubresourceRange() const;
+
+ protected:
+ // Constructor used only for mocking and testing.
+ TextureViewBase(TextureBase* texture);
+ void DestroyImpl() override;
+
+ private:
+ TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ Ref<TextureBase> mTexture;
+
+ const Format& mFormat;
+ wgpu::TextureViewDimension mDimension;
+ SubresourceRange mRange;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp b/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp
new file mode 100644
index 00000000000..d84c982af29
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/TintUtils.cpp
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/Device.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native {
+
+ namespace {
+
+ thread_local DeviceBase* tlDevice = nullptr;
+
+ void TintICEReporter(const tint::diag::List& diagnostics) {
+ if (tlDevice) {
+ tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
+ }
+ }
+
+ bool InitializeTintErrorReporter() {
+ tint::SetInternalCompilerErrorReporter(&TintICEReporter);
+ return true;
+ }
+
+ } // namespace
+
+ ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
+ // Call tint::SetInternalCompilerErrorReporter() the first time
+ // this constructor is called. Static initialization is
+ // guaranteed to be thread-safe, and only occur once.
+ static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
+ (void)init_once_tint_error_reporter;
+
+ // Shouldn't have overlapping instances of this handler.
+ ASSERT(tlDevice == nullptr);
+ tlDevice = device;
+ }
+
+ ScopedTintICEHandler::~ScopedTintICEHandler() {
+ tlDevice = nullptr;
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/TintUtils.h b/chromium/third_party/dawn/src/dawn/native/TintUtils.h
new file mode 100644
index 00000000000..2dcb8f31e61
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/TintUtils.h
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TINTUTILS_H_
+#define DAWNNATIVE_TINTUTILS_H_
+
+#include "dawn/common/NonCopyable.h"
+
+namespace dawn::native {
+
+ class DeviceBase;
+
+ // Indicates that for the lifetime of this object tint internal compiler errors should be
+ // reported to the given device.
+ class ScopedTintICEHandler : public NonCopyable {
+ public:
+ ScopedTintICEHandler(DeviceBase* device);
+ ~ScopedTintICEHandler();
+
+ private:
+ ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
+ };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/ToBackend.h b/chromium/third_party/dawn/src/dawn/native/ToBackend.h
new file mode 100644
index 00000000000..a2a69cb8cce
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/ToBackend.h
@@ -0,0 +1,155 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TOBACKEND_H_
+#define DAWNNATIVE_TOBACKEND_H_
+
+#include "dawn/native/Forward.h"
+
+namespace dawn::native {
+
+ // ToBackendTraits implements the mapping from base type to member type of BackendTraits
+ template <typename T, typename BackendTraits>
+ struct ToBackendTraits;
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<AdapterBase, BackendTraits> {
+ using BackendType = typename BackendTraits::AdapterType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<BindGroupBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BindGroupType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BindGroupLayoutType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<BufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::BufferType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<CommandBufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::CommandBufferType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ComputePipelineType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<DeviceBase, BackendTraits> {
+ using BackendType = typename BackendTraits::DeviceType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
+ using BackendType = typename BackendTraits::PipelineLayoutType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<QuerySetBase, BackendTraits> {
+ using BackendType = typename BackendTraits::QuerySetType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<QueueBase, BackendTraits> {
+ using BackendType = typename BackendTraits::QueueType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
+ using BackendType = typename BackendTraits::RenderPipelineType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ResourceHeapType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<SamplerBase, BackendTraits> {
+ using BackendType = typename BackendTraits::SamplerType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
+ using BackendType = typename BackendTraits::ShaderModuleType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<StagingBufferBase, BackendTraits> {
+ using BackendType = typename BackendTraits::StagingBufferType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<TextureBase, BackendTraits> {
+ using BackendType = typename BackendTraits::TextureType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<SwapChainBase, BackendTraits> {
+ using BackendType = typename BackendTraits::SwapChainType;
+ };
+
+ template <typename BackendTraits>
+ struct ToBackendTraits<TextureViewBase, BackendTraits> {
+ using BackendType = typename BackendTraits::TextureViewType;
+ };
+
+ // ToBackendBase implements conversion to the given BackendTraits
+ // To use it in a backend, use the following:
+ // template<typename T>
+ // auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
+ // return ToBackendBase<MyBackendTraits>(common);
+ // }
+
+ template <typename BackendTraits, typename T>
+ Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
+ return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
+ common);
+ }
+
+ template <typename BackendTraits, typename T>
+ Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
+ return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
+ common);
+ }
+
+ template <typename BackendTraits, typename T>
+ const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
+ const Ref<T>& common) {
+ return reinterpret_cast<
+ const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
+ }
+
+ template <typename BackendTraits, typename T>
+ typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
+ return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+ }
+
+ template <typename BackendTraits, typename T>
+ const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
+ return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(
+ common);
+ }
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_TOBACKEND_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/Toggles.cpp b/chromium/third_party/dawn/src/dawn/native/Toggles.cpp
new file mode 100644
index 00000000000..e65bdf17b07
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Toggles.cpp
@@ -0,0 +1,324 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/Toggles.h"
+
+namespace dawn::native {
+ namespace {
+
+        // Pairs a Toggle enum value with its user-facing metadata. The stored enum is
+        // redundant with the table index and is ASSERTed against it at lookup time.
+        struct ToggleEnumAndInfo {
+            Toggle toggle;
+            ToggleInfo info;
+        };
+
+        // Exactly one entry per Toggle: sized by Toggle::EnumCount.
+        using ToggleEnumAndInfoList =
+            std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
+
+        // Metadata for every Toggle. MUST stay in the same order as the Toggle enum:
+        // ToggleEnumToName() and TogglesInfo index this table directly by the enum value
+        // (verified by ASSERTs on the redundant `toggle` member).
+        // Fixes in review: "exeed"->"exceed", "is available"->"are available",
+        // "will be log"->"will be logged", and two missing spaces at string-literal
+        // wraps in the disallow_spirv description ("modules.This", "sendingSPIR-V").
+        static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
+            {Toggle::EmulateStoreAndMSAAResolve,
+             {"emulate_store_and_msaa_resolve",
+              "Emulate storing into multisampled color attachments and doing MSAA resolve "
+              "simultaneously. This workaround is enabled by default on the Metal drivers that do "
+              "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
+              "those platforms, we should do MSAA resolve in another render pass after ending the "
+              "previous one.",
+              "https://crbug.com/dawn/56"}},
+            {Toggle::NonzeroClearResourcesOnCreationForTesting,
+             {"nonzero_clear_resources_on_creation_for_testing",
+              "Clears texture to full 1 bits as soon as they are created, but doesn't update "
+              "the tracking state of the texture. This way we can test the logic of clearing "
+              "textures that use recycled memory.",
+              "https://crbug.com/dawn/145"}},
+            {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+             {"always_resolve_into_zero_level_and_layer",
+              "When the resolve target is a texture view that is created on the non-zero level or "
+              "layer of a texture, we first resolve into a temporarily 2D texture with only one "
+              "mipmap level and one array layer, and copy the result of MSAA resolve into the "
+              "true resolve target. This workaround is enabled by default on the Metal drivers "
+              "that have bugs when setting non-zero resolveLevel or resolveSlice.",
+              "https://crbug.com/dawn/56"}},
+            {Toggle::LazyClearResourceOnFirstUse,
+             {"lazy_clear_resource_on_first_use",
+              "Clears resource to zero on first usage. This initializes the resource "
+              "so that no dirty bits from recycled memory is present in the new resource.",
+              "https://crbug.com/dawn/145"}},
+            {Toggle::TurnOffVsync,
+             {"turn_off_vsync",
+              "Turn off vsync when rendering. In order to do performance test or run perf tests, "
+              "turn off vsync so that the fps can exceed 60.",
+              "https://crbug.com/dawn/237"}},
+            {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+             {"use_temporary_buffer_in_texture_to_texture_copy",
+              "Split texture-to-texture copy into two copies: copy from source texture into a "
+              "temporary buffer, and copy from the temporary buffer into the destination texture "
+              "when copying between compressed textures that don't have block-aligned sizes. This "
+              "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
+              "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
+              "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+              "https://crbug.com/dawn/42"}},
+            {Toggle::UseD3D12ResourceHeapTier2,
+             {"use_d3d12_resource_heap_tier2",
+              "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+              "texture and buffers in the same heap. This allows better heap re-use and reduces "
+              "fragmentation.",
+              "https://crbug.com/dawn/27"}},
+            {Toggle::UseD3D12RenderPass,
+             {"use_d3d12_render_pass",
+              "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+              "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+              "will emulate a render pass.",
+              "https://crbug.com/dawn/36"}},
+            {Toggle::UseD3D12ResidencyManagement,
+             {"use_d3d12_residency_management",
+              "Enable residency management. This allows page-in and page-out of resource heaps in "
+              "GPU memory. This component improves overcommitted performance by keeping the most "
+              "recently used resources local to the GPU. Turning this component off can cause "
+              "allocation failures when application memory exceeds physical device memory.",
+              "https://crbug.com/dawn/193"}},
+            {Toggle::SkipValidation,
+             {"skip_validation", "Skip expensive validation of Dawn commands.",
+              "https://crbug.com/dawn/271"}},
+            {Toggle::VulkanUseD32S8,
+             {"vulkan_use_d32s8",
+              "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
+              "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
+              "use the D24S8 format when possible.",
+              "https://crbug.com/dawn/286"}},
+            {Toggle::MetalDisableSamplerCompare,
+             {"metal_disable_sampler_compare",
+              "Disables the use of sampler compare on Metal. This is unsupported before A9 "
+              "processors.",
+              "https://crbug.com/dawn/342"}},
+            {Toggle::MetalUseSharedModeForCounterSampleBuffer,
+             {"metal_use_shared_mode_for_counter_sample_buffer",
+              "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
+              "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
+              "does not work properly on Intel platforms. The workaround is use shared mode "
+              "instead.",
+              "https://crbug.com/dawn/434"}},
+            {Toggle::DisableBaseVertex,
+             {"disable_base_vertex",
+              "Disables the use of non-zero base vertex which is unsupported on some platforms.",
+              "https://crbug.com/dawn/343"}},
+            {Toggle::DisableBaseInstance,
+             {"disable_base_instance",
+              "Disables the use of non-zero base instance which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/343"}},
+            {Toggle::DisableIndexedDrawBuffers,
+             {"disable_indexed_draw_buffers",
+              "Disables the use of indexed draw buffer state which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/582"}},
+            {Toggle::DisableSnormRead,
+             {"disable_snorm_read",
+              "Disables reading from Snorm textures which is unsupported on some platforms.",
+              "https://crbug.com/dawn/667"}},
+            {Toggle::DisableDepthStencilRead,
+             {"disable_depth_stencil_read",
+              "Disables reading from depth/stencil textures which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/667"}},
+            {Toggle::DisableSampleVariables,
+             {"disable_sample_variables",
+              "Disables gl_SampleMask and related functionality which is unsupported on some "
+              "platforms.",
+              "https://crbug.com/dawn/673"}},
+            {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+             {"use_d3d12_small_shader_visible_heap",
+              "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
+              "default. This setting is used to test bindgroup encoding.",
+              "https://crbug.com/dawn/155"}},
+            {Toggle::UseDXC,
+             {"use_dxc",
+              "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
+              "are available.",
+              "https://crbug.com/dawn/402"}},
+            {Toggle::DisableRobustness,
+             {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
+            {Toggle::MetalEnableVertexPulling,
+             {"metal_enable_vertex_pulling",
+              "Uses vertex pulling to protect out-of-bounds reads on Metal",
+              "https://crbug.com/dawn/480"}},
+            {Toggle::DisallowUnsafeAPIs,
+             {"disallow_unsafe_apis",
+              "Produces validation errors on API entry points or parameter combinations that "
+              "aren't considered secure yet.",
+              "http://crbug.com/1138528"}},
+            {Toggle::FlushBeforeClientWaitSync,
+             {"flush_before_client_wait_sync",
+              "Call glFlush before glClientWaitSync to work around bugs in the latter",
+              "https://crbug.com/dawn/633"}},
+            {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+             {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
+              "level",
+              "Split texture-to-texture copy into two copies: copy from source texture into a "
+              "temporary buffer, and copy from the temporary buffer into the destination texture "
+              "under specific situations. This workaround is by default enabled on some Intel "
+              "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
+              "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
+              "level to a smaller mip level on D3D12 backends.",
+              "https://crbug.com/1161355"}},
+            {Toggle::EmitHLSLDebugSymbols,
+             {"emit_hlsl_debug_symbols",
+              "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
+              "compiling HLSL code. Enables better shader debugging with external graphics "
+              "debugging tools.",
+              "https://crbug.com/dawn/776"}},
+            {Toggle::DisallowSpirv,
+             {"disallow_spirv",
+              "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. "
+              "This is useful to prevent a Chromium renderer process from successfully sending "
+              "SPIR-V code to be compiled in the GPU process.",
+              "https://crbug.com/1214923"}},
+            {Toggle::DumpShaders,
+             {"dump_shaders",
+              "Dump shaders for debugging purposes. Dumped shaders will be logged via "
+              "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
+              "function.",
+              "https://crbug.com/dawn/792"}},
+            {Toggle::DEPRECATED_DumpTranslatedShaders,
+             {"dump_translated_shaders", "Deprecated. Use dump_shaders",
+              "https://crbug.com/dawn/792"}},
+            {Toggle::ForceWGSLStep,
+             {"force_wgsl_step",
+              "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
+              "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
+              "work when the same translation runs in a WASM module in the page.",
+              "https://crbug.com/dawn/960"}},
+            {Toggle::DisableWorkgroupInit,
+             {"disable_workgroup_init",
+              "Disables the workgroup memory zero-initialization for compute shaders.",
+              "https://crbug.com/tint/1003"}},
+            {Toggle::DisableSymbolRenaming,
+             {"disable_symbol_renaming",
+              "Disables the WGSL symbol renaming so that names are preserved.",
+              "https://crbug.com/dawn/1016"}},
+            {Toggle::UseUserDefinedLabelsInBackend,
+             {"use_user_defined_labels_in_backend",
+              "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
+              "objects.",
+              "https://crbug.com/dawn/840"}},
+            {Toggle::DisableR8RG8Mipmaps,
+             {"disable_r8_rg8_mipmaps",
+              "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
+              "to not clear correctly.",
+              "https://crbug.com/dawn/1071"}},
+            {Toggle::UseDummyFragmentInVertexOnlyPipeline,
+             {"use_dummy_fragment_in_vertex_only_pipeline",
+              "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
+              "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
+              "some Metal devices with Intel GPU to ensure the depth result is correct.",
+              "https://crbug.com/dawn/136"}},
+            {Toggle::FxcOptimizations,
+             {"fxc_optimizations",
+              "Enable optimizations when compiling with FXC. Disabled by default because FXC "
+              "miscompiles in many cases when optimizations are enabled.",
+              "https://crbug.com/dawn/1203"}},
+            {Toggle::RecordDetailedTimingInTraceEvents,
+             {"record_detailed_timing_in_trace_events",
+              "Record detailed timing information in trace events at certain point. Currently the "
+              "timing information is recorded right before calling ExecuteCommandLists on a D3D12 "
+              "command queue, and the information includes system time, CPU timestamp, GPU "
+              "timestamp, and their frequency.",
+              "https://crbug.com/dawn/1264"}},
+
+            // Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
+        }};
+ } // anonymous namespace
+
+    // Sets or clears the bit for `toggle`. The deprecated DumpTranslatedShaders value
+    // is an alias of DumpShaders and is redirected to it.
+    void TogglesSet::Set(Toggle toggle, bool enabled) {
+        const Toggle effectiveToggle =
+            (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) ? Toggle::DumpShaders : toggle;
+        ASSERT(effectiveToggle != Toggle::InvalidEnum);
+        toggleBitset.set(static_cast<size_t>(effectiveToggle), enabled);
+    }
+
+    // Returns whether the bit for `toggle` is set, resolving the deprecated
+    // DumpTranslatedShaders alias to DumpShaders first.
+    bool TogglesSet::Has(Toggle toggle) const {
+        const Toggle effectiveToggle =
+            (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) ? Toggle::DumpShaders : toggle;
+        ASSERT(effectiveToggle != Toggle::InvalidEnum);
+        return toggleBitset.test(static_cast<size_t>(effectiveToggle));
+    }
+
+    // Returns the user-facing names of all toggles whose bit is set, in bit order.
+    std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
+        std::vector<const char*> names;
+        names.reserve(toggleBitset.count());
+        for (uint32_t bit : IterateBitSet(toggleBitset)) {
+            names.push_back(ToggleEnumToName(static_cast<Toggle>(bit)));
+        }
+        return names;
+    }
+
+    // Returns the user-facing name of `toggle`. The info table is indexed directly by
+    // the enum value; the entry's stored enum is ASSERTed to match so that any
+    // table/enum ordering mismatch is caught in debug builds.
+    const char* ToggleEnumToName(Toggle toggle) {
+        ASSERT(toggle != Toggle::InvalidEnum);
+        const size_t index = static_cast<size_t>(toggle);
+        ASSERT(kToggleNameAndInfoList[index].toggle == toggle);
+        return kToggleNameAndInfoList[index].info.name;
+    }
+
+    // Used to query the details of a toggle. Returns nullptr if toggleName is not a
+    // valid name of a toggle supported in Dawn.
+    const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
+        ASSERT(toggleName);
+
+        EnsureToggleNameToEnumMapInitialized();
+
+        auto iter = mToggleNameToEnumMap.find(toggleName);
+        if (iter == mToggleNameToEnumMap.cend()) {
+            return nullptr;
+        }
+        return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
+    }
+
+    // Returns the Toggle enum value for `toggleName`, or Toggle::InvalidEnum if the
+    // name is unknown.
+    Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
+        ASSERT(toggleName);
+
+        EnsureToggleNameToEnumMapInitialized();
+
+        auto iter = mToggleNameToEnumMap.find(toggleName);
+        if (iter == mToggleNameToEnumMap.cend()) {
+            return Toggle::InvalidEnum;
+        }
+        return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
+    }
+
+    // Lazily builds the name -> enum map from the info table on first use. Also
+    // ASSERTs that each table entry sits at the index matching its enum value.
+    void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
+        if (mToggleNameToEnumMapInitialized) {
+            return;
+        }
+
+        size_t index = 0;
+        for (const ToggleEnumAndInfo& entry : kToggleNameAndInfoList) {
+            ASSERT(index == static_cast<size_t>(entry.toggle));
+            mToggleNameToEnumMap[entry.info.name] = entry.toggle;
+            ++index;
+        }
+
+        mToggleNameToEnumMapInitialized = true;
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/Toggles.h b/chromium/third_party/dawn/src/dawn/native/Toggles.h
new file mode 100644
index 00000000000..4a4574dd437
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/Toggles.h
@@ -0,0 +1,98 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TOGGLES_H_
+#define DAWNNATIVE_TOGGLES_H_
+
+#include <bitset>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "dawn/native/DawnNative.h"
+
+namespace dawn::native {
+
+    // The list of toggles (workarounds and debug options) known to Dawn. The order is
+    // significant: Toggles.cpp indexes kToggleNameAndInfoList directly by these values,
+    // so existing entries must not be reordered and new toggles must be added right
+    // before EnumCount.
+    enum class Toggle {
+        EmulateStoreAndMSAAResolve,
+        NonzeroClearResourcesOnCreationForTesting,
+        AlwaysResolveIntoZeroLevelAndLayer,
+        LazyClearResourceOnFirstUse,
+        TurnOffVsync,
+        UseTemporaryBufferInCompressedTextureToTextureCopy,
+        UseD3D12ResourceHeapTier2,
+        UseD3D12RenderPass,
+        UseD3D12ResidencyManagement,
+        SkipValidation,
+        VulkanUseD32S8,
+        MetalDisableSamplerCompare,
+        MetalUseSharedModeForCounterSampleBuffer,
+        DisableBaseVertex,
+        DisableBaseInstance,
+        DisableIndexedDrawBuffers,
+        DisableSnormRead,
+        DisableDepthStencilRead,
+        DisableSampleVariables,
+        UseD3D12SmallShaderVisibleHeapForTesting,
+        UseDXC,
+        DisableRobustness,
+        MetalEnableVertexPulling,
+        DisallowUnsafeAPIs,
+        FlushBeforeClientWaitSync,
+        UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+        EmitHLSLDebugSymbols,
+        DisallowSpirv,
+        DumpShaders,
+        DEPRECATED_DumpTranslatedShaders,  // Use DumpShaders
+        ForceWGSLStep,
+        DisableWorkgroupInit,
+        DisableSymbolRenaming,
+        UseUserDefinedLabelsInBackend,
+        DisableR8RG8Mipmaps,
+        UseDummyFragmentInVertexOnlyPipeline,
+        FxcOptimizations,
+        RecordDetailedTimingInTraceEvents,
+
+        EnumCount,
+        InvalidEnum = EnumCount,
+    };
+
+    // A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
+    // convenience to convert the enums of enum class Toggle to the indices of a bitset.
+    struct TogglesSet {
+        // One bit per Toggle; bit index == static_cast<size_t>(toggle).
+        std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
+
+        // Sets or clears the bit for `toggle` (the deprecated DumpTranslatedShaders
+        // value is redirected to DumpShaders, see Toggles.cpp).
+        void Set(Toggle toggle, bool enabled);
+        // Returns whether the bit for `toggle` is set (with the same redirection).
+        bool Has(Toggle toggle) const;
+        // Returns the names of all toggles currently contained in the set.
+        std::vector<const char*> GetContainedToggleNames() const;
+    };
+
+    // Returns the user-facing name for `toggle` (defined in the info table in Toggles.cpp).
+    const char* ToggleEnumToName(Toggle toggle);
+
+    // Queries toggle metadata by name; the name -> enum map is built lazily on first use.
+    class TogglesInfo {
+      public:
+        // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+        // of a toggle supported in Dawn.
+        const ToggleInfo* GetToggleInfo(const char* toggleName);
+        // Returns the Toggle for `toggleName`, or Toggle::InvalidEnum if unknown.
+        Toggle ToggleNameToEnum(const char* toggleName);
+
+      private:
+        // Builds mToggleNameToEnumMap from the toggle info table if not done yet.
+        void EnsureToggleNameToEnumMapInitialized();
+
+        bool mToggleNameToEnumMapInitialized = false;
+        std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_TOGGLES_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp b/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp
new file mode 100644
index 00000000000..2f2ae7f23d4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/VertexFormat.cpp
@@ -0,0 +1,69 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/VertexFormat.h"
+
+#include "dawn/common/Assert.h"
+
+#include <array>
+
+namespace dawn::native {
+
+    // Table of vertex format metadata, indexed directly by the wgpu::VertexFormat enum
+    // value (GetVertexFormatInfo ASSERTs the entry's format matches the lookup).
+    // Fields per entry: {format, byteSize, componentCount, componentByteSize, baseType}.
+    static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
+        //
+        {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
+
+        {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
+        {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
+        {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
+        {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
+        //
+    }};
+
+ const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
+ ASSERT(format != wgpu::VertexFormat::Undefined);
+ ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
+ ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
+ return sVertexFormatTable[static_cast<uint32_t>(format)];
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/VertexFormat.h b/chromium/third_party/dawn/src/dawn/native/VertexFormat.h
new file mode 100644
index 00000000000..f88ae289097
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/VertexFormat.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VERTEXFORMAT_H_
+#define DAWNNATIVE_VERTEXFORMAT_H_
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+
+    // The scalar base type of a vertex format's components.
+    enum class VertexFormatBaseType {
+        Float,
+        Uint,
+        Sint,
+    };
+
+    // Static metadata about a wgpu::VertexFormat.
+    struct VertexFormatInfo {
+        wgpu::VertexFormat format;      // The format this entry describes.
+        uint32_t byteSize;              // Total size in bytes of one attribute of this format.
+        uint32_t componentCount;        // Number of components (1 to 4).
+        uint32_t componentByteSize;     // Size in bytes of a single component.
+        VertexFormatBaseType baseType;  // Scalar base type of the components.
+    };
+
+    // Returns the metadata for `format`; `format` must be a valid, non-Undefined value
+    // (enforced by ASSERTs in VertexFormat.cpp).
+    const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_VERTEXFORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp
new file mode 100644
index 00000000000..1b0f6e88cbd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.cpp
@@ -0,0 +1,31 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/XlibXcbFunctions.h"
+
+namespace dawn::native {
+
+    // Opens libX11-xcb and resolves XGetXCBConnection. On any failure the library is
+    // closed again; IsLoaded() then reports false because xGetXCBConnection was never
+    // assigned a non-null value.
+    XlibXcbFunctions::XlibXcbFunctions() {
+        const bool loaded = mLib.Open("libX11-xcb.so.1") &&
+                            mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection");
+        if (!loaded) {
+            mLib.Close();
+        }
+    }
+    XlibXcbFunctions::~XlibXcbFunctions() = default;
+
+    // The functions are usable iff XGetXCBConnection was successfully resolved.
+    bool XlibXcbFunctions::IsLoaded() const {
+        return xGetXCBConnection != nullptr;
+    }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h
new file mode 100644
index 00000000000..52998a4d9a5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/XlibXcbFunctions.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_XLIBXCBFUNCTIONS_H_
+#define DAWNNATIVE_XLIBXCBFUNCTIONS_H_
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/common/xlib_with_undefs.h"
+
+class DynamicLib;
+
+namespace dawn::native {
+
+    // A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
+    // (and nothing else). This has to be dynamic because this library isn't present on all Linux
+    // deployment platforms that Chromium targets.
+    class XlibXcbFunctions {
+      public:
+        XlibXcbFunctions();
+        ~XlibXcbFunctions();
+
+        // Returns true iff the library was opened and XGetXCBConnection resolved.
+        bool IsLoaded() const;
+
+        // Functions from x11-xcb (nullptr when not loaded).
+        decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
+
+      private:
+        DynamicLib mLib;
+    };
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_XLIBXCBFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp
new file mode 100644
index 00000000000..d31b9afef65
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.cpp
@@ -0,0 +1,425 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/AdapterD3D12.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/WindowsUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+
+ Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
+ : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
+ mHardwareAdapter(hardwareAdapter),
+ mBackend(backend) {
+ }
+
+ Adapter::~Adapter() {
+ CleanUpDebugLayerFilters();
+ }
+
+ bool Adapter::SupportsExternalImages() const {
+ // Via dawn::native::d3d12::ExternalImageDXGI::Create
+ return true;
+ }
+
+ const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
+ return mDeviceInfo;
+ }
+
+ IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
+ return mHardwareAdapter.Get();
+ }
+
+ Backend* Adapter::GetBackend() const {
+ return mBackend;
+ }
+
+ ComPtr<ID3D12Device> Adapter::GetDevice() const {
+ return mD3d12Device;
+ }
+
+ const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
+ return mDriverVersion;
+ }
+
+ MaybeError Adapter::InitializeImpl() {
+ // D3D12 cannot check for feature support without a device.
+ // Create the device to populate the adapter properties then reuse it when needed for actual
+ // rendering.
+ const PlatformFunctions* functions = GetBackend()->GetFunctions();
+ if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
+ _uuidof(ID3D12Device), &mD3d12Device))) {
+ return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
+ }
+
+ DAWN_TRY(InitializeDebugLayerFilters());
+
+ DXGI_ADAPTER_DESC1 adapterDesc;
+ mHardwareAdapter->GetDesc1(&adapterDesc);
+
+ mDeviceId = adapterDesc.DeviceId;
+ mVendorId = adapterDesc.VendorId;
+ mName = WCharToUTF8(adapterDesc.Description);
+
+ DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+ if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
+ mAdapterType = wgpu::AdapterType::CPU;
+ } else {
+ mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
+ : wgpu::AdapterType::DiscreteGPU;
+ }
+
+ // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
+ LARGE_INTEGER umdVersion;
+ if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
+ DXGI_ERROR_UNSUPPORTED) {
+ uint64_t encodedVersion = umdVersion.QuadPart;
+
+ std::ostringstream o;
+ o << "D3D12 driver version ";
+ for (size_t i = 0; i < mDriverVersion.size(); ++i) {
+ mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
+ o << mDriverVersion[i] << ".";
+ }
+ mDriverDescription = o.str();
+ }
+
+ return {};
+ }
+
+ bool Adapter::AreTimestampQueriesSupported() const {
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
+ HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
+ // and vGPU implementations.
+ uint64_t timeStampFrequency;
+ hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ if (AreTimestampQueriesSupported()) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+ }
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+
+ return {};
+ }
+
+ MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
+
+ DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+ &featureData, sizeof(featureData)),
+ "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
+
+ // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
+ const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
+
+ D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
+ featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
+ featureLevels.pFeatureLevelsRequested = levelsToQuery;
+ DAWN_TRY(
+ CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
+ &featureLevels, sizeof(featureLevels)),
+ "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
+
+ if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
+ featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
+ return DAWN_VALIDATION_ERROR(
+ "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
+ "devices.");
+ }
+
+ GetDefaultLimits(&limits->v1);
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
+
+ // Limits that are the same across D3D feature levels
+ limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
+ limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
+ limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
+ limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
+ // Slot values can be 0-15, inclusive:
+ // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
+ limits->v1.maxVertexBuffers = 16;
+ limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
+
+ // Note: WebGPU requires FL11.1+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
+ // Resource Binding Tier: 1 2 3
+
+ // Max(CBV+UAV+SRV) 1M 1M 1M+
+ // Max CBV per stage 14 14 full
+ // Max SRV per stage 128 full full
+ // Max UAV in all stages 64 64 full
+ // Max Samplers per stage 16 2048 2048
+
+ // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
+ // "full" means the full heap can be used. This is tested
+ // to work for 1 million descriptors, and 1.1M for tier 3.
+ uint32_t maxCBVsPerStage;
+ uint32_t maxSRVsPerStage;
+ uint32_t maxUAVsAllStages;
+ uint32_t maxSamplersPerStage;
+ switch (featureData.ResourceBindingTier) {
+ case D3D12_RESOURCE_BINDING_TIER_1:
+ maxCBVsPerStage = 14;
+ maxSRVsPerStage = 128;
+ maxUAVsAllStages = 64;
+ maxSamplersPerStage = 16;
+ break;
+ case D3D12_RESOURCE_BINDING_TIER_2:
+ maxCBVsPerStage = 14;
+ maxSRVsPerStage = 1'000'000;
+ maxUAVsAllStages = 64;
+ maxSamplersPerStage = 2048;
+ break;
+ case D3D12_RESOURCE_BINDING_TIER_3:
+ default:
+ maxCBVsPerStage = 1'100'000;
+ maxSRVsPerStage = 1'100'000;
+ maxUAVsAllStages = 1'100'000;
+ maxSamplersPerStage = 2048;
+ break;
+ }
+
+ ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
+ ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
+ uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
+
+ limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
+ // Allocate half of the UAVs to storage buffers, and half to storage textures.
+ limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
+ limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
+ limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
+ limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
+ // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
+ static constexpr uint32_t kMaxRootSignatureSize = 64u;
+ // Dawn maps WebGPU's binding model by:
+ // - (maxBindGroups)
+ // CBVs/UAVs/SRVs for bind group are a root descriptor table
+ // - (maxBindGroups)
+ // Samplers for each bind group are a root descriptor table
+ // - (2 * maxDynamicBuffers)
+ // Each dynamic buffer is a root descriptor
+ // RESERVED:
+ // - 3 = max of:
+ // - 2 root constants for the baseVertex/baseInstance constants.
+ // - 3 root constants for num workgroups X, Y, Z
+ // - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
+ // buffer lengths.
+ static constexpr uint32_t kReservedSlots = 7;
+
+ // Available slots after base limits considered.
+ uint32_t availableRootSignatureSlots =
+ kMaxRootSignatureSize - kReservedSlots -
+ 2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
+
+ // Because we need either:
+ // - 1 cbv/uav/srv table + 1 sampler table
+ // - 2 slots for a root descriptor
+ uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
+
+ // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
+ // Distribute evenly.
+ limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
+ limits->v1.maxDynamicUniformBuffersPerPipelineLayout +=
+ availableDynamicBufferOrBindGroup / 3;
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
+ (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
+
+ ASSERT(2 * (limits->v1.maxBindGroups +
+ limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
+ kMaxRootSignatureSize - kReservedSlots);
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
+ limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
+ limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
+ limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
+ limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
+
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
+ limits->v1.maxComputeWorkgroupsPerDimension =
+ D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
+
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
+ // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
+ // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
+ limits->v1.maxComputeWorkgroupStorageSize = 32768;
+
+ // Max number of "constants" where each constant is a 16-byte float4
+ limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
+ // D3D12 has no documented limit on the size of a storage buffer binding.
+ limits->v1.maxStorageBufferBindingSize = 4294967295;
+
+ // TODO(crbug.com/dawn/685):
+ // LIMITS NOT SET:
+ // - maxInterStageShaderComponents
+ // - maxVertexBufferArrayStride
+
+ return {};
+ }
+
+ MaybeError Adapter::InitializeDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ return {};
+ }
+
+ D3D12_MESSAGE_ID denyIds[] = {
+
+ //
+ // Permanent IDs: list of warnings that are not applicable
+ //
+
+ // Resource sub-allocation partially maps pre-allocated heaps. This means the
+ // entire physical addresses space may have no resources or have many resources
+ // assigned the same heap.
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
+ D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
+
+ // The debug layer validates pipeline objects when they are created. Dawn validates
+            // them when they are set. Therefore, since the issue is caught at a later
+            // time, we can silence this warning.
+ D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
+
+ // Adding a clear color during resource creation would require heuristics or delayed
+ // creation.
+ // https://crbug.com/dawn/418
+ D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
+ D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
+
+ // Dawn enforces proper Unmaps at a later time.
+ // https://crbug.com/dawn/422
+ D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
+
+ // WebGPU allows empty scissors without empty viewports.
+ D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
+
+ //
+ // Temporary IDs: list of warnings that should be fixed or promoted
+ //
+
+ // Remove after warning have been addressed
+ // https://crbug.com/dawn/421
+ D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+
+ // For small placed resource alignment, we first request the small alignment, which may
+            // get rejected and generate a debug error. Then, we request 0 to get the
+            // allowed alignment.
+ D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
+
+ // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
+ // behavior.
+ D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
+
+ // WebGPU allows setVertexBuffer with offset that equals to the whole vertex buffer
+ // size.
+ // Even this means that no vertex buffer view has been set in D3D12 backend.
+ // https://crbug.com/dawn/1255
+ D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
+ };
+
+ // Create a retrieval filter with a deny list to suppress messages.
+ // Any messages remaining will be converted to Dawn errors.
+ D3D12_INFO_QUEUE_FILTER filter{};
+ // Filter out info/message and only create errors from warnings or worse.
+ D3D12_MESSAGE_SEVERITY severities[] = {
+ D3D12_MESSAGE_SEVERITY_INFO,
+ D3D12_MESSAGE_SEVERITY_MESSAGE,
+ };
+ filter.DenyList.NumSeverities = ARRAYSIZE(severities);
+ filter.DenyList.pSeverityList = severities;
+ filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
+ filter.DenyList.pIDList = denyIds;
+
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+ "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+
+ // To avoid flooding the console, a storage-filter is also used to
+ // prevent messages from getting logged.
+ DAWN_TRY(CheckHRESULT(infoQueue->PushStorageFilter(&filter),
+ "ID3D12InfoQueue::PushStorageFilter"));
+
+ DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
+ "ID3D12InfoQueue::PushRetrievalFilter"));
+
+ return {};
+ }
+
+ void Adapter::CleanUpDebugLayerFilters() {
+ if (!GetInstance()->IsBackendValidationEnabled()) {
+ return;
+ }
+
+ // The device may not exist if this adapter failed to initialize.
+ if (mD3d12Device == nullptr) {
+ return;
+ }
+
+ // If the debug layer is not installed, return immediately to avoid crashing the process.
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ if (FAILED(mD3d12Device.As(&infoQueue))) {
+ return;
+ }
+
+ infoQueue->PopRetrievalFilter();
+ infoQueue->PopStorageFilter();
+ }
+
+ ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+ }
+
+ // Resets the backend device and creates a new one. If any D3D12 objects belonging to the
+ // current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
+    // and the subsequent call to CreateDevice will return a handle to the existing device instead of
+ // creating a new one.
+ MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
+ ASSERT(mD3d12Device.Reset() == 0);
+ DAWN_TRY(Initialize());
+
+ return {};
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h
new file mode 100644
index 00000000000..3247a133c9d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/AdapterD3D12.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_ADAPTERD3D12_H_
+#define DAWNNATIVE_D3D12_ADAPTERD3D12_H_
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/d3d12/D3D12Info.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Backend;
+
+ class Adapter : public AdapterBase {
+ public:
+ Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
+ ~Adapter() override;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
+
+ const D3D12DeviceInfo& GetDeviceInfo() const;
+ IDXGIAdapter3* GetHardwareAdapter() const;
+ Backend* GetBackend() const;
+ ComPtr<ID3D12Device> GetDevice() const;
+ const gpu_info::D3DDriverVersion& GetDriverVersion() const;
+
+ private:
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+ const DeviceDescriptor* descriptor) override;
+ MaybeError ResetInternalDeviceForTestingImpl() override;
+
+ bool AreTimestampQueriesSupported() const;
+
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+ MaybeError InitializeDebugLayerFilters();
+ void CleanUpDebugLayerFilters();
+
+ ComPtr<IDXGIAdapter3> mHardwareAdapter;
+ ComPtr<ID3D12Device> mD3d12Device;
+ gpu_info::D3DDriverVersion mDriverVersion;
+
+ Backend* mBackend;
+ D3D12DeviceInfo mDeviceInfo = {};
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_ADAPTERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp
new file mode 100644
index 00000000000..27a98823b9a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.cpp
@@ -0,0 +1,209 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BackendD3D12.h"
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+
+ ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
+ BackendValidationLevel validationLevel,
+ bool beginCaptureOnStartup) {
+ ComPtr<IDXGIFactory4> factory;
+
+ uint32_t dxgiFactoryFlags = 0;
+
+ // Enable the debug layer (requires the Graphics Tools "optional feature").
+ {
+ if (validationLevel != BackendValidationLevel::Disabled) {
+ ComPtr<ID3D12Debug3> debugController;
+ if (SUCCEEDED(
+ functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
+ ASSERT(debugController != nullptr);
+ debugController->EnableDebugLayer();
+ if (validationLevel == BackendValidationLevel::Full) {
+ debugController->SetEnableGPUBasedValidation(true);
+ }
+
+ // Enable additional debug layers.
+ dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
+ }
+ }
+
+ if (beginCaptureOnStartup) {
+ ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
+ if (functions->dxgiGetDebugInterface1 != nullptr &&
+ SUCCEEDED(functions->dxgiGetDebugInterface1(
+ 0, IID_PPV_ARGS(&graphicsAnalysis)))) {
+ graphicsAnalysis->BeginCapture();
+ }
+ }
+ }
+
+ if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
+ return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
+ }
+
+ ASSERT(factory != nullptr);
+ return std::move(factory);
+ }
+
+ ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(
+ Backend* backend,
+ ComPtr<IDXGIAdapter> dxgiAdapter) {
+ ComPtr<IDXGIAdapter3> dxgiAdapter3;
+ DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
+ Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
+ DAWN_TRY(adapter->Initialize());
+
+ return {std::move(adapter)};
+ }
+
+ } // anonymous namespace
+
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::D3D12) {
+ }
+
+ MaybeError Backend::Initialize() {
+ mFunctions = std::make_unique<PlatformFunctions>();
+ DAWN_TRY(mFunctions->LoadFunctions());
+
+ const auto instance = GetInstance();
+
+ DAWN_TRY_ASSIGN(mFactory,
+ CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
+ instance->IsBeginCaptureOnStartupEnabled()));
+
+ return {};
+ }
+
+ ComPtr<IDXGIFactory4> Backend::GetFactory() const {
+ return mFactory;
+ }
+
+ MaybeError Backend::EnsureDxcLibrary() {
+ if (mDxcLibrary == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
+ "DXC create library"));
+ ASSERT(mDxcLibrary != nullptr);
+ }
+ return {};
+ }
+
+ MaybeError Backend::EnsureDxcCompiler() {
+ if (mDxcCompiler == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
+ "DXC create compiler"));
+ ASSERT(mDxcCompiler != nullptr);
+ }
+ return {};
+ }
+
+ MaybeError Backend::EnsureDxcValidator() {
+ if (mDxcValidator == nullptr) {
+ DAWN_TRY(CheckHRESULT(
+ mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
+ "DXC create validator"));
+ ASSERT(mDxcValidator != nullptr);
+ }
+ return {};
+ }
+
+ ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+ ASSERT(mDxcLibrary != nullptr);
+ return mDxcLibrary;
+ }
+
+ ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+ ASSERT(mDxcCompiler != nullptr);
+ return mDxcCompiler;
+ }
+
+ ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+ ASSERT(mDxcValidator != nullptr);
+ return mDxcValidator;
+ }
+
+ const PlatformFunctions* Backend::GetFunctions() const {
+ return mFunctions.get();
+ }
+
+ std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
+ }
+ return result.AcquireSuccess();
+ }
+
+ ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+ std::vector<Ref<AdapterBase>> adapters;
+ if (options->dxgiAdapter != nullptr) {
+ // |dxgiAdapter| was provided. Discover just that adapter.
+ Ref<AdapterBase> adapter;
+ DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
+ adapters.push_back(std::move(adapter));
+ return std::move(adapters);
+ }
+
+ // Enumerate and discover all available adapters.
+ for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+ ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+ if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+ break; // No more adapters to enumerate.
+ }
+
+ ASSERT(dxgiAdapter != nullptr);
+ ResultOrError<Ref<AdapterBase>> adapter =
+ CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
+ if (adapter.IsError()) {
+ GetInstance()->ConsumedError(adapter.AcquireError());
+ continue;
+ }
+
+ adapters.push_back(adapter.AcquireSuccess());
+ }
+
+ return adapters;
+ }
+
+ BackendConnection* Connect(InstanceBase* instance) {
+ Backend* backend = new Backend(instance);
+
+ if (instance->ConsumedError(backend->Initialize())) {
+ delete backend;
+ return nullptr;
+ }
+
+ return backend;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h
new file mode 100644
index 00000000000..01ae6bca7f6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BackendD3D12.h
@@ -0,0 +1,59 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BACKENDD3D12_H_
+#define DAWNNATIVE_D3D12_BACKENDD3D12_H_
+
+#include "dawn/native/BackendConnection.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class PlatformFunctions;
+
+ class Backend : public BackendConnection {
+ public:
+ Backend(InstanceBase* instance);
+
+ MaybeError Initialize();
+
+ ComPtr<IDXGIFactory4> GetFactory() const;
+
+ MaybeError EnsureDxcLibrary();
+ MaybeError EnsureDxcCompiler();
+ MaybeError EnsureDxcValidator();
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
+
+ const PlatformFunctions* GetFunctions() const;
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
+
+ private:
+ // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
+ // the D3D12 DLLs are unloaded before we are done using them.
+ std::unique_ptr<PlatformFunctions> mFunctions;
+ ComPtr<IDXGIFactory4> mFactory;
+ ComPtr<IDxcLibrary> mDxcLibrary;
+ ComPtr<IDxcCompiler> mDxcCompiler;
+ ComPtr<IDxcValidator> mDxcValidator;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_BACKENDD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp
new file mode 100644
index 00000000000..f1693452269
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.cpp
@@ -0,0 +1,268 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ // static
+ ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+ BindGroup::BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ uint32_t viewSizeIncrement,
+ const CPUDescriptorHeapAllocation& viewAllocation)
+ : BindGroupBase(this, device, descriptor) {
+ BindGroupLayout* bgl = ToBackend(GetLayout());
+
+ mCPUViewAllocation = viewAllocation;
+
+ const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
+
+ ID3D12Device* d3d12Device = device->GetD3D12Device();
+
+ // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
+ // This is because they are created as root descriptors which are never heap allocated.
+ // Since dynamic buffers are packed in the front, we can skip over these bindings by
+ // starting from the dynamic buffer count.
+ for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+ bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+
+ // Increment size does not need to be stored and is only used to get a handle
+ // local to the allocation with OffsetFrom().
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+ ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Buffer was destroyed. Skip creating buffer views since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
+ }
+
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform: {
+ D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
+ desc.SizeInBytes =
+ Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
+ desc.BufferLocation =
+ ToBackend(binding.buffer)->GetVA() + binding.offset;
+
+ d3d12Device->CreateConstantBufferView(
+ &desc, viewAllocation.OffsetFrom(
+ viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding: {
+ // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
+ // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
+ // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
+ // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
+ // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
+ // byte aligned. Since binding.size and binding.offset are in bytes,
+ // we need to divide by 4 to obtain the element size.
+ D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
+ desc.Buffer.NumElements = binding.size / 4;
+ desc.Format = DXGI_FORMAT_R32_TYPELESS;
+ desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
+ desc.Buffer.FirstElement = binding.offset / 4;
+ desc.Buffer.StructureByteStride = 0;
+ desc.Buffer.CounterOffsetInBytes = 0;
+ desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
+
+ d3d12Device->CreateUnorderedAccessView(
+ resource, nullptr, &desc,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+ case wgpu::BufferBindingType::ReadOnlyStorage: {
+ // Like StorageBuffer, Tint outputs HLSL shaders for readonly
+ // storage buffer with ByteAddressBuffer. So we must use
+ // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
+ // similar requirement for format, element size, etc.
+ D3D12_SHADER_RESOURCE_VIEW_DESC desc;
+ desc.Format = DXGI_FORMAT_R32_TYPELESS;
+ desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
+ desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+ desc.Buffer.FirstElement = binding.offset / 4;
+ desc.Buffer.NumElements = binding.size / 4;
+ desc.Buffer.StructureByteStride = 0;
+ desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
+ d3d12Device->CreateShaderResourceView(
+ resource, &desc,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+ auto& srv = view->GetSRVDescriptor();
+
+ ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Texture was destroyed. Skip creating the SRV since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
+ }
+
+ d3d12Device->CreateShaderResourceView(
+ resource, &srv,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+ if (resource == nullptr) {
+ // The Texture was destroyed. Skip creating the SRV/UAV since there is no
+ // resource. This bind group won't be used as it is an error to submit a
+ // command buffer that references destroyed resources.
+ continue;
+ }
+
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly: {
+ D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
+ d3d12Device->CreateUnorderedAccessView(
+ resource, nullptr, &uav,
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
+ break;
+ }
+
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
+ }
+
+ break;
+ }
+
+ case BindingInfoType::ExternalTexture: {
+ UNREACHABLE();
+ }
+
+ case BindingInfoType::Sampler: {
+ // No-op as samplers will be later initialized by CreateSamplers().
+ break;
+ }
+ }
+ }
+
+ // Loop through the dynamic storage buffers and build a flat map from the index of the
+ // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
+ // means that it is the i'th buffer that is both dynamic and storage, in increasing order
+ // of BindingNumber.
+ mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+ uint32_t dynamicStorageBufferIndex = 0;
+ for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+ ++bindingIndex) {
+ if (bgl->IsStorageBufferBinding(bindingIndex)) {
+ mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
+ GetBindingAsBufferBinding(bindingIndex).size;
+ }
+ }
+ }
+
+ BindGroup::~BindGroup() = default;
+
+ void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
+ ASSERT(!mCPUViewAllocation.IsValid());
+ }
+
+ bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
+ const BindGroupLayout* bgl = ToBackend(GetLayout());
+
+ const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+ if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
+ return true;
+ }
+
+ // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+ // If either failed, return early to re-allocate and switch the heaps.
+ Device* device = ToBackend(GetDevice());
+
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+ if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
+ device->GetPendingCommandSerial(),
+ &baseCPUDescriptor, &mGPUViewAllocation)) {
+ return false;
+ }
+
+ // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+ // simple copies per bindgroup, a single non-simple copy could be issued.
+ // TODO(dawn:155): Consider doing this optimization.
+ device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+ mCPUViewAllocation.GetBaseDescriptor(),
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+
+ return true;
+ }
+
+ D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
+ return mGPUViewAllocation.GetBaseDescriptor();
+ }
+
+ D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
+ ASSERT(mSamplerAllocationEntry != nullptr);
+ return mSamplerAllocationEntry->GetBaseDescriptor();
+ }
+
+ bool BindGroup::PopulateSamplers(Device* device,
+ ShaderVisibleDescriptorAllocator* samplerAllocator) {
+ if (mSamplerAllocationEntry == nullptr) {
+ return true;
+ }
+ return mSamplerAllocationEntry->Populate(device, samplerAllocator);
+ }
+
+ void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
+ mSamplerAllocationEntry = std::move(entry);
+ }
+
+ const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths()
+ const {
+ return mDynamicStorageBufferLengths;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h
new file mode 100644
index 00000000000..7fcf782cf6b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupD3D12.h
@@ -0,0 +1,68 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
+#define DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+ class SamplerHeapCacheEntry;
+ class ShaderVisibleDescriptorAllocator;
+
+ class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
+
+ BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ uint32_t viewSizeIncrement,
+ const CPUDescriptorHeapAllocation& viewAllocation);
+
+ // Returns true if the BindGroup was successfully populated.
+ bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
+ bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
+
+ void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
+
+ using DynamicStorageBufferLengths =
+ ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
+ const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
+
+ private:
+ ~BindGroup() override;
+
+ void DestroyImpl() override;
+
+ Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
+
+ GPUDescriptorHeapAllocation mGPUViewAllocation;
+ CPUDescriptorHeapAllocation mCPUViewAllocation;
+
+ DynamicStorageBufferLengths mDynamicStorageBufferLengths;
+ };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
new file mode 100644
index 00000000000..80d4ea4e714
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
@@ -0,0 +1,191 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+ namespace {
+ D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
+ const BindingInfo& bindingInfo) {
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+
+ case BindingInfoType::Sampler:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
+
+ case BindingInfoType::Texture:
+ case BindingInfoType::ExternalTexture:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+
+ case BindingInfoType::StorageTexture:
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
+ }
+ }
+ }
+ } // anonymous namespace
+
+ // static
+ Ref<BindGroupLayout> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+ }
+
+ BindGroupLayout::BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mDescriptorHeapOffsets(GetBindingCount()),
+ mShaderRegisters(GetBindingCount()),
+ mCbvUavSrvDescriptorCount(0),
+ mSamplerDescriptorCount(0),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
+ WGPUBindingInfoToDescriptorRangeType(bindingInfo);
+
+ // TODO(dawn:728) In the future, special handling will be needed for external textures
+ // here because they encompass multiple views.
+ mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
+
+ if (bindingIndex < GetDynamicBufferCount()) {
+ continue;
+ }
+
+ // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
+ // need to allocate the descriptor from descriptor heap or create descriptor ranges.
+ ASSERT(!bindingInfo.buffer.hasDynamicOffset);
+
+ // TODO(dawn:728) In the future, special handling will be needed for external textures
+ // here because they encompass multiple views.
+ mDescriptorHeapOffsets[bindingIndex] =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+ ? mSamplerDescriptorCount++
+ : mCbvUavSrvDescriptorCount++;
+
+ D3D12_DESCRIPTOR_RANGE range;
+ range.RangeType = descriptorRangeType;
+ range.NumDescriptors = 1;
+ range.BaseShaderRegister = GetShaderRegister(bindingIndex);
+ range.RegisterSpace = kRegisterSpacePlaceholder;
+ range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
+
+ std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+ ? mSamplerDescriptorRanges
+ : mCbvUavSrvDescriptorRanges;
+
+ // Try to join this range with the previous one, if the current range is a continuation
+ // of the previous. This is possible because the binding infos in the base type are
+ // sorted.
+ if (descriptorRanges.size() >= 2) {
+ D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
+ if (previous.RangeType == range.RangeType &&
+ previous.BaseShaderRegister + previous.NumDescriptors ==
+ range.BaseShaderRegister) {
+ previous.NumDescriptors += range.NumDescriptors;
+ continue;
+ }
+ }
+
+ descriptorRanges.push_back(range);
+ }
+
+ mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
+ mSamplerAllocator =
+ device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+ }
+
+ ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+ Device* device,
+ const BindGroupDescriptor* descriptor) {
+ uint32_t viewSizeIncrement = 0;
+ CPUDescriptorHeapAllocation viewAllocation;
+ if (GetCbvUavSrvDescriptorCount() > 0) {
+ DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
+ viewSizeIncrement = mViewAllocator->GetSizeIncrement();
+ }
+
+ Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
+ mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
+
+ if (GetSamplerDescriptorCount() > 0) {
+ Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
+ DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
+ bindGroup.Get(), mSamplerAllocator));
+ bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
+ }
+
+ return bindGroup;
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+ CPUDescriptorHeapAllocation* viewAllocation) {
+ if (viewAllocation->IsValid()) {
+ mViewAllocator->Deallocate(viewAllocation);
+ }
+
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+ ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
+ return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
+ }
+
+ uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
+ return mShaderRegisters[bindingIndex];
+ }
+
+ uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
+ return mCbvUavSrvDescriptorCount;
+ }
+
+ uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
+ return mSamplerDescriptorCount;
+ }
+
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
+ const {
+ return mCbvUavSrvDescriptorRanges;
+ }
+
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
+ return mSamplerDescriptorRanges;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
new file mode 100644
index 00000000000..f16b16b9158
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
+#define DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class BindGroup;
+ class CPUDescriptorHeapAllocation;
+ class Device;
+ class StagingDescriptorAllocator;
+
+ // A purposefully invalid register space.
+ //
+ // We use the bind group index as the register space, but don't know the bind group index until
+ // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
+ static constexpr uint32_t kRegisterSpacePlaceholder =
+ D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+
+ class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static Ref<BindGroupLayout> Create(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
+
+ // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
+ // dynamic binding indexes.
+ ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+
+ // The D3D shader register that the Dawn binding index is mapped to by this bind group
+ // layout.
+ uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+
+ // Counts of descriptors in the descriptor tables.
+ uint32_t GetCbvUavSrvDescriptorCount() const;
+ uint32_t GetSamplerDescriptorCount() const;
+
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
+
+ private:
+ BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayout() override = default;
+
+ // Contains the offset into the descriptor heap for the given resource view. Samplers and
+ // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
+ // within each group and tightly packed.
+ //
+ // Dynamic resources are not used here since their descriptors are placed directly in root
+ // parameters.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
+
+ // Contains the shader register this binding is mapped to.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
+
+ uint32_t mCbvUavSrvDescriptorCount;
+ uint32_t mSamplerDescriptorCount;
+
+ std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
+ std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+
+ StagingDescriptorAllocator* mSamplerAllocator = nullptr;
+ StagingDescriptorAllocator* mViewAllocator = nullptr;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp
new file mode 100644
index 00000000000..27d999170b6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.cpp
@@ -0,0 +1,493 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/BufferD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+ }
+
+ return flags;
+ }
+
+ D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
+ D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+ }
+ if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
+ resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
+ }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+ }
+ if (usage & kReadOnlyStorageBuffer) {
+ resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+ }
+
+ return resourceState;
+ }
+
+ D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
+ if (allowedUsage & wgpu::BufferUsage::MapRead) {
+ return D3D12_HEAP_TYPE_READBACK;
+ } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
+ return D3D12_HEAP_TYPE_UPLOAD;
+ } else {
+ return D3D12_HEAP_TYPE_DEFAULT;
+ }
+ }
+
+ size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
+ if ((usage & wgpu::BufferUsage::Uniform) != 0) {
+ // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
+                // forbids binding a CBV to an unaligned size. To prevent this, one can always safely
+ // align the buffer size to the CBV data alignment as other buffer usages
+ // ignore it (no size check). The validation will still enforce bound checks with
+ // the unaligned size returned by GetSize().
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
+ return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
+ }
+ return 1;
+ }
+ } // namespace
+
+ // static
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return buffer;
+ }
+
+ Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {
+ }
+
+ MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ uint64_t size = std::max(GetSize(), uint64_t(4u));
+ size_t alignment = D3D12BufferSizeAlignment(GetUsage());
+ if (size > std::numeric_limits<uint64_t>::max() - alignment) {
+            // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ mAllocatedSize = Align(size, alignment);
+
+ D3D12_RESOURCE_DESC resourceDescriptor;
+ resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+ resourceDescriptor.Alignment = 0;
+ resourceDescriptor.Width = mAllocatedSize;
+ resourceDescriptor.Height = 1;
+ resourceDescriptor.DepthOrArraySize = 1;
+ resourceDescriptor.MipLevels = 1;
+ resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+ resourceDescriptor.SampleDesc.Count = 1;
+ resourceDescriptor.SampleDesc.Quality = 0;
+ resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+ // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+ // and robust resource initialization.
+ resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
+
+ auto heapType = D3D12HeapType(GetUsage());
+ auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
+
+ // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
+ // state
+ if (heapType == D3D12_HEAP_TYPE_READBACK) {
+ bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
+ mFixedResourceState = true;
+ mLastUsage = wgpu::BufferUsage::CopyDst;
+ }
+
+ // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
+ // state
+ if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
+ bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
+ mFixedResourceState = true;
+ mLastUsage = wgpu::BufferUsage::CopySrc;
+ }
+
+ DAWN_TRY_ASSIGN(
+ mResourceAllocation,
+ ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+
+ SetLabelImpl();
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext,
+ ToBackend(GetDevice())->GetPendingCommandContext());
+
+ DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
+ }
+
+ // Initialize the padding bytes to zero.
+ if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
+ !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext,
+ ToBackend(GetDevice())->GetPendingCommandContext());
+
+ uint32_t clearSize = paddingBytes;
+ uint64_t clearOffset = GetSize();
+ DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
+ }
+ }
+
+ return {};
+ }
+
+ Buffer::~Buffer() = default;
+
+ ID3D12Resource* Buffer::GetD3D12Resource() const {
+ return mResourceAllocation.GetD3D12Resource();
+ }
+
+ // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+ // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+ // cause subsequent errors.
+ bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+
+ // Return the resource barrier.
+ return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
+ }
+
+ void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage) {
+ D3D12_RESOURCE_BARRIER barrier;
+
+ if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ }
+ }
+
+ // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+ // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+ // cause subsequent errors.
+ bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage) {
+ // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
+ if (mFixedResourceState) {
+ ASSERT(mLastUsage == newUsage);
+ return false;
+ }
+
+ D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
+ D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+
+ // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+ // If one of the usages isn't UAV, then other barriers are used.
+ bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+ newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+
+ if (needsUAVBarrier) {
+ barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+ barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier->UAV.pResource = GetD3D12Resource();
+
+ mLastUsage = newUsage;
+ return true;
+ }
+
+ // We can skip transitions to already current usages.
+ if (IsSubset(newUsage, mLastUsage)) {
+ return false;
+ }
+
+ mLastUsage = newUsage;
+
+ // The COMMON state represents a state where no write operations can be pending, which makes
+        // it possible to transition to and from some states without synchronization (i.e. without an
+ // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
+ // state, or 2) multiple read states. A buffer that is accessed within a command list will
+ // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
+ // completes - this is because all buffer writes are guaranteed to be completed before the
+ // next ExecuteCommandLists call executes.
+ // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+ // To track implicit decays, we must record the pending serial on which a transition will
+ // occur. When that buffer is used again, the previously recorded serial must be compared to
+        // the last completed serial to determine if the buffer has implicitly decayed to the common
+ // state.
+ const ExecutionSerial pendingCommandSerial =
+ ToBackend(GetDevice())->GetPendingCommandSerial();
+ if (pendingCommandSerial > mLastUsedSerial) {
+ lastState = D3D12_RESOURCE_STATE_COMMON;
+ mLastUsedSerial = pendingCommandSerial;
+ }
+
+ // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
+        // These are: COPY_SOURCE, VERTEX_AND_CONSTANT_BUFFER, INDEX_BUFFER, COPY_DEST,
+ // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
+ // destination state cannot be 1) more than one write state, or 2) both a read and write
+ // state. This goes unchecked here because it should not be allowed through render/compute
+ // pass validation.
+ if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+ return false;
+ }
+
+ // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
+ // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
+ // barrier state.
+ if (lastState == newState) {
+ return false;
+ }
+
+ barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier->Transition.pResource = GetD3D12Resource();
+ barrier->Transition.StateBefore = lastState;
+ barrier->Transition.StateAfter = newState;
+ barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+
+ return true;
+ }
+
+ D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
+ return mResourceAllocation.GetGPUPointer();
+ }
+
+ bool Buffer::IsCPUWritableAtCreation() const {
+ // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
+ // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
+ // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
+ // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
+ // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
+        // was created. With a staging buffer, the data on the CPU side will first be uploaded to
+        // the staging buffer, and then copied from the staging buffer to the GPU memory of the current
+ // buffer in the unmap() call.
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+ }
+
+ MaybeError Buffer::MapInternal(bool isWrite,
+ size_t offset,
+ size_t size,
+ const char* contextInfo) {
+ // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+ // evicted. This buffer should already have been made resident when it was created.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
+
+ D3D12_RANGE range = {offset, offset + size};
+ // mMappedData is the pointer to the start of the resource, irrespective of offset.
+ // MSDN says (note the weird use of "never"):
+ //
+ // When ppData is not NULL, the pointer returned is never offset by any values in
+ // pReadRange.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
+ DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
+
+ if (isWrite) {
+ mWrittenMappedRange = range;
+ }
+
+ return {};
+ }
+
+ MaybeError Buffer::MapAtCreationImpl() {
+ // We will use a staging buffer for MapRead buffers instead so we just clear the staging
+ // buffer and initialize the original buffer by copying the staging buffer to the original
+        // buffer the first time Unmap() is called.
+ ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
+
+ return {};
+ }
+
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+ DAWN_TRY(EnsureDataInitialized(commandContext));
+
+ return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+ }
+
+ void Buffer::UnmapImpl() {
+ GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
+ mMappedData = nullptr;
+ mWrittenMappedRange = {0, 0};
+
+ // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+ // them when they are unmapped.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ // The frontend asks that the pointer returned is from the start of the resource
+ // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
+ return mMappedData;
+ }
+
+ void Buffer::DestroyImpl() {
+ if (mMappedData != nullptr) {
+ // If the buffer is currently mapped, unmap without flushing the writes to the GPU
+ // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
+ // which parts to flush, so we set it to an empty range to prevent flushes.
+ mWrittenMappedRange = {0, 0};
+ }
+ BufferBase::DestroyImpl();
+
+ ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
+ }
+
+ bool Buffer::CheckIsResidentForTesting() const {
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ return heap->IsInList() || heap->IsResidencyLocked();
+ }
+
+ bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
+ return mResourceAllocation.GetInfo().mMethod == allocationMethod;
+ }
+
+ MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ if (!NeedsInitialization()) {
+ return {};
+ }
+
+ DAWN_TRY(InitializeToZero(commandContext));
+ return {};
+ }
+
+ ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
+ CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return {false};
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ return {false};
+ }
+
+ DAWN_TRY(InitializeToZero(commandContext));
+ return {true};
+ }
+
+ MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return {};
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ } else {
+ DAWN_TRY(InitializeToZero(commandContext));
+ }
+
+ return {};
+ }
+
+ void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
+ GetLabel());
+ }
+
+ MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(NeedsInitialization());
+
+ // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
+ // that has already been zero initialized.
+ DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+
+ return {};
+ }
+
+ MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ Device* device = ToBackend(GetDevice());
+ size = size > 0 ? size : GetAllocatedSize();
+
+ // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
+        // changed away, so we can only clear such buffers with buffer mapping.
+ if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+ DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
+ "D3D12 map at clear buffer"));
+ memset(mMappedData, clearValue, size);
+ UnmapImpl();
+ } else if (clearValue == 0u) {
+ DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
+ } else {
+ // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
+ // includes STORAGE.
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+
+ memset(uploadHandle.mappedBuffer, clearValue, size);
+
+ device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, this, offset, size);
+ }
+
+ return {};
+ }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h
new file mode 100644
index 00000000000..253565a387b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/BufferD3D12.h
@@ -0,0 +1,91 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_BUFFERD3D12_H_
+#define DAWNNATIVE_D3D12_BUFFERD3D12_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class CommandRecordingContext;
+ class Device;
+
+ class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
+
+ ID3D12Resource* GetD3D12Resource() const;
+ D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
+
+ bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage);
+
+ bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
+ bool CheckIsResidentForTesting() const;
+
+ MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+ ResultOrError<bool> EnsureDataInitializedAsDestination(
+ CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ Buffer(Device* device, const BufferDescriptor* descriptor);
+ ~Buffer() override;
+
+ MaybeError Initialize(bool mappedAtCreation);
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ virtual MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
+
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+
+ MaybeError InitializeToZero(CommandRecordingContext* commandContext);
+ MaybeError ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ ResourceHeapAllocation mResourceAllocation;
+ bool mFixedResourceState = false;
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+ ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
+
+ D3D12_RANGE mWrittenMappedRange = {0, 0};
+ void* mMappedData = nullptr;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_BUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
new file mode 100644
index 00000000000..617c1966d1b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
@@ -0,0 +1,53 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::d3d12 {
+
+ CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
+ D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+ uint32_t heapIndex)
+ : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
+ }
+
+ D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+ ASSERT(IsValid());
+ return mBaseDescriptor;
+ }
+
+ D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
+ uint32_t sizeIncrementInBytes,
+ uint32_t offsetInDescriptorCount) const {
+ ASSERT(IsValid());
+ D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
+ cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
+ return cpuHandle;
+ }
+
+ uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
+ ASSERT(mHeapIndex >= 0);
+ return mHeapIndex;
+ }
+
+ bool CPUDescriptorHeapAllocation::IsValid() const {
+ return mBaseDescriptor.ptr != 0;
+ }
+
+ void CPUDescriptorHeapAllocation::Invalidate() {
+ mBaseDescriptor = {0};
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
new file mode 100644
index 00000000000..997d0563841
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
@@ -0,0 +1,47 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
+#define DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
+
+#include <cstdint>
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ // Wrapper for a handle into a CPU-only descriptor heap.
+ class CPUDescriptorHeapAllocation {
+ public:
+ CPUDescriptorHeapAllocation() = default;
+ CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
+
+ D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+
+ D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
+ uint32_t offsetInDescriptorCount) const;
+ uint32_t GetHeapIndex() const;
+
+ bool IsValid() const;
+
+ void Invalidate();
+
+ private:
+ D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+ uint32_t mHeapIndex = -1;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp
new file mode 100644
index 00000000000..88ac0b8aa54
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.cpp
@@ -0,0 +1,72 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+
+namespace dawn::native::d3d12 {
+
+ CommandAllocatorManager::CommandAllocatorManager(Device* device)
+ : device(device), mAllocatorCount(0) {
+ mFreeAllocators.set();
+ }
+
+ ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
+ // If there are no free allocators, get the oldest serial in flight and wait on it
+ if (mFreeAllocators.none()) {
+ const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
+ DAWN_TRY(device->WaitForSerial(firstSerial));
+ DAWN_TRY(Tick(firstSerial));
+ }
+
+ ASSERT(mFreeAllocators.any());
+
+ // Get the index of the first free allocator from the bitset
+ unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
+
+ if (firstFreeIndex >= mAllocatorCount) {
+ ASSERT(firstFreeIndex == mAllocatorCount);
+ mAllocatorCount++;
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
+ D3D12_COMMAND_LIST_TYPE_DIRECT,
+ IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
+ "D3D12 create command allocator"));
+ }
+
+ // Mark the command allocator as used
+ mFreeAllocators.reset(firstFreeIndex);
+
+ // Enqueue the command allocator. It will be scheduled for reset after the next
+ // ExecuteCommandLists
+ mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
+ device->GetPendingCommandSerial());
+ return mCommandAllocators[firstFreeIndex].Get();
+ }
+
+ MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
+ // Reset all command allocators that are no longer in flight
+ for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
+ DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
+ mFreeAllocators.set(it.index);
+ }
+ mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
+ return {};
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h
new file mode 100644
index 00000000000..1f8cc1e4f86
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandAllocatorManager.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
+#define DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+
+#include <bitset>
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class CommandAllocatorManager {
+ public:
+ CommandAllocatorManager(Device* device);
+
+ // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
+ // otherwise its commands may be reset before execution has completed on the GPU
+ ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
+ MaybeError Tick(ExecutionSerial lastCompletedSerial);
+
+ private:
+ Device* device;
+
+ // This must be at least 2 because the Device and Queue use separate command allocators
+ static constexpr unsigned int kMaxCommandAllocators = 32;
+ unsigned int mAllocatorCount;
+
+ struct IndexedCommandAllocator {
+ ComPtr<ID3D12CommandAllocator> commandAllocator;
+ unsigned int index;
+ };
+
+ ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
+ std::bitset<kMaxCommandAllocators> mFreeAllocators;
+ SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp
new file mode 100644
index 00000000000..83efc938a5e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.cpp
@@ -0,0 +1,1652 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+#include "dawn/native/d3d12/RenderPassBuilderD3D12.h"
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+
+ DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Undefined:
+ return DXGI_FORMAT_UNKNOWN;
+ case wgpu::IndexFormat::Uint16:
+ return DXGI_FORMAT_R16_UINT;
+ case wgpu::IndexFormat::Uint32:
+ return DXGI_FORMAT_R32_UINT;
+ }
+ }
+
+ D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_TYPE_TIMESTAMP;
+ }
+ }
+
+ bool CanUseCopyResource(const TextureCopy& src,
+ const TextureCopy& dst,
+ const Extent3D& copySize) {
+ // Checked by validation
+ ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
+ ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
+ ASSERT(src.aspect == dst.aspect);
+
+ const Extent3D& srcSize = src.texture->GetSize();
+ const Extent3D& dstSize = dst.texture->GetSize();
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
+ // In order to use D3D12's copy resource, the textures must be the same dimensions, and
+ // the copy must be of the entire resource.
+ // TODO(dawn:129): Support 1D textures.
+ return src.aspect == src.texture->GetFormat().aspects &&
+ src.texture->GetDimension() == dst.texture->GetDimension() && //
+ dst.texture->GetNumMipLevels() == 1 && //
+ src.texture->GetNumMipLevels() == 1 && // A copy command is of a single mip, so
+ // if a resource has more than one, we
+ // definitely cannot use CopyResource.
+ copySize.width == dstSize.width && //
+ copySize.width == srcSize.width && //
+ copySize.height == dstSize.height && //
+ copySize.height == srcSize.height && //
+ copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers && //
+ copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
+ }
+
+ void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
+ WriteTimestampCmd* cmd) {
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
+ commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
+ cmd->queryIndex);
+ }
+
+ void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
+ Device* device,
+ QuerySet* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ Buffer* destination,
+ uint64_t destinationOffset) {
+ const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+ auto currentIt = availability.begin() + firstQuery;
+ auto lastIt = availability.begin() + firstQuery + queryCount;
+
+ // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
+ while (currentIt != lastIt) {
+ auto firstTrueIt = std::find(currentIt, lastIt, true);
+ // No available query found for resolving
+ if (firstTrueIt == lastIt) {
+ break;
+ }
+ auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+ // The query index of firstTrueIt where the resolving starts
+ uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+ // The queries count between firstTrueIt and nextFalseIt need to be resolved
+ uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+ // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+ uint32_t resolveDestinationOffset =
+ destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+ // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+ commandList->ResolveQueryData(
+ querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
+ resolveQueryIndex, resolveQueryCount, destination->GetD3D12Resource(),
+ resolveDestinationOffset);
+
+ // Set current iterator to next false
+ currentIt = nextFalseIt;
+ }
+ }
+
+ void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
+ RenderPipeline* pipeline,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
+ const FirstOffsetInfo& firstOffsetInfo = pipeline->GetFirstOffsetInfo();
+ if (!firstOffsetInfo.usesVertexIndex && !firstOffsetInfo.usesInstanceIndex) {
+ return;
+ }
+ std::array<uint32_t, 2> offsets{};
+ uint32_t count = 0;
+ if (firstOffsetInfo.usesVertexIndex) {
+ offsets[firstOffsetInfo.vertexIndexOffset / sizeof(uint32_t)] = firstVertex;
+ ++count;
+ }
+ if (firstOffsetInfo.usesInstanceIndex) {
+ offsets[firstOffsetInfo.instanceIndexOffset / sizeof(uint32_t)] = firstInstance;
+ ++count;
+ }
+ PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+ commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
+ count, offsets.data(), 0);
+ }
+
+ bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy) {
+ // Currently we only need the workaround for an Intel D3D12 driver issue.
+ if (device->IsToggleEnabled(
+ Toggle::
+ UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
+ bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
+ ASSERT(
+ srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+
+ // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
+ // sizes of depth stencil formats are always no less than 4 bytes.
+ bool isSmallColorFormat =
+ HasOneBit(srcCopy.aspect) &&
+ srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
+ if (copyToLesserLevel && isSmallColorFormat) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+ ASSERT(srcCopy.aspect == dstCopy.aspect);
+ dawn::native::Format format = srcCopy.texture->GetFormat();
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+ ASSERT(copySize.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copySize.width / blockInfo.width;
+ ASSERT(copySize.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+ // Create tempBuffer
+ uint32_t bytesPerRow =
+ Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
+ uint32_t rowsPerImage = heightInBlocks;
+
+ // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
+ // need to set mappedAtCreation to be true.
+ auto tempBufferSize =
+ ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
+
+ BufferDescriptor tempBufferDescriptor;
+ tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+ tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
+ Device* device = ToBackend(srcCopy.texture->GetDevice());
+ Ref<BufferBase> tempBufferBase;
+ DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+ Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
+
+ BufferCopy bufferCopy;
+ bufferCopy.buffer = tempBuffer;
+ bufferCopy.offset = 0;
+ bufferCopy.bytesPerRow = bytesPerRow;
+ bufferCopy.rowsPerImage = rowsPerImage;
+
+ // Copy from source texture into tempBuffer
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::T2B,
+ recordingContext->GetCommandList(), bufferCopy, srcCopy,
+ copySize);
+
+ // Copy from tempBuffer into destination texture
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ RecordBufferTextureCopy(BufferTextureCopyDirection::B2T,
+ recordingContext->GetCommandList(), bufferCopy, dstCopy,
+ copySize);
+
+ // Save tempBuffer into recordingContext
+ recordingContext->AddToTempBuffers(std::move(tempBuffer));
+
+ return {};
+ }
+
+ void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
+ ComputePipeline* pipeline,
+ DispatchCmd* dispatch) {
+ if (!pipeline->UsesNumWorkgroups()) {
+ return;
+ }
+
+ PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+ commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3,
+ dispatch, 0);
+ }
+
+ // Records the necessary barriers for a synchronization scope using the resource usage
+ // data pre-computed in the frontend. Also performs lazy initialization if required.
+ // Returns whether any UAV are used in the synchronization scope.
+ bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+ const SyncScopeResourceUsage& usages) {
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+ wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+
+ for (size_t i = 0; i < usages.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(usages.buffers[i]);
+
+ // TODO(crbug.com/dawn/852): clear storage buffers with
+ // ClearUnorderedAccessView*().
+ buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+
+ D3D12_RESOURCE_BARRIER barrier;
+ if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
+ barriers.push_back(barrier);
+ }
+ bufferUsages |= usages.bufferUsages[i];
+ }
+
+ wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
+
+ for (size_t i = 0; i < usages.textures.size(); ++i) {
+ Texture* texture = ToBackend(usages.textures[i]);
+
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ usages.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+ textureUsages |= usage;
+ });
+
+ ToBackend(usages.textures[i])
+ ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+ usages.textureUsages[i]);
+ }
+
+ if (barriers.size()) {
+ commandList->ResourceBarrier(barriers.size(), barriers.data());
+ }
+
+ return (bufferUsages & wgpu::BufferUsage::Storage ||
+ textureUsages & wgpu::TextureUsage::StorageBinding);
+ }
+
+ } // anonymous namespace
+
+ class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
+ using Base = BindGroupTrackerBase;
+
+ public:
+ BindGroupStateTracker(Device* device)
+ : BindGroupTrackerBase(),
+ mDevice(device),
+ mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
+ mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
+ }
+
+ void SetInComputePass(bool inCompute_) {
+ mInCompute = inCompute_;
+ }
+
+ MaybeError Apply(CommandRecordingContext* commandContext) {
+ BeforeApply();
+
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ UpdateRootSignatureIfNecessary(commandList);
+
+ // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
+ // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
+ // at any given time. This means that when we switch heaps, all other currently bound
+ // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
+ // the signal to change the bounded heaps.
+ // Re-populating all bindgroups after the last one fails causes duplicated allocations
+ // to occur on overflow.
+ bool didCreateBindGroupViews = true;
+ bool didCreateBindGroupSamplers = true;
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+ didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
+ if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
+ break;
+ }
+ }
+
+ if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
+ if (!didCreateBindGroupViews) {
+ DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
+ }
+
+ if (!didCreateBindGroupSamplers) {
+ DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
+ }
+
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
+ mDirtyBindGroups |= mBindGroupLayoutsMask;
+
+ // Must be called before applying the bindgroups.
+ SetID3D12DescriptorHeaps(commandList);
+
+ for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+ didCreateBindGroupSamplers =
+ group->PopulateSamplers(mDevice, mSamplerAllocator);
+ ASSERT(didCreateBindGroupViews);
+ ASSERT(didCreateBindGroupSamplers);
+ }
+ }
+
+ for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+ }
+
+ AfterApply();
+
+ return {};
+ }
+
+ void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
+ ASSERT(commandList != nullptr);
+ std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
+ mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
+ ASSERT(descriptorHeaps[0] != nullptr);
+ ASSERT(descriptorHeaps[1] != nullptr);
+ commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
+
+ // Descriptor table state is undefined at the beginning of a command list and after
+ // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
+ // reset the root descriptor table for samplers, otherwise the shader cannot access the
+ // descriptor heaps.
+ mBoundRootSamplerTables = {};
+ }
+
+ private:
+ void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
+ if (mLastAppliedPipelineLayout != mPipelineLayout) {
+ if (mInCompute) {
+ commandList->SetComputeRootSignature(
+ ToBackend(mPipelineLayout)->GetRootSignature());
+ } else {
+ commandList->SetGraphicsRootSignature(
+ ToBackend(mPipelineLayout)->GetRootSignature());
+ }
+ // Invalidate the root sampler tables previously set in the root signature.
+ mBoundRootSamplerTables = {};
+ }
+ }
+
+ void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
+ const PipelineLayout* pipelineLayout,
+ BindGroupIndex index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint64_t* dynamicOffsetsIn) {
+ ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
+ dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
+ ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
+
+ // Usually, the application won't set the same offsets many times,
+ // so always try to apply dynamic offsets even if the offsets stay the same
+ if (dynamicOffsets.size() != BindingIndex(0)) {
+ // Update dynamic offsets.
+ // Dynamic buffer bindings are packed at the beginning of the layout.
+ for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
+ }
+
+ uint32_t parameterIndex =
+ pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+
+ // Calculate buffer locations that root descriptors links to. The location
+ // is (base buffer location + initial offset + dynamic offset)
+ uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
+ uint64_t offset = binding.offset + dynamicOffset;
+ D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
+ ToBackend(binding.buffer)->GetVA() + offset;
+
+ ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (mInCompute) {
+ commandList->SetComputeRootConstantBufferView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootConstantBufferView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ if (mInCompute) {
+ commandList->SetComputeRootUnorderedAccessView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (mInCompute) {
+ commandList->SetComputeRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ }
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // It's not necessary to update descriptor tables if only the dynamic offset changed.
+ if (!mDirtyBindGroups[index]) {
+ return;
+ }
+
+ const uint32_t cbvUavSrvCount =
+ ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+ const uint32_t samplerCount =
+ ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
+
+ if (cbvUavSrvCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+ } else {
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+ }
+ }
+
+ if (samplerCount > 0) {
+ uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
+ group->GetBaseSamplerDescriptor();
+ // Check if the group requires its sampler table to be set in the pipeline.
+ // This because sampler heap allocations could be cached and use the same table.
+ if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+ } else {
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+ }
+
+ mBoundRootSamplerTables[index] = baseDescriptor;
+ }
+ }
+
+ const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
+ if (dynamicStorageBufferLengths.size() != 0) {
+ uint32_t parameterIndex =
+ pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
+ uint32_t firstRegisterOffset =
+ pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
+
+ if (mInCompute) {
+ commandList->SetComputeRoot32BitConstants(
+ parameterIndex, dynamicStorageBufferLengths.size(),
+ dynamicStorageBufferLengths.data(), firstRegisterOffset);
+ } else {
+ commandList->SetGraphicsRoot32BitConstants(
+ parameterIndex, dynamicStorageBufferLengths.size(),
+ dynamicStorageBufferLengths.data(), firstRegisterOffset);
+ }
+ }
+ }
+
+ Device* mDevice;
+
+ bool mInCompute = false;
+
+ ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
+ mBoundRootSamplerTables = {};
+
+ ShaderVisibleDescriptorAllocator* mViewAllocator;
+ ShaderVisibleDescriptorAllocator* mSamplerAllocator;
+ };
+
+ namespace {
+ class VertexBufferTracker {
+ public:
+ void OnSetVertexBuffer(VertexBufferSlot slot,
+ Buffer* buffer,
+ uint64_t offset,
+ uint64_t size) {
+ mStartSlot = std::min(mStartSlot, slot);
+ mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+
+ auto* d3d12BufferView = &mD3D12BufferViews[slot];
+ d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
+ d3d12BufferView->SizeInBytes = size;
+ // The bufferView stride is set based on the vertex state before a draw.
+ }
+
+ void Apply(ID3D12GraphicsCommandList* commandList,
+ const RenderPipeline* renderPipeline) {
+ ASSERT(renderPipeline != nullptr);
+
+ VertexBufferSlot startSlot = mStartSlot;
+ VertexBufferSlot endSlot = mEndSlot;
+
+ // If the vertex state has changed, we need to update the StrideInBytes
+ // for the D3D12 buffer views. We also need to extend the dirty range to
+ // touch all these slots because the stride may have changed.
+ if (mLastAppliedRenderPipeline != renderPipeline) {
+ mLastAppliedRenderPipeline = renderPipeline;
+
+ for (VertexBufferSlot slot :
+ IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+ startSlot = std::min(startSlot, slot);
+ endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+ mD3D12BufferViews[slot].StrideInBytes =
+ renderPipeline->GetVertexBuffer(slot).arrayStride;
+ }
+ }
+
+ if (endSlot <= startSlot) {
+ return;
+ }
+
+ // mD3D12BufferViews is kept up to date with the most recent data passed
+ // to SetVertexBuffer. This makes it correct to only track the start
+ // and end of the dirty range. When Apply is called,
+ // we will at worst set non-dirty vertex buffers in duplicate.
+ commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
+ static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
+ &mD3D12BufferViews[startSlot]);
+
+ mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
+ mEndSlot = VertexBufferSlot(uint8_t(0));
+ }
+
+ private:
+ // startSlot and endSlot indicate the range of dirty vertex buffers.
+ // If there are multiple calls to SetVertexBuffer, the start and end
+ // represent the union of the dirty ranges (the union may have non-dirty
+ // data in the middle of the range).
+ const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
+ VertexBufferSlot mStartSlot{kMaxVertexBuffers};
+ VertexBufferSlot mEndSlot{uint8_t(0)};
+ ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers>
+ mD3D12BufferViews = {};
+ };
+
+ void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass) {
+ ASSERT(renderPass != nullptr);
+
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ TextureViewBase* resolveTarget =
+ renderPass->colorAttachments[i].resolveTarget.Get();
+ if (resolveTarget == nullptr) {
+ continue;
+ }
+
+ TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
+ Texture* colorTexture = ToBackend(colorView->GetTexture());
+ Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
+
+ // Transition the usages of the color attachment and resolve target.
+ colorTexture->TrackUsageAndTransitionNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
+ colorView->GetSubresourceRange());
+ resolveTexture->TrackUsageAndTransitionNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_DEST,
+ resolveTarget->GetSubresourceRange());
+
+ // Do MSAA resolve with ResolveSubResource().
+ ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
+ ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
+ const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
+ resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
+ Aspect::Color);
+ constexpr uint32_t kColorTextureSubresourceIndex = 0;
+ commandContext->GetCommandList()->ResolveSubresource(
+ resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
+ kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
+ }
+ }
+
+ } // anonymous namespace
+
+ // static
+ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+ }
+
    // Delegates entirely to CommandBufferBase; no D3D12-specific state is set up
    // until RecordCommands() replays the command stream.
    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
        : CommandBufferBase(encoder, descriptor) {
    }
+
    // Replays the frontend command stream (mCommands) onto the D3D12 command list,
    // inserting resource state transitions, lazy clears/initialization, and
    // descriptor heap setup as needed. Compute and render passes are delegated to
    // RecordComputePass/RecordRenderPass; the per-pass resource usage data is
    // consumed in recording order via nextComputePassNumber/nextRenderPassNumber.
    MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
        Device* device = ToBackend(GetDevice());
        BindGroupStateTracker bindingTracker(device);

        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();

        // Make sure we use the correct descriptors for this command list. Could be done once per
        // actual command list but here is ok because there should be few command buffers.
        bindingTracker.SetID3D12DescriptorHeaps(commandList);

        size_t nextComputePassNumber = 0;
        size_t nextRenderPassNumber = 0;

        Command type;
        while (mCommands.NextCommandId(&type)) {
            switch (type) {
                case Command::BeginComputePass: {
                    mCommands.NextCommand<BeginComputePassCmd>();

                    // Compute passes transition/clear resources per dispatch inside
                    // RecordComputePass rather than up front.
                    bindingTracker.SetInComputePass(true);
                    DAWN_TRY(RecordComputePass(
                        commandContext, &bindingTracker,
                        GetResourceUsages().computePasses[nextComputePassNumber]));

                    nextComputePassNumber++;
                    break;
                }

                case Command::BeginRenderPass: {
                    BeginRenderPassCmd* beginRenderPassCmd =
                        mCommands.NextCommand<BeginRenderPassCmd>();

                    // Render passes transition/clear everything they use up front.
                    const bool passHasUAV = TransitionAndClearForSyncScope(
                        commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
                    bindingTracker.SetInComputePass(false);

                    LazyClearRenderPassAttachments(beginRenderPassCmd);
                    DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
                                              passHasUAV));

                    nextRenderPassNumber++;
                    break;
                }

                case Command::CopyBufferToBuffer: {
                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
                    if (copy->size == 0) {
                        // Skip no-op copies.
                        break;
                    }
                    Buffer* srcBuffer = ToBackend(copy->source.Get());
                    Buffer* dstBuffer = ToBackend(copy->destination.Get());

                    // Source must hold defined data; the destination only needs a clear
                    // if this copy doesn't fully overwrite the uninitialized region.
                    DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
                    bool cleared;
                    DAWN_TRY_ASSIGN(cleared,
                                    dstBuffer->EnsureDataInitializedAsDestination(
                                        commandContext, copy->destinationOffset, copy->size));
                    DAWN_UNUSED(cleared);

                    srcBuffer->TrackUsageAndTransitionNow(commandContext,
                                                          wgpu::BufferUsage::CopySrc);
                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
                                                          wgpu::BufferUsage::CopyDst);

                    commandList->CopyBufferRegion(
                        dstBuffer->GetD3D12Resource(), copy->destinationOffset,
                        srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
                    break;
                }

                case Command::CopyBufferToTexture: {
                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
                        copy->copySize.depthOrArrayLayers == 0) {
                        // Skip no-op copies.
                        continue;
                    }
                    Buffer* buffer = ToBackend(copy->source.buffer.Get());
                    Texture* texture = ToBackend(copy->destination.texture.Get());

                    DAWN_TRY(buffer->EnsureDataInitialized(commandContext));

                    SubresourceRange subresources =
                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);

                    // A copy that fully overwrites the subresource makes a lazy clear
                    // unnecessary; otherwise the texture must be initialized first.
                    if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
                                                      copy->destination.mipLevel)) {
                        texture->SetIsSubresourceContentInitialized(true, subresources);
                    } else {
                        texture->EnsureSubresourceContentInitialized(commandContext, subresources);
                    }

                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
                                                        subresources);

                    RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList,
                                            copy->source, copy->destination, copy->copySize);

                    break;
                }

                case Command::CopyTextureToBuffer: {
                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
                        copy->copySize.depthOrArrayLayers == 0) {
                        // Skip no-op copies.
                        continue;
                    }
                    Texture* texture = ToBackend(copy->source.texture.Get());
                    Buffer* buffer = ToBackend(copy->destination.buffer.Get());

                    DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));

                    SubresourceRange subresources =
                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);

                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);

                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
                                                        subresources);
                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);

                    RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
                                            copy->destination, copy->source, copy->copySize);

                    break;
                }

                case Command::CopyTextureToTexture: {
                    CopyTextureToTextureCmd* copy =
                        mCommands.NextCommand<CopyTextureToTextureCmd>();
                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
                        copy->copySize.depthOrArrayLayers == 0) {
                        // Skip no-op copies.
                        continue;
                    }
                    Texture* source = ToBackend(copy->source.texture.Get());
                    Texture* destination = ToBackend(copy->destination.texture.Get());

                    SubresourceRange srcRange =
                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
                    SubresourceRange dstRange =
                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);

                    source->EnsureSubresourceContentInitialized(commandContext, srcRange);
                    // As with B2T copies: a full overwrite marks the destination
                    // initialized instead of lazily clearing it.
                    if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
                                                      copy->destination.mipLevel)) {
                        destination->SetIsSubresourceContentInitialized(true, dstRange);
                    } else {
                        destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
                    }

                    if (copy->source.texture.Get() == copy->destination.texture.Get() &&
                        copy->source.mipLevel == copy->destination.mipLevel) {
                        // When there are overlapped subresources, the layout of the overlapped
                        // subresources should all be COMMON instead of what we set now. Currently
                        // it is not allowed to copy with overlapped subresources, but we still
                        // add the ASSERT here as a reminder for this possible misuse.
                        ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
                                                  copy->copySize.depthOrArrayLayers));
                    }
                    source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
                                                       srcRange);
                    destination->TrackUsageAndTransitionNow(commandContext,
                                                            wgpu::TextureUsage::CopyDst, dstRange);

                    ASSERT(srcRange.aspects == dstRange.aspects);
                    // Driver-workaround path: stage through a temporary buffer.
                    if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
                                                       copy->destination)) {
                        DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
                            commandContext, copy->source, copy->destination, copy->copySize));
                        break;
                    }

                    if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
                        // Whole-resource copy: a single CopyResource call suffices.
                        commandList->CopyResource(destination->GetD3D12Resource(),
                                                  source->GetD3D12Resource());
                    } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
                               destination->GetDimension() == wgpu::TextureDimension::e3D) {
                        // 3D-to-3D: the depth is part of the copy box, one region per aspect.
                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
                            D3D12_TEXTURE_COPY_LOCATION srcLocation =
                                ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
                                                                     0, aspect);
                            D3D12_TEXTURE_COPY_LOCATION dstLocation =
                                ComputeTextureCopyLocationForTexture(
                                    destination, copy->destination.mipLevel, 0, aspect);

                            D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
                                copy->source.origin, copy->copySize);

                            commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
                                                           copy->destination.origin.y,
                                                           copy->destination.origin.z, &srcLocation,
                                                           &sourceRegion);
                        }
                    } else {
                        // Mixed/2D dimensions: copy one slice (layer or depth plane) at a
                        // time, mapping origin.z to either an array layer or a depth offset
                        // depending on each texture's dimension.
                        const dawn::native::Extent3D copyExtentOneSlice = {
                            copy->copySize.width, copy->copySize.height, 1u};

                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
                            for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
                                uint32_t sourceLayer = 0;
                                uint32_t sourceZ = 0;
                                switch (source->GetDimension()) {
                                    case wgpu::TextureDimension::e1D:
                                        ASSERT(copy->source.origin.z == 0);
                                        break;
                                    case wgpu::TextureDimension::e2D:
                                        sourceLayer = copy->source.origin.z + z;
                                        break;
                                    case wgpu::TextureDimension::e3D:
                                        sourceZ = copy->source.origin.z + z;
                                        break;
                                }

                                uint32_t destinationLayer = 0;
                                uint32_t destinationZ = 0;
                                switch (destination->GetDimension()) {
                                    case wgpu::TextureDimension::e1D:
                                        ASSERT(copy->destination.origin.z == 0);
                                        break;
                                    case wgpu::TextureDimension::e2D:
                                        destinationLayer = copy->destination.origin.z + z;
                                        break;
                                    case wgpu::TextureDimension::e3D:
                                        destinationZ = copy->destination.origin.z + z;
                                        break;
                                }
                                D3D12_TEXTURE_COPY_LOCATION srcLocation =
                                    ComputeTextureCopyLocationForTexture(
                                        source, copy->source.mipLevel, sourceLayer, aspect);

                                D3D12_TEXTURE_COPY_LOCATION dstLocation =
                                    ComputeTextureCopyLocationForTexture(destination,
                                                                         copy->destination.mipLevel,
                                                                         destinationLayer, aspect);

                                Origin3D sourceOriginInSubresource = copy->source.origin;
                                sourceOriginInSubresource.z = sourceZ;
                                D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
                                    sourceOriginInSubresource, copyExtentOneSlice);

                                commandList->CopyTextureRegion(
                                    &dstLocation, copy->destination.origin.x,
                                    copy->destination.origin.y, destinationZ, &srcLocation,
                                    &sourceRegion);
                            }
                        }
                    }
                    break;
                }

                case Command::ClearBuffer: {
                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
                    if (cmd->size == 0) {
                        // Skip no-op fills.
                        break;
                    }
                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());

                    // If lazy initialization already zeroed the range, the explicit
                    // clear below would be redundant.
                    bool clearedToZero;
                    DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
                                                       commandContext, cmd->offset, cmd->size));

                    if (!clearedToZero) {
                        DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
                                                           cmd->offset, cmd->size));
                    }

                    break;
                }

                case Command::ResolveQuerySet: {
                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
                    uint32_t firstQuery = cmd->firstQuery;
                    uint32_t queryCount = cmd->queryCount;
                    Buffer* destination = ToBackend(cmd->destination.Get());
                    uint64_t destinationOffset = cmd->destinationOffset;

                    bool cleared;
                    DAWN_TRY_ASSIGN(cleared, destination->EnsureDataInitializedAsDestination(
                                                 commandContext, destinationOffset,
                                                 queryCount * sizeof(uint64_t)));
                    DAWN_UNUSED(cleared);

                    // Resolving unavailable queries is undefined behaviour on D3D12, we only can
                    // resolve the available part of sparse queries. In order to resolve the
                    // unavailables as 0s, we need to clear the resolving region of the destination
                    // buffer to 0s.
                    auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
                    auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
                    bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
                    if (hasUnavailableQueries) {
                        DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
                                                           destinationOffset,
                                                           queryCount * sizeof(uint64_t)));
                    }

                    destination->TrackUsageAndTransitionNow(commandContext,
                                                            wgpu::BufferUsage::QueryResolve);

                    RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
                                             destination, destinationOffset);

                    break;
                }

                case Command::WriteTimestamp: {
                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();

                    RecordWriteTimestampCmd(commandList, cmd);
                    break;
                }

                case Command::InsertDebugMarker: {
                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
                    const char* label = mCommands.NextData<char>(cmd->length + 1);

                    // Debug markers only reach the command list when the PIX event
                    // runtime is present; otherwise they are dropped silently.
                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
                        // PIX color is 1 byte per channel in ARGB format
                        constexpr uint64_t kPIXBlackColor = 0xff000000;
                        ToBackend(GetDevice())
                            ->GetFunctions()
                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
                    }
                    break;
                }

                case Command::PopDebugGroup: {
                    mCommands.NextCommand<PopDebugGroupCmd>();

                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
                        ToBackend(GetDevice())
                            ->GetFunctions()
                            ->pixEndEventOnCommandList(commandList);
                    }
                    break;
                }

                case Command::PushDebugGroup: {
                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
                    const char* label = mCommands.NextData<char>(cmd->length + 1);

                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
                        // PIX color is 1 byte per channel in ARGB format
                        constexpr uint64_t kPIXBlackColor = 0xff000000;
                        ToBackend(GetDevice())
                            ->GetFunctions()
                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
                    }
                    break;
                }

                case Command::WriteBuffer: {
                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
                    const uint64_t offset = write->offset;
                    const uint64_t size = write->size;
                    if (size == 0) {
                        continue;
                    }

                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
                    uint8_t* data = mCommands.NextData<uint8_t>(size);
                    Device* device = ToBackend(GetDevice());

                    // The CPU data is first memcpy'd into a ringbuffer staging
                    // allocation, then copied into the destination on the GPU.
                    UploadHandle uploadHandle;
                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
                                                      size, device->GetPendingCommandSerial(),
                                                      kCopyBufferToBufferOffsetAlignment));
                    ASSERT(uploadHandle.mappedBuffer != nullptr);
                    memcpy(uploadHandle.mappedBuffer, data, size);

                    bool cleared;
                    DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
                                                 commandContext, offset, size));
                    DAWN_UNUSED(cleared);
                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
                                                          wgpu::BufferUsage::CopyDst);
                    commandList->CopyBufferRegion(
                        dstBuffer->GetD3D12Resource(), offset,
                        ToBackend(uploadHandle.stagingBuffer)->GetResource(),
                        uploadHandle.startOffset, size);
                    break;
                }

                default:
                    UNREACHABLE();
            }
        }

        return {};
    }
+
+        // Replays the recorded compute-pass commands onto the D3D12 command list until an
+        // EndComputePass command is reached. resourceUsages.dispatchUsages holds one
+        // sync-scope entry per dispatch; currentDispatch indexes into it in recording order.
+        MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
+                                                    BindGroupStateTracker* bindingTracker,
+                                                    const ComputePassResourceUsage& resourceUsages) {
+            uint64_t currentDispatch = 0;
+            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+            Command type;
+            ComputePipeline* lastPipeline = nullptr;
+            while (mCommands.NextCommandId(&type)) {
+                switch (type) {
+                    case Command::Dispatch: {
+                        DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                        // Skip noop dispatches, it can cause D3D12 warning from validation layers and
+                        // leads to device lost.
+                        if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+                            break;
+                        }
+
+                        // Transition/clear the resources used by this dispatch, then flush any
+                        // pending bind group changes before issuing the dispatch itself.
+                        TransitionAndClearForSyncScope(commandContext,
+                                                       resourceUsages.dispatchUsages[currentDispatch]);
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                        RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
+                        commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
+                        currentDispatch++;
+                        break;
+                    }
+
+                    case Command::DispatchIndirect: {
+                        DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+                        TransitionAndClearForSyncScope(commandContext,
+                                                       resourceUsages.dispatchUsages[currentDispatch]);
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                        // NOTE(review): lastPipeline is dereferenced here without a null check;
+                        // presumably frontend validation guarantees SetComputePipeline precedes
+                        // any dispatch — confirm against the command encoder validation.
+                        ComPtr<ID3D12CommandSignature> signature =
+                            lastPipeline->GetDispatchIndirectCommandSignature();
+                        commandList->ExecuteIndirect(
+                            signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
+                            dispatch->indirectOffset, nullptr, 0);
+                        currentDispatch++;
+                        break;
+                    }
+
+                    case Command::EndComputePass: {
+                        // End of the pass: consume the command and return to the caller.
+                        mCommands.NextCommand<EndComputePassCmd>();
+                        return {};
+                    }
+
+                    case Command::SetComputePipeline: {
+                        SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                        ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                        commandList->SetPipelineState(pipeline->GetPipelineState());
+
+                        bindingTracker->OnSetPipeline(pipeline);
+                        lastPipeline = pipeline;
+                        break;
+                    }
+
+                    case Command::SetBindGroup: {
+                        SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                        BindGroup* group = ToBackend(cmd->group.Get());
+                        uint32_t* dynamicOffsets = nullptr;
+
+                        // Dynamic offsets are stored inline in the command stream right after the
+                        // SetBindGroup command itself.
+                        if (cmd->dynamicOffsetCount > 0) {
+                            dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                        }
+
+                        // Deferred: the tracker applies the bind group lazily on the next dispatch.
+                        bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                                       dynamicOffsets);
+                        break;
+                    }
+
+                    case Command::InsertDebugMarker: {
+                        InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                        // Debug markers are emitted only when the PIX event runtime is available.
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            // PIX color is 1 byte per channel in ARGB format
+                            constexpr uint64_t kPIXBlackColor = 0xff000000;
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                        }
+                        break;
+                    }
+
+                    case Command::PopDebugGroup: {
+                        mCommands.NextCommand<PopDebugGroupCmd>();
+
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixEndEventOnCommandList(commandList);
+                        }
+                        break;
+                    }
+
+                    case Command::PushDebugGroup: {
+                        PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                        const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            // PIX color is 1 byte per channel in ARGB format
+                            constexpr uint64_t kPIXBlackColor = 0xff000000;
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                        }
+                        break;
+                    }
+
+                    case Command::WriteTimestamp: {
+                        WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                        RecordWriteTimestampCmd(commandList, cmd);
+                        break;
+                    }
+
+                    default:
+                        UNREACHABLE();
+                }
+            }
+
+            return {};
+        }
+
+        // Translates the frontend BeginRenderPassCmd into the RenderPassBuilder: allocates
+        // transient RTV/DSV descriptors, creates the D3D12 views, and records the
+        // beginning/ending access operations (load/store/resolve) for every attachment.
+        MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
+                                                  BeginRenderPassCmd* renderPass,
+                                                  RenderPassBuilder* renderPassBuilder) {
+            Device* device = ToBackend(GetDevice());
+
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
+                TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+                // Set view attachment.
+                // Transient CPU descriptors are valid only for the current pending command list.
+                CPUDescriptorHeapAllocation rtvAllocation;
+                DAWN_TRY_ASSIGN(
+                    rtvAllocation,
+                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+
+                const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
+                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = rtvAllocation.GetBaseDescriptor();
+
+                device->GetD3D12Device()->CreateRenderTargetView(
+                    ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+
+                renderPassBuilder->SetRenderTargetView(i, baseDescriptor);
+
+                // Set color load operation.
+                renderPassBuilder->SetRenderTargetBeginningAccess(
+                    i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
+
+                // Set color store operation.
+                if (attachmentInfo.resolveTarget != nullptr) {
+                    // A resolve target implies an ending-access resolve: the destination must be
+                    // in RESOLVE_DEST state before the pass ends.
+                    TextureView* resolveDestinationView = ToBackend(attachmentInfo.resolveTarget.Get());
+                    Texture* resolveDestinationTexture =
+                        ToBackend(resolveDestinationView->GetTexture());
+
+                    resolveDestinationTexture->TrackUsageAndTransitionNow(
+                        commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
+                        resolveDestinationView->GetSubresourceRange());
+
+                    renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
+                                                                          view, resolveDestinationView);
+                } else {
+                    renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
+                }
+            }
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                RenderPassDepthStencilAttachmentInfo& attachmentInfo =
+                    renderPass->depthStencilAttachment;
+                TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
+
+                // Set depth attachment.
+                CPUDescriptorHeapAllocation dsvAllocation;
+                DAWN_TRY_ASSIGN(
+                    dsvAllocation,
+                    device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+
+                const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc = view->GetDSVDescriptor(
+                    attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
+                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
+
+                device->GetD3D12Device()->CreateDepthStencilView(
+                    ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
+
+                renderPassBuilder->SetDepthStencilView(baseDescriptor);
+
+                const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
+                const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+
+                // Set depth/stencil load operations. A format without the aspect gets an
+                // explicit "no access" so D3D12 render pass validation is satisfied.
+                if (hasDepth) {
+                    renderPassBuilder->SetDepthAccess(
+                        attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+                        attachmentInfo.clearDepth, view->GetD3D12Format());
+                } else {
+                    renderPassBuilder->SetDepthNoAccess();
+                }
+
+                if (hasStencil) {
+                    renderPassBuilder->SetStencilAccess(
+                        attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                        attachmentInfo.clearStencil, view->GetD3D12Format());
+                } else {
+                    renderPassBuilder->SetStencilNoAccess();
+                }
+
+            } else {
+                renderPassBuilder->SetDepthStencilNoAccess();
+            }
+
+            return {};
+        }
+
+        // Fallback path used when the native D3D12 render pass API (Windows 1809+) is not
+        // available: manually performs the "beginning access" clears described by the
+        // RenderPassBuilder, then binds the render targets with OMSetRenderTargets.
+        void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                                   const RenderPassBuilder* renderPassBuilder) const {
+            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+            // Clear framebuffer attachments as needed.
+            {
+                for (ColorAttachmentIndex i(uint8_t(0));
+                     i < renderPassBuilder->GetColorAttachmentCount(); i++) {
+                    // Load op - color
+                    if (renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
+                            .BeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                        commandList->ClearRenderTargetView(
+                            renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i].cpuDescriptor,
+                            renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
+                                .BeginningAccess.Clear.ClearValue.Color,
+                            0, nullptr);
+                    }
+                }
+
+                if (renderPassBuilder->HasDepth()) {
+                    // Accumulate the depth and stencil clear requests into a single
+                    // ClearDepthStencilView call.
+                    D3D12_CLEAR_FLAGS clearFlags = {};
+                    float depthClear = 0.0f;
+                    uint8_t stencilClear = 0u;
+
+                    if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                            ->DepthBeginningAccess.Type ==
+                        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                        clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+                        depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                                         ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
+                    }
+                    if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                            ->StencilBeginningAccess.Type ==
+                        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                        clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+                        stencilClear =
+                            renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                                ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
+                    }
+
+                    // Skip the call entirely when neither aspect requested a clear.
+                    if (clearFlags) {
+                        commandList->ClearDepthStencilView(
+                            renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
+                            clearFlags, depthClear, stencilClear, 0, nullptr);
+                    }
+                }
+            }
+
+            commandList->OMSetRenderTargets(
+                static_cast<uint8_t>(renderPassBuilder->GetColorAttachmentCount()),
+                renderPassBuilder->GetRenderTargetViews(), FALSE,
+                renderPassBuilder->HasDepth()
+                    ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
+                    : nullptr);
+        }
+
+        // Replays a render pass: sets up attachments, begins the pass (native render pass API
+        // when the toggle is enabled, emulation otherwise), establishes default dynamic state,
+        // then replays draw/state commands until EndRenderPass. Render bundle commands share
+        // the same encoding path via the EncodeRenderBundleCommand lambda.
+        MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
+                                                   BindGroupStateTracker* bindingTracker,
+                                                   BeginRenderPassCmd* renderPass,
+                                                   const bool passHasUAV) {
+            Device* device = ToBackend(GetDevice());
+            const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+
+            // renderPassBuilder must be scoped to RecordRenderPass because any underlying
+            // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
+            // valid until after EndRenderPass() has been called.
+            RenderPassBuilder renderPassBuilder(passHasUAV);
+
+            DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
+
+            // Use D3D12's native render pass API if it's available, otherwise emulate the
+            // beginning and ending access operations.
+            if (useRenderPass) {
+                commandContext->GetCommandList4()->BeginRenderPass(
+                    static_cast<uint8_t>(renderPassBuilder.GetColorAttachmentCount()),
+                    renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
+                    renderPassBuilder.HasDepth()
+                        ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
+                        : nullptr,
+                    renderPassBuilder.GetRenderPassFlags());
+            } else {
+                EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+            }
+
+            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+            // Set up default dynamic state
+            {
+                // Viewport/scissor default to the full render area; blend factor and stencil
+                // reference default to zero, matching WebGPU's initial render pass state.
+                uint32_t width = renderPass->width;
+                uint32_t height = renderPass->height;
+                D3D12_VIEWPORT viewport = {
+                    0.f, 0.f, static_cast<float>(width), static_cast<float>(height), 0.f, 1.f};
+                D3D12_RECT scissorRect = {0, 0, static_cast<long>(width), static_cast<long>(height)};
+                commandList->RSSetViewports(1, &viewport);
+                commandList->RSSetScissorRects(1, &scissorRect);
+
+                static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
+                commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
+
+                commandList->OMSetStencilRef(0);
+            }
+
+            RenderPipeline* lastPipeline = nullptr;
+            VertexBufferTracker vertexBufferTracker = {};
+
+            // Encodes one command that may appear either directly in the pass or inside a
+            // render bundle. Captures the trackers by reference so bundle commands update the
+            // same pipeline/vertex/bind-group state as pass-level commands.
+            auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
+                switch (type) {
+                    case Command::Draw: {
+                        DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+                        vertexBufferTracker.Apply(commandList, lastPipeline);
+                        RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
+                                               draw->firstInstance);
+                        commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
+                                                   draw->firstVertex, draw->firstInstance);
+                        break;
+                    }
+
+                    case Command::DrawIndexed: {
+                        DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+                        vertexBufferTracker.Apply(commandList, lastPipeline);
+                        RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
+                                               draw->firstInstance);
+                        commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
+                                                          draw->firstIndex, draw->baseVertex,
+                                                          draw->firstInstance);
+                        break;
+                    }
+
+                    case Command::DrawIndirect: {
+                        DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+                        vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                        // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
+                        // Zero the index offset values to avoid reusing values from the previous draw
+                        RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+
+                        Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                        ComPtr<ID3D12CommandSignature> signature =
+                            ToBackend(GetDevice())->GetDrawIndirectSignature();
+                        commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                                     draw->indirectOffset, nullptr, 0);
+                        break;
+                    }
+
+                    case Command::DrawIndexedIndirect: {
+                        DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                        DAWN_TRY(bindingTracker->Apply(commandContext));
+                        vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                        // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
+                        // Zero the index offset values to avoid reusing values from the previous draw
+                        RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
+
+                        Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                        ASSERT(buffer != nullptr);
+
+                        ComPtr<ID3D12CommandSignature> signature =
+                            ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
+                        commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                                     draw->indirectOffset, nullptr, 0);
+                        break;
+                    }
+
+                    case Command::InsertDebugMarker: {
+                        InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                        const char* label = iter->NextData<char>(cmd->length + 1);
+
+                        // Markers are emitted only when the PIX event runtime is available.
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            // PIX color is 1 byte per channel in ARGB format
+                            constexpr uint64_t kPIXBlackColor = 0xff000000;
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                        }
+                        break;
+                    }
+
+                    case Command::PopDebugGroup: {
+                        iter->NextCommand<PopDebugGroupCmd>();
+
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixEndEventOnCommandList(commandList);
+                        }
+                        break;
+                    }
+
+                    case Command::PushDebugGroup: {
+                        PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                        const char* label = iter->NextData<char>(cmd->length + 1);
+
+                        if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                            // PIX color is 1 byte per channel in ARGB format
+                            constexpr uint64_t kPIXBlackColor = 0xff000000;
+                            ToBackend(GetDevice())
+                                ->GetFunctions()
+                                ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                        }
+                        break;
+                    }
+
+                    case Command::SetRenderPipeline: {
+                        SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                        RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                        commandList->SetPipelineState(pipeline->GetPipelineState());
+                        commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
+
+                        bindingTracker->OnSetPipeline(pipeline);
+
+                        lastPipeline = pipeline;
+                        break;
+                    }
+
+                    case Command::SetBindGroup: {
+                        SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                        BindGroup* group = ToBackend(cmd->group.Get());
+                        uint32_t* dynamicOffsets = nullptr;
+
+                        // Dynamic offsets follow the command inline in the command stream.
+                        if (cmd->dynamicOffsetCount > 0) {
+                            dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                        }
+
+                        bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                                       dynamicOffsets);
+                        break;
+                    }
+
+                    case Command::SetIndexBuffer: {
+                        SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                        D3D12_INDEX_BUFFER_VIEW bufferView;
+                        bufferView.Format = DXGIIndexFormat(cmd->format);
+                        bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
+                        bufferView.SizeInBytes = cmd->size;
+
+                        commandList->IASetIndexBuffer(&bufferView);
+                        break;
+                    }
+
+                    case Command::SetVertexBuffer: {
+                        SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                        // Deferred: the tracker binds vertex buffers lazily at the next draw.
+                        vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                              cmd->offset, cmd->size);
+                        break;
+                    }
+
+                    default:
+                        UNREACHABLE();
+                        break;
+                }
+                return {};
+            };
+
+            Command type;
+            while (mCommands.NextCommandId(&type)) {
+                switch (type) {
+                    case Command::EndRenderPass: {
+                        mCommands.NextCommand<EndRenderPassCmd>();
+                        if (useRenderPass) {
+                            commandContext->GetCommandList4()->EndRenderPass();
+                        } else if (renderPass->attachmentState->GetSampleCount() > 1) {
+                            // Emulated path: perform MSAA resolves that the native render pass
+                            // API would otherwise do as part of its ending access.
+                            ResolveMultisampledRenderPass(commandContext, renderPass);
+                        }
+                        return {};
+                    }
+
+                    case Command::SetStencilReference: {
+                        SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+
+                        commandList->OMSetStencilRef(cmd->reference);
+                        break;
+                    }
+
+                    case Command::SetViewport: {
+                        SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                        D3D12_VIEWPORT viewport;
+                        viewport.TopLeftX = cmd->x;
+                        viewport.TopLeftY = cmd->y;
+                        viewport.Width = cmd->width;
+                        viewport.Height = cmd->height;
+                        viewport.MinDepth = cmd->minDepth;
+                        viewport.MaxDepth = cmd->maxDepth;
+
+                        commandList->RSSetViewports(1, &viewport);
+                        break;
+                    }
+
+                    case Command::SetScissorRect: {
+                        SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                        D3D12_RECT rect;
+                        rect.left = cmd->x;
+                        rect.top = cmd->y;
+                        rect.right = cmd->x + cmd->width;
+                        rect.bottom = cmd->y + cmd->height;
+
+                        commandList->RSSetScissorRects(1, &rect);
+                        break;
+                    }
+
+                    case Command::SetBlendConstant: {
+                        SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                        const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
+                        commandList->OMSetBlendFactor(color.data());
+                        break;
+                    }
+
+                    case Command::ExecuteBundles: {
+                        ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                        auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                        // Replay each bundle's command stream through the shared encoder lambda.
+                        for (uint32_t i = 0; i < cmd->count; ++i) {
+                            CommandIterator* iter = bundles[i]->GetCommands();
+                            iter->Reset();
+                            while (iter->NextCommandId(&type)) {
+                                DAWN_TRY(EncodeRenderBundleCommand(iter, type));
+                            }
+                        }
+                        break;
+                    }
+
+                    case Command::BeginOcclusionQuery: {
+                        BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+                        QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                        ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                               D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                        commandList->BeginQuery(querySet->GetQueryHeap(),
+                                                D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
+                        break;
+                    }
+
+                    case Command::EndOcclusionQuery: {
+                        EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+                        QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                        ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                               D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                        commandList->EndQuery(querySet->GetQueryHeap(),
+                                              D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
+                        break;
+                    }
+
+                    case Command::WriteTimestamp: {
+                        WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                        RecordWriteTimestampCmd(commandList, cmd);
+                        break;
+                    }
+
+                    default: {
+                        // All remaining command types are the bundle-compatible draw/state
+                        // commands handled by the shared lambda above.
+                        DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
+                        break;
+                    }
+                }
+            }
+            return {};
+        }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h
new file mode 100644
index 00000000000..d6d44385b15
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandBufferD3D12.h
@@ -0,0 +1,57 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
+#define DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native {
+ struct BeginRenderPassCmd;
+} // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
+ class BindGroupStateTracker;
+ class CommandRecordingContext;
+ class RenderPassBuilder;
+
+    // D3D12 backend implementation of a WebGPU command buffer. Translates the frontend
+    // command stream into calls on an ID3D12GraphicsCommandList when RecordCommands is
+    // invoked at queue submission time.
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor);
+
+        // Replays the recorded commands into the given recording context's command list.
+        MaybeError RecordCommands(CommandRecordingContext* commandContext);
+
+      private:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        // Replays the commands of one compute pass (through EndComputePass).
+        MaybeError RecordComputePass(CommandRecordingContext* commandContext,
+                                     BindGroupStateTracker* bindingTracker,
+                                     const ComputePassResourceUsage& resourceUsages);
+        // Replays the commands of one render pass (through EndRenderPass).
+        MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
+                                    BindGroupStateTracker* bindingTracker,
+                                    BeginRenderPassCmd* renderPass,
+                                    bool passHasUAV);
+        // Fills the RenderPassBuilder with attachment views and load/store/resolve ops.
+        MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
+                                   BeginRenderPassCmd* renderPass,
+                                   RenderPassBuilder* renderPassBuilder);
+        // Emulates render pass beginning access (clears + OMSetRenderTargets) when the
+        // native D3D12 render pass API is unavailable.
+        void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                    const RenderPassBuilder* renderPassBuilder) const;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp
new file mode 100644
index 00000000000..bb8ef813005
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.cpp
@@ -0,0 +1,175 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <profileapi.h>
+#include <sysinfoapi.h>
+
+namespace dawn::native::d3d12 {
+
+    // Registers a shared (cross-API) texture so it is transitioned to COMMON state and its
+    // keyed mutex acquired/released around the next ExecuteCommandList. Requires an open
+    // recording context.
+    void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
+        ASSERT(IsOpen());
+        mSharedTextures.insert(texture);
+    }
+
+    // Opens the recording context: reserves a command allocator and either resets the
+    // cached command list against it or, on first use, creates a new direct command list.
+    MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
+                                             CommandAllocatorManager* commandAllocationManager) {
+        ASSERT(!IsOpen());
+        ID3D12CommandAllocator* commandAllocator;
+        DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
+        if (mD3d12CommandList != nullptr) {
+            // Reuse the existing command list; on reset failure drop it so the next Open
+            // recreates it from scratch.
+            MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
+                                            "D3D12 resetting command list");
+            if (error.IsError()) {
+                mD3d12CommandList.Reset();
+                DAWN_TRY(std::move(error));
+            }
+        } else {
+            ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
+            DAWN_TRY(CheckHRESULT(
+                d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
+                                               nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
+                "D3D12 creating direct command list"));
+            mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+            // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
+            // pass APIs introduced in Windows build 1809.
+            // NOTE(review): the HRESULT of As() is ignored — presumably mD3d12CommandList4 is
+            // left null on pre-1809 Windows and callers query support before using it; confirm.
+            mD3d12CommandList.As(&mD3d12CommandList4);
+        }
+
+        mIsOpen = true;
+
+        return {};
+    }
+
+    // Closes and submits the pending command list to the device's queue. Handles shared
+    // texture keyed-mutex acquire/release, heap residency, and optional detailed timing
+    // trace events. No-op if the context is not open.
+    MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
+        if (IsOpen()) {
+            // Shared textures must be transitioned to common state after the last usage in order
+            // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
+            // common state right before command list submission. TransitionUsageNow itself ensures
+            // no unnecessary transitions happen if the resource is already in the common state.
+            for (Texture* texture : mSharedTextures) {
+                DAWN_TRY(texture->AcquireKeyedMutex());
+                texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
+            }
+
+            // On close failure, Release() drops the command list so the context is recreated.
+            MaybeError error =
+                CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+            if (error.IsError()) {
+                Release();
+                DAWN_TRY(std::move(error));
+            }
+            // Heaps referenced by this command list must be resident before execution.
+            DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
+                mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
+
+            if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
+                uint64_t gpuTimestamp;
+                uint64_t cpuTimestamp;
+                FILETIME fileTimeNonPrecise;
+                SYSTEMTIME systemTimeNonPrecise;
+
+                // Both supported since Windows 2000, have an accuracy of 1ms
+                GetSystemTimeAsFileTime(&fileTimeNonPrecise);
+                GetSystemTime(&systemTimeNonPrecise);
+                // Query CPU and GPU timestamps at almost the same time
+                device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
+
+                uint64_t gpuFrequency;
+                uint64_t cpuFrequency;
+                LARGE_INTEGER cpuFrequencyLargeInteger;
+                device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
+                QueryPerformanceFrequency(
+                    &cpuFrequencyLargeInteger);  // Supported since Windows 2000
+                cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
+
+                std::string timingInfo = absl::StrFormat(
+                    "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
+                    "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
+                    "%u",
+                    systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth,
+                    systemTimeNonPrecise.wDay, systemTimeNonPrecise.wHour,
+                    systemTimeNonPrecise.wMinute, systemTimeNonPrecise.wSecond,
+                    systemTimeNonPrecise.wMilliseconds,
+                    (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
+                        fileTimeNonPrecise.dwLowDateTime,
+                    cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
+
+                TRACE_EVENT_INSTANT1(
+                    device->GetPlatform(), General,
+                    "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
+                    timingInfo.c_str());
+            }
+
+            ID3D12CommandList* d3d12CommandList = GetCommandList();
+            device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
+
+            // Release keyed mutexes only after submission so other APIs see completed work.
+            for (Texture* texture : mSharedTextures) {
+                texture->ReleaseKeyedMutex();
+            }
+
+            mIsOpen = false;
+            mSharedTextures.clear();
+            mHeapsPendingUsage.clear();
+        }
+        return {};
+    }
+
+    // Records that |heap| is used by commands executing at |serial| so residency can be
+    // ensured before submission.
+    void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
+        // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
+        // tracking it more than once.
+        if (heap->GetLastUsage() < serial) {
+            heap->SetLastUsage(serial);
+            mHeapsPendingUsage.push_back(heap);
+        }
+    }
+
+    // Returns the underlying command list. Only valid while the context is open.
+    ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
+        ASSERT(mD3d12CommandList != nullptr);
+        ASSERT(IsOpen());
+        return mD3d12CommandList.Get();
+    }
+
+    // This function will fail on Windows versions prior to 1809. Support must be queried through
+    // the device before calling.
+    // Returns the ID3D12GraphicsCommandList4 interface used for the native render pass API.
+    ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
+        ASSERT(IsOpen());
+        ASSERT(mD3d12CommandList != nullptr);
+        return mD3d12CommandList4.Get();
+    }
+
+    // Drops the cached command lists and all per-submission state, returning the context to
+    // a closed, empty state. Used on error paths (e.g. a failed Close()).
+    void CommandRecordingContext::Release() {
+        mD3d12CommandList.Reset();
+        mD3d12CommandList4.Reset();
+        mIsOpen = false;
+        mSharedTextures.clear();
+        mHeapsPendingUsage.clear();
+        mTempBuffers.clear();
+    }
+
+    // True between a successful Open() and the next ExecuteCommandList()/Release().
+    bool CommandRecordingContext::IsOpen() const {
+        return mIsOpen;
+    }
+
+    // Keeps a temporary buffer alive (via ref) until Release() clears the list.
+    void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
+        mTempBuffers.emplace_back(tempBuffer);
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h
new file mode 100644
index 00000000000..21a60f23ed5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/CommandRecordingContext.h
@@ -0,0 +1,58 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <set>
+
+namespace dawn::native::d3d12 {
+ class CommandAllocatorManager;
+ class Device;
+ class Heap;
+ class Texture;
+
+    // Wraps a reusable ID3D12GraphicsCommandList plus the per-submission bookkeeping needed
+    // to execute it: shared textures (keyed mutex + COMMON-state transitions), heaps whose
+    // residency must be ensured, and temporary buffers kept alive until release.
+    class CommandRecordingContext {
+      public:
+        // Registers a cross-API shared texture for mutex/transition handling at submit.
+        void AddToSharedTextureList(Texture* texture);
+        // Reserves an allocator and resets/creates the command list for recording.
+        MaybeError Open(ID3D12Device* d3d12Device,
+                        CommandAllocatorManager* commandAllocationManager);
+
+        ID3D12GraphicsCommandList* GetCommandList() const;
+        // Render-pass-capable interface; only valid on Windows 1809+ (query support first).
+        ID3D12GraphicsCommandList4* GetCommandList4() const;
+        // Drops command lists and all pending state (error-path cleanup).
+        void Release();
+        bool IsOpen() const;
+
+        // Closes the list and submits it to the device's queue.
+        MaybeError ExecuteCommandList(Device* device);
+
+        // Tracks a heap used at |serial| for residency management.
+        void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
+
+        // Keeps |tempBuffer| alive until Release().
+        void AddToTempBuffers(Ref<Buffer> tempBuffer);
+
+      private:
+        ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+        // Same list cast to the render-pass-capable interface; may be null pre-1809.
+        ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
+        bool mIsOpen = false;
+        std::set<Texture*> mSharedTextures;
+        std::vector<Heap*> mHeapsPendingUsage;
+
+        std::vector<Ref<Buffer>> mTempBuffers;
+    };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
new file mode 100644
index 00000000000..6df1049ffbf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
@@ -0,0 +1,105 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+ }
+
+ MaybeError ComputePipeline::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ uint32_t compileFlags = 0;
+
+ if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+ !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+ compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+ }
+
+ if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+ compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+ }
+
+        // SPIRV-Cross does matrix multiplication expecting row major matrices
+ compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule* module = ToBackend(computeStage.module.Get());
+
+ D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
+ d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
+
+ CompiledShader compiledShader;
+ DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
+ ToBackend(GetLayout()), compileFlags));
+ d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
+ auto* d3d12Device = device->GetD3D12Device();
+ DAWN_TRY(CheckHRESULT(
+ d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
+ "D3D12 creating pipeline state"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ ComputePipeline::~ComputePipeline() = default;
+
+ void ComputePipeline::DestroyImpl() {
+ ComputePipelineBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+ }
+
+ ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
+ return mPipelineState.Get();
+ }
+
+ void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
+ GetLabel());
+ }
+
+ void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+ std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+ userdata);
+ CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
+ bool ComputePipeline::UsesNumWorkgroups() const {
+ return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
+ }
+
+ ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
+ if (UsesNumWorkgroups()) {
+ return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+ }
+ return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h
new file mode 100644
index 00000000000..03a02595226
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ComputePipelineD3D12.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
+#define DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class ComputePipeline final : public ComputePipelineBase {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ComputePipeline() = delete;
+
+ ID3D12PipelineState* GetPipelineState() const;
+
+ MaybeError Initialize() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ bool UsesNumWorkgroups() const;
+
+ ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
+
+ private:
+ ~ComputePipeline() override;
+
+ void DestroyImpl() override;
+
+ using ComputePipelineBase::ComputePipelineBase;
+ ComPtr<ID3D12PipelineState> mPipelineState;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp
new file mode 100644
index 00000000000..d48d41fe7a5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.cpp
@@ -0,0 +1,187 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// D3D11on12Util.cpp: contains utilities to wrap D3D12 resources with 11on12 so they
+// can be synchronized between D3D11 and D3D12 via IDXGIKeyedMutex.
+
+#include "dawn/native/d3d12/D3D11on12Util.h"
+
+#include "dawn/common/HashUtils.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include <dawn/native/D3D12Backend.h>
+
+namespace dawn::native::d3d12 {
+
+ void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
+ if (d3d11on12Device == nullptr) {
+ return;
+ }
+
+ ComPtr<ID3D11Device> d3d11Device;
+ if (FAILED(d3d11on12Device.As(&d3d11Device))) {
+ return;
+ }
+
+ ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+ d3d11Device->GetImmediateContext(&d3d11DeviceContext);
+
+ ASSERT(d3d11DeviceContext != nullptr);
+
+ // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+ // are not released until work is submitted to the device context and flushed.
+ // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+
+ // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+ // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+ ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+ if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
+ return;
+ }
+
+ d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
+ d3d11DeviceContext2->Flush();
+ }
+
+ D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
+ ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
+ }
+
+ D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+ ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mD3D11on12Device(std::move(d3d11On12Device)) {
+ }
+
+ D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
+ if (mDXGIKeyedMutex == nullptr) {
+ return;
+ }
+
+ if (mAcquireCount > 0) {
+ mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+ }
+
+ ComPtr<ID3D11Resource> d3d11Resource;
+ if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
+ return;
+ }
+
+ ASSERT(mD3D11on12Device != nullptr);
+
+ ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+ mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+
+ d3d11Resource.Reset();
+ mDXGIKeyedMutex.Reset();
+
+ Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+ }
+
+ MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
+ ASSERT(mDXGIKeyedMutex != nullptr);
+ ASSERT(mAcquireCount >= 0);
+ if (mAcquireCount == 0) {
+ DAWN_TRY(CheckHRESULT(
+ mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
+ "D3D12 acquiring shared mutex"));
+ }
+ mAcquireCount++;
+ return {};
+ }
+
+ void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
+ ASSERT(mDXGIKeyedMutex != nullptr);
+ ASSERT(mAcquireCount > 0);
+ mAcquireCount--;
+ if (mAcquireCount == 0) {
+ mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+ }
+ }
+
+ size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a) const {
+ size_t hash = 0;
+ HashCombine(&hash, a->mD3D11on12Device.Get());
+ return hash;
+ }
+
+ bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const {
+ return a->mD3D11on12Device == b->mD3D11on12Device;
+ }
+
+ D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
+
+ D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
+
+ Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
+ WGPUDevice device,
+ ID3D12Resource* d3d12Resource) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
+ // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
+ // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
+ // using the 11on12 device as the cache key.
+ ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
+ if (d3d11on12Device == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 device for external image";
+ return nullptr;
+ }
+
+ D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return *iter;
+ }
+
+ // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+ // are a viable alternative but are, unfortunately, not available on all versions of Windows
+ // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+ // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+ ComPtr<ID3D11Texture2D> d3d11Texture;
+ D3D11_RESOURCE_FLAGS resourceFlags;
+ resourceFlags.BindFlags = 0;
+ resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+ resourceFlags.CPUAccessFlags = 0;
+ resourceFlags.StructureByteStride = 0;
+ if (FAILED(d3d11on12Device->CreateWrappedResource(
+ d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
+ D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
+ return nullptr;
+ }
+
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+ if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
+ return nullptr;
+ }
+
+ // Keep this cache from growing unbounded.
+ // TODO(dawn:625): Consider using a replacement policy based cache.
+ if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
+ mCache.clear();
+ }
+
+ Ref<D3D11on12ResourceCacheEntry> entry =
+ AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
+ mCache.insert(entry);
+
+ return entry;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h
new file mode 100644
index 00000000000..af7e680be68
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D11on12Util.h
@@ -0,0 +1,92 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D11ON12UTIL_H_
+#define DAWNNATIVE_D3D11ON12UTIL_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <dawn/native/DawnNative.h>
+#include <memory>
+#include <unordered_set>
+
+struct ID3D11On12Device;
+struct IDXGIKeyedMutex;
+
+namespace dawn::native::d3d12 {
+
+ // Wraps 11 wrapped resources in a cache.
+ class D3D11on12ResourceCacheEntry : public RefCounted {
+ public:
+ D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
+ D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
+ ComPtr<ID3D11On12Device> d3d11on12Device);
+ ~D3D11on12ResourceCacheEntry();
+
+ MaybeError AcquireKeyedMutex();
+ void ReleaseKeyedMutex();
+
+        // Functors necessary for the
+        // unordered_set<Ref<D3D11on12ResourceCacheEntry>>-based cache.
+ struct HashFunc {
+ size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
+ };
+
+ struct EqualityFunc {
+ bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const;
+ };
+
+ private:
+ ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
+ ComPtr<ID3D11On12Device> mD3D11on12Device;
+ int64_t mAcquireCount = 0;
+ };
+
+ // |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
+ // Each entry represents a 11 resource that is exclusively accessed by Dawn device.
+ // Since each Dawn device creates and stores a 11on12 device, the 11on12 device
+ // is used as the key for the cache entry which ensures only the same 11 wrapped
+ // resource is re-used and also fully released.
+ //
+ // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
+ // and special release code per ProduceTexture(device).
+ class D3D11on12ResourceCache {
+ public:
+ D3D11on12ResourceCache();
+ ~D3D11on12ResourceCache();
+
+ Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
+ WGPUDevice device,
+ ID3D12Resource* d3d12Resource);
+
+ private:
+ // TODO(dawn:625): Figure out a large enough cache size.
+ static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
+
+ // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
+ // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
+ // waiting until Dawn device to shutdown.
+ using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
+ D3D11on12ResourceCacheEntry::HashFunc,
+ D3D11on12ResourceCacheEntry::EqualityFunc>;
+
+ Cache mCache;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D11ON12UTIL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp
new file mode 100644
index 00000000000..18d7145c83e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Backend.cpp
@@ -0,0 +1,179 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// D3D12Backend.cpp: contains the definition of symbols exported by D3D12Backend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/D3D12Backend.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/NativeSwapChainImplD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
+ return ToBackend(FromAPI(device))->GetD3D12Device();
+ }
+
+ DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+ impl.textureUsage = WGPUTextureUsage_Present;
+
+ return impl;
+ }
+
+ WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+ const DawnSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+ }
+
+ ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
+ : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
+ }
+
+ ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
+ const WGPUTextureDescriptor* descriptor)
+ : mD3D12Resource(std::move(d3d12Resource)),
+ mUsage(descriptor->usage),
+ mDimension(descriptor->dimension),
+ mSize(descriptor->size),
+ mFormat(descriptor->format),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount) {
+ ASSERT(!descriptor->nextInChain ||
+ descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
+ if (descriptor->nextInChain) {
+ mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
+ descriptor->nextInChain)
+ ->internalUsage;
+ }
+ mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
+ }
+
+ ExternalImageDXGI::~ExternalImageDXGI() = default;
+
+ WGPUTexture ExternalImageDXGI::ProduceTexture(
+ WGPUDevice device,
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ // Ensure the texture usage is allowed
+ if (!IsSubset(descriptor->usage, mUsage)) {
+ dawn::ErrorLog() << "Texture usage is not valid for external image";
+ return nullptr;
+ }
+
+ TextureDescriptor textureDescriptor = {};
+ textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
+ textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
+ textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
+ textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
+ textureDescriptor.mipLevelCount = mMipLevelCount;
+ textureDescriptor.sampleCount = mSampleCount;
+
+ DawnTextureInternalUsageDescriptor internalDesc = {};
+ if (mUsageInternal) {
+ textureDescriptor.nextInChain = &internalDesc;
+ internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
+ internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+ }
+
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
+ mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
+ if (d3d11on12Resource == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
+ return nullptr;
+ }
+
+ Ref<TextureBase> texture = backendDevice->CreateD3D12ExternalTexture(
+ &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
+ descriptor->isSwapChainTexture, descriptor->isInitialized);
+
+ return ToAPI(texture.Detach());
+ }
+
+ // static
+ std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
+ WGPUDevice device,
+ const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
+ if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
+ descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
+ return nullptr;
+ }
+
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (backendDevice->ConsumedError(
+ ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
+ return nullptr;
+ }
+
+ if (backendDevice->ConsumedError(
+ ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
+ "validating that a D3D12 external image can be wrapped with %s",
+ textureDescriptor)) {
+ return nullptr;
+ }
+
+ if (backendDevice->ConsumedError(
+ ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
+ return nullptr;
+ }
+
+ // Shared handle is assumed to support resource sharing capability. The resource
+ // shared capability tier must agree to share resources between D3D devices.
+ const Format* format =
+ backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
+ if (format->IsMultiPlanar()) {
+ if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
+ backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
+ return nullptr;
+ }
+ }
+
+ std::unique_ptr<ExternalImageDXGI> result(
+ new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
+ return result;
+ }
+
+ uint64_t SetExternalMemoryReservation(WGPUDevice device,
+ uint64_t requestedReservationSize,
+ MemorySegment memorySegment) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+
+ return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
+ memorySegment, requestedReservationSize);
+ }
+
+ AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {
+ }
+
+ AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
+ }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp
new file mode 100644
index 00000000000..23a95568d46
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.cpp
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/D3D12Error.h"
+
+#include <iomanip>
+#include <sstream>
+#include <string>
+
+namespace dawn::native::d3d12 {
+ MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
+ if (DAWN_LIKELY(SUCCEEDED(result))) {
+ return {};
+ }
+
+ std::ostringstream messageStream;
+ messageStream << context << " failed with ";
+ if (result == E_FAKE_ERROR_FOR_TESTING) {
+ messageStream << "E_FAKE_ERROR_FOR_TESTING";
+ } else {
+ messageStream << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex
+ << result;
+ }
+
+ if (result == DXGI_ERROR_DEVICE_REMOVED) {
+ return DAWN_DEVICE_LOST_ERROR(messageStream.str());
+ } else {
+ return DAWN_INTERNAL_ERROR(messageStream.str());
+ }
+ }
+
+ MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
+ if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
+ return DAWN_OUT_OF_MEMORY_ERROR(context);
+ }
+
+ return CheckHRESULTImpl(result, context);
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h
new file mode 100644
index 00000000000..f70690ad91f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Error.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12ERROR_H_
+#define DAWNNATIVE_D3D12_D3D12ERROR_H_
+
+#include <d3d12.h>
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorInjector.h"
+
+namespace dawn::native::d3d12 {
+
+ constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
+ constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
+ MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
+
+    // Returns success only if the HRESULT indicates success.
+ MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
+
+    // Uses CheckHRESULT but returns an OOM-specific error when recoverable.
+ MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
+
+#define CheckHRESULT(resultIn, contextIn) \
+ ::dawn::native::d3d12::CheckHRESULTImpl( \
+ INJECT_ERROR_OR_RUN(resultIn, E_FAKE_ERROR_FOR_TESTING), contextIn)
+#define CheckOutOfMemoryHRESULT(resultIn, contextIn) \
+ ::dawn::native::d3d12::CheckOutOfMemoryHRESULTImpl( \
+ INJECT_ERROR_OR_RUN(resultIn, E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING, \
+ E_FAKE_ERROR_FOR_TESTING), \
+ contextIn)
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp
new file mode 100644
index 00000000000..ebd629b2066
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.cpp
@@ -0,0 +1,122 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/D3D12Info.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+namespace dawn::native::d3d12 {
+
+ ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+ D3D12DeviceInfo info = {};
+
+ // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
+ // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
+ // for backwards compat.
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
+ D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
+ DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
+ &arch, sizeof(arch)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.isUMA = arch.UMA;
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
+ DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+ &options, sizeof(options)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.resourceHeapTier = options.ResourceHeapTier;
+
+ // Windows builds 1809 and above can use the D3D12 render pass API. If we query
+ // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
+ // the render pass API.
+ info.supportsRenderPass = false;
+ D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
+ // Performance regressions been observed when using a render pass on Intel graphics
+ // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
+ // pass on these platforms.
+ if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
+ !gpu_info::IsIntel(adapter.GetVendorId())) {
+ info.supportsRenderPass = true;
+ }
+ }
+
+ // Used to share resources cross-API. If we query CheckFeatureSupport for
+ // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
+ info.supportsSharedResourceCapabilityTier1 = false;
+ D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
+ // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
+ // is used by Dawn, check for Tier 1.
+ if (featureOptions4.SharedResourceCompatibilityTier >=
+ D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
+ info.supportsSharedResourceCapabilityTier1 = true;
+ }
+ }
+
+ D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
+ {D3D_SHADER_MODEL_6_1},
+ {D3D_SHADER_MODEL_6_0},
+ {D3D_SHADER_MODEL_5_1}};
+ uint32_t driverShaderModel = 0;
+ for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
+ driverShaderModel = shaderModel.HighestShaderModel;
+ break;
+ }
+ }
+
+ if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
+ return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+ }
+
+ // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
+ ASSERT(driverShaderModel <= 0xFF);
+ uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
+ uint32_t shaderModelMinor = (driverShaderModel & 0xF);
+
+ ASSERT(shaderModelMajor < 10);
+ ASSERT(shaderModelMinor < 10);
+ info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
+
+        // Profiles are always <stage>s_<major>_<minor> so we build the s_<major>_<minor>
+        // suffix and prepend each stage's letter to it.
+ std::wstring profileSuffix = L"s_M_n";
+ profileSuffix[2] = wchar_t('0' + shaderModelMajor);
+ profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+
+ info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
+ info.supportsShaderFloat16 = driverShaderModel >= D3D_SHADER_MODEL_6_2 &&
+ featureData4.Native16BitShaderOpsSupported;
+ }
+
+ return std::move(info);
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h
new file mode 100644
index 00000000000..83ee83764b4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/D3D12Info.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12INFO_H_
+#define DAWNNATIVE_D3D12_D3D12INFO_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/PerStage.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Adapter;
+
+ struct D3D12DeviceInfo {
+ bool isUMA;
+ uint32_t resourceHeapTier;
+ bool supportsRenderPass;
+ bool supportsShaderFloat16;
+ // shaderModel indicates the maximum supported shader model, for example, the value 62
+ // indicates that current driver supports the maximum shader model is shader model 6.2.
+ uint32_t shaderModel;
+ PerStage<std::wstring> shaderProfiles;
+ bool supportsSharedResourceCapabilityTier1;
+ };
+
+ ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_D3D12INFO_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp
new file mode 100644
index 00000000000..918c7c08451
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.cpp
@@ -0,0 +1,743 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/BackendD3D12.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/CommandAllocatorManager.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/ComputePipelineD3D12.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+#include "dawn/native/d3d12/QueueD3D12.h"
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include "dawn/native/d3d12/SamplerD3D12.h"
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/SwapChainD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+
+ // TODO(dawn:155): Figure out these values.
+ static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
+ static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
+
+ // Value may change in the future to better accomodate large clears.
+ static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4; // 4 Mb
+
+ static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
+
+ // static
+ ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+ const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+ DAWN_TRY(device->Initialize());
+ return device;
+ }
+
+ MaybeError Device::Initialize() {
+ InitTogglesFromDriver();
+
+ mD3d12Device = ToBackend(GetAdapter())->GetDevice();
+
+ ASSERT(mD3d12Device != nullptr);
+
+ // Create device-global objects
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ DAWN_TRY(
+ CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
+ "D3D12 create command queue"));
+
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
+ // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
+ // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
+ // always support timestamps except where there are bugs in Windows container and vGPU
+ // implementations.
+ uint64_t frequency;
+ DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
+ "D3D12 get timestamp frequency"));
+ // Calculate the period in nanoseconds by the frequency.
+ mTimestampPeriod = static_cast<float>(1e9) / frequency;
+ }
+
+ // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
+ // value.
+ mCommandQueue.As(&mD3d12SharingContract);
+
+ DAWN_TRY(
+ CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
+ D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
+ "D3D12 create fence"));
+
+ mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ ASSERT(mFenceEvent != nullptr);
+
+ // Initialize backend services
+ mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
+
+ // Zero sized allocator is never requested and does not need to exist.
+ for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
+ mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+ }
+
+ for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
+ mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ }
+
+ mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+ this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
+
+ mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+ this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
+
+ mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
+
+ mResidencyManager = std::make_unique<ResidencyManager>(this);
+ mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
+
+ // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
+ DAWN_TRY_ASSIGN(
+ mSamplerShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+
+ DAWN_TRY_ASSIGN(
+ mViewShaderVisibleDescriptorAllocator,
+ ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+
+ // Initialize indirect commands
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 3 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 1;
+ programDesc.pArgumentDescs = &argumentDesc;
+
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDispatchIndirectSignature));
+
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+ programDesc.ByteStride = 4 * sizeof(uint32_t);
+
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDrawIndirectSignature));
+
+ argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+ programDesc.ByteStride = 5 * sizeof(uint32_t);
+
+ GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+ IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
+
+ DAWN_TRY(DeviceBase::Initialize(new Queue(this)));
+ // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
+ // device initialization to call NextSerial
+ DAWN_TRY(NextSerial());
+
+ // The environment can only use DXC when it's available. Override the decision if it is not
+ // applicable.
+ DAWN_TRY(ApplyUseDxcToggle());
+
+ DAWN_TRY(CreateZeroBuffer());
+
+ return {};
+ }
+
+ Device::~Device() {
+ Destroy();
+ }
+
+ ID3D12Device* Device::GetD3D12Device() const {
+ return mD3d12Device.Get();
+ }
+
+ ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
+ return mCommandQueue;
+ }
+
+ ID3D12SharingContract* Device::GetSharingContract() const {
+ return mD3d12SharingContract.Get();
+ }
+
+ ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
+ return mDispatchIndirectSignature;
+ }
+
+ ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
+ return mDrawIndirectSignature;
+ }
+
+ ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
+ return mDrawIndexedIndirectSignature;
+ }
+
+ ComPtr<IDXGIFactory4> Device::GetFactory() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetFactory();
+ }
+
+ MaybeError Device::ApplyUseDxcToggle() {
+ if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
+ ForceSetToggle(Toggle::UseDXC, false);
+ } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+ // Currently we can only use DXC to compile HLSL shaders using float16.
+ ForceSetToggle(Toggle::UseDXC, true);
+ }
+
+ if (IsToggleEnabled(Toggle::UseDXC)) {
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
+ DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
+ }
+
+ return {};
+ }
+
+ ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
+ }
+
+ ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
+ }
+
+ ComPtr<IDxcValidator> Device::GetDxcValidator() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
+ }
+
+ const PlatformFunctions* Device::GetFunctions() const {
+ return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
+ }
+
+ CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
+ return mCommandAllocatorManager.get();
+ }
+
+ ResidencyManager* Device::GetResidencyManager() const {
+ return mResidencyManager.get();
+ }
+
+ ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
+ // Callers of GetPendingCommandList do so to record commands. Only reserve a command
+ // allocator when it is needed so we don't submit empty command lists
+ if (!mPendingCommands.IsOpen()) {
+ DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
+ }
+ return &mPendingCommands;
+ }
+
+ MaybeError Device::CreateZeroBuffer() {
+ BufferDescriptor zeroBufferDescriptor;
+ zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+ zeroBufferDescriptor.size = kZeroBufferSize;
+ zeroBufferDescriptor.label = "ZeroBuffer_Internal";
+ DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
+
+ return {};
+ }
+
+ MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
+ BufferBase* destination,
+ uint64_t offset,
+ uint64_t size) {
+ // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
+ // the allocation of the staging buffer causes various end2end tests that monitor heap usage
+ // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
+ // used to avoid that.
+ if (!mZeroBuffer->IsDataInitialized()) {
+ DynamicUploader* uploader = GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+
+ memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
+
+ CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, mZeroBuffer.Get(), 0,
+ kZeroBufferSize);
+
+ mZeroBuffer->SetIsDataInitialized();
+ }
+
+ Buffer* dstBuffer = ToBackend(destination);
+
+ // Necessary to ensure residency of the zero buffer.
+ mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+ while (size > 0) {
+ uint64_t copySize = std::min(kZeroBufferSize, size);
+ commandContext->GetCommandList()->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0,
+ copySize);
+
+ offset += copySize;
+ size -= copySize;
+ }
+
+ return {};
+ }
+
+ MaybeError Device::TickImpl() {
+ // Perform cleanup operations to free unused objects
+ ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+ mResourceAllocatorManager->Tick(completedSerial);
+ DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
+ mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
+ mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
+ mRenderTargetViewAllocator->Tick(completedSerial);
+ mDepthStencilViewAllocator->Tick(completedSerial);
+ mUsedComObjectRefs.ClearUpTo(completedSerial);
+
+ if (mPendingCommands.IsOpen()) {
+ DAWN_TRY(ExecutePendingCommandContext());
+ DAWN_TRY(NextSerial());
+ }
+
+ DAWN_TRY(CheckDebugLayerAndGenerateErrors());
+
+ return {};
+ }
+
+ MaybeError Device::NextSerial() {
+ IncrementLastSubmittedCommandSerial();
+
+ return CheckHRESULT(
+ mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
+ "D3D12 command queue signal fence");
+ }
+
+ MaybeError Device::WaitForSerial(ExecutionSerial serial) {
+ DAWN_TRY(CheckPassedSerials());
+ if (GetCompletedCommandSerial() < serial) {
+ DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
+ "D3D12 set event on completion"));
+ WaitForSingleObject(mFenceEvent, INFINITE);
+ DAWN_TRY(CheckPassedSerials());
+ }
+ return {};
+ }
+
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
+ if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
+ // GetCompletedValue returns UINT64_MAX if the device was removed.
+ // Try to query the failure reason.
+ DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
+ "ID3D12Device::GetDeviceRemovedReason"));
+ // Otherwise, return a generic device lost error.
+ return DAWN_DEVICE_LOST_ERROR("Device lost");
+ }
+
+ if (completedSerial <= GetCompletedCommandSerial()) {
+ return ExecutionSerial(0);
+ }
+
+ return completedSerial;
+ }
+
+ void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
+ mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
+ }
+
+ MaybeError Device::ExecutePendingCommandContext() {
+ return mPendingCommands.ExecuteCommandList(this);
+ }
+
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return BindGroup::Create(this, descriptor);
+ }
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+ }
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+ }
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
+ }
+ Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return PipelineLayout::Create(this, descriptor);
+ }
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+ }
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
+ }
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ return ShaderModule::Create(this, descriptor, parseResult);
+ }
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return OldSwapChain::Create(this, descriptor);
+ }
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+ }
+ ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+ }
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return TextureView::Create(texture, descriptor);
+ }
+ void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+ }
+ void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+ }
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer =
+ std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+ }
+
+ MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+
+ Buffer* dstBuffer = ToBackend(destination);
+
+ bool cleared;
+ DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+ commandRecordingContext, destinationOffset, size));
+ DAWN_UNUSED(cleared);
+
+ CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
+ destinationOffset, size);
+
+ return {};
+ }
+
+ void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ ASSERT(commandContext != nullptr);
+ Buffer* dstBuffer = ToBackend(destination);
+ StagingBuffer* srcBuffer = ToBackend(source);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+ commandContext->GetCommandList()->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
+ sourceOffset, size);
+ }
+
+ MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+ Texture* texture = ToBackend(dst->texture.Get());
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+ if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+
+ RecordBufferTextureCopyWithBufferHandle(
+ BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
+ ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
+ copySizePixels);
+
+ return {};
+ }
+
+ void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
+ mResourceAllocatorManager->DeallocateMemory(allocation);
+ }
+
+ ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage) {
+ return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
+ initialUsage);
+ }
+
+ Ref<TextureBase> Device::CreateD3D12ExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized) {
+ Ref<Texture> dawnTexture;
+ if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+ std::move(d3d11on12Resource),
+ isSwapChainTexture, isInitialized),
+ &dawnTexture)) {
+ return nullptr;
+ }
+ return {dawnTexture};
+ }
+
+ ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
+ if (mD3d11On12Device == nullptr) {
+ ComPtr<ID3D11Device> d3d11Device;
+ D3D_FEATURE_LEVEL d3dFeatureLevel;
+ IUnknown* const iUnknownQueue = mCommandQueue.Get();
+ if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
+ &iUnknownQueue, 1, 1, &d3d11Device,
+ nullptr, &d3dFeatureLevel))) {
+ return nullptr;
+ }
+
+ ComPtr<ID3D11On12Device> d3d11on12Device;
+ HRESULT hr = d3d11Device.As(&d3d11on12Device);
+ ASSERT(SUCCEEDED(hr));
+
+ mD3d11On12Device = std::move(d3d11on12Device);
+ }
+ return mD3d11On12Device;
+ }
+
+ const D3D12DeviceInfo& Device::GetDeviceInfo() const {
+ return ToBackend(GetAdapter())->GetDeviceInfo();
+ }
+
+ void Device::InitTogglesFromDriver() {
+ const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
+ SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
+ SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+ SetToggle(Toggle::UseD3D12ResidencyManagement, true);
+ SetToggle(Toggle::UseDXC, false);
+
+ // Disable optimizations when using FXC
+ // See https://crbug.com/dawn/1203
+ SetToggle(Toggle::FxcOptimizations, false);
+
+ // By default use the maximum shader-visible heap size allowed.
+ SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+
+ uint32_t deviceId = GetAdapter()->GetDeviceId();
+ uint32_t vendorId = GetAdapter()->GetVendorId();
+
+ // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
+ // See http://crbug.com/1161355 for more information.
+ if (gpu_info::IsIntel(vendorId) &&
+ (gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
+ gpu_info::IsCoffeelake(deviceId))) {
+ constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
+ if (gpu_info::CompareD3DDriverVersion(vendorId,
+ ToBackend(GetAdapter())->GetDriverVersion(),
+ kFirstDriverVersionWithFix) < 0) {
+ SetToggle(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ true);
+ }
+ }
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ // Immediately forget about all pending commands
+ mPendingCommands.Release();
+
+ DAWN_TRY(NextSerial());
+ // Wait for all in-flight commands to finish executing
+ DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
+
+ return {};
+ }
+
+ MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+ if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+ return {};
+ }
+
+ ComPtr<ID3D12InfoQueue> infoQueue;
+ DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+ "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+ uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+
+ // Check if any errors have occurred otherwise we would be creating an empty error. Note
+ // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
+ // because we only convert WARNINGS or higher messages to dawn errors.
+ if (totalErrors == 0) {
+ return {};
+ }
+
+ std::ostringstream messages;
+ uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
+ for (uint64_t i = 0; i < errorsToPrint; ++i) {
+ SIZE_T messageLength = 0;
+ HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
+ if (FAILED(hr)) {
+ messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+ continue;
+ }
+
+ std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
+ D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
+ hr = infoQueue->GetMessage(i, message, &messageLength);
+ if (FAILED(hr)) {
+ messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
+ continue;
+ }
+
+ messages << message->pDescription << " (" << message->ID << ")\n";
+ }
+ if (errorsToPrint < totalErrors) {
+ messages << (totalErrors - errorsToPrint) << " messages silenced\n";
+ }
+ // We only print up to the first kMaxDebugMessagesToPrint errors
+ infoQueue->ClearStoredMessages();
+
+ return DAWN_INTERNAL_ERROR(messages.str());
+ }
+
+ void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+
+ // Immediately forget about all pending commands for the case where device is lost on its
+ // own and WaitForIdleForDestruction isn't called.
+ mPendingCommands.Release();
+
+ if (mFenceEvent != nullptr) {
+ ::CloseHandle(mFenceEvent);
+ }
+
+ // Release recycled resource heaps.
+ if (mResourceAllocatorManager != nullptr) {
+ mResourceAllocatorManager->DestroyPool();
+ }
+
+ // We need to handle clearing up com object refs that were enqeued after TickImpl
+ mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
+
+ ASSERT(mUsedComObjectRefs.Empty());
+ ASSERT(!mPendingCommands.IsOpen());
+ }
+
+ ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
+ return mViewShaderVisibleDescriptorAllocator.get();
+ }
+
+ ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
+ return mSamplerShaderVisibleDescriptorAllocator.get();
+ }
+
+ StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
+ uint32_t descriptorCount) const {
+ ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mViewAllocators[allocatorIndex].get();
+ }
+
+ StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
+ uint32_t descriptorCount) const {
+ ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mSamplerAllocators[allocatorIndex].get();
+ }
+
+ StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
+ return mRenderTargetViewAllocator.get();
+ }
+
+ StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
+ return mDepthStencilViewAllocator.get();
+ }
+
+ SamplerHeapCache* Device::GetSamplerHeapCache() {
+ return mSamplerHeapCache.get();
+ }
+
+ uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
+ }
+
+ // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
+ // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
+ // Current implementations would try to allocate additional 511 bytes,
+ // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
+ uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+ }
+
+ float Device::GetTimestampPeriodInNS() const {
+ return mTimestampPeriod;
+ }
+
+ bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const {
+ return ToBackend(computePipeline)->UsesNumWorkgroups();
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h
new file mode 100644
index 00000000000..1a837929a38
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/DeviceD3D12.h
@@ -0,0 +1,265 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_DEVICED3D12_H_
+#define DAWNNATIVE_D3D12_DEVICED3D12_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Info.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ class CommandAllocatorManager;
+ class PlatformFunctions;
+ class ResidencyManager;
+ class ResourceAllocatorManager;
+ class SamplerHeapCache;
+ class ShaderVisibleDescriptorAllocator;
+ class StagingDescriptorAllocator;
+
+#define ASSERT_SUCCESS(hr) \
+ do { \
+ HRESULT succeeded = hr; \
+ ASSERT(SUCCEEDED(succeeded)); \
+ } while (0)
+
+ // Definition of backend types
+ class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+ const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize();
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ID3D12Device* GetD3D12Device() const;
+ ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
+ ID3D12SharingContract* GetSharingContract() const;
+
+ ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
+ ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
+ ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
+
+ CommandAllocatorManager* GetCommandAllocatorManager() const;
+ ResidencyManager* GetResidencyManager() const;
+
+ const PlatformFunctions* GetFunctions() const;
+ ComPtr<IDXGIFactory4> GetFactory() const;
+ ComPtr<IDxcLibrary> GetDxcLibrary() const;
+ ComPtr<IDxcCompiler> GetDxcCompiler() const;
+ ComPtr<IDxcValidator> GetDxcValidator() const;
+
+ ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
+
+ MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+
+ const D3D12DeviceInfo& GetDeviceInfo() const;
+
+ MaybeError NextSerial();
+ MaybeError WaitForSerial(ExecutionSerial serial);
+
+ void ReferenceUntilUnused(ComPtr<IUnknown> object);
+
+ MaybeError ExecutePendingCommandContext();
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+
+ void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ ResultOrError<ResourceHeapAllocation> AllocateMemory(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+ ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
+ ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
+
+ // Returns nullptr when descriptor count is zero.
+ StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(
+ uint32_t descriptorCount) const;
+
+ StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
+ uint32_t descriptorCount) const;
+
+ SamplerHeapCache* GetSamplerHeapCache();
+
+ StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
+
+ StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
+
+ Ref<TextureBase> CreateD3D12ExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized);
+
+ ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
+
+ void InitTogglesFromDriver();
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+ ComputePipelineBase* computePipeline) const override;
+
+ private:
+ using DeviceBase::DeviceBase;
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ MaybeError CheckDebugLayerAndGenerateErrors();
+
+ MaybeError ApplyUseDxcToggle();
+
+ MaybeError CreateZeroBuffer();
+
+ ComPtr<ID3D12Fence> mFence;
+ HANDLE mFenceEvent = nullptr;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
+ ComPtr<ID3D12CommandQueue> mCommandQueue;
+ ComPtr<ID3D12SharingContract> mD3d12SharingContract;
+
+ // 11on12 device corresponding to mCommandQueue
+ ComPtr<ID3D11On12Device> mD3d11On12Device;
+
+ ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
+ ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
+ ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
+
+ CommandRecordingContext mPendingCommands;
+
+ SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
+
+ std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
+ std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
+ std::unique_ptr<ResidencyManager> mResidencyManager;
+
+ static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
+ 3 * kMaxSamplersPerShaderStage;
+ static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
+ kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
+
+ static constexpr uint32_t kNumSamplerDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
+ static constexpr uint32_t kNumViewDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
+
+        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+        // the range [0, kMaxViewDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
+ mViewAllocators;
+
+        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+        // the range [0, kMaxSamplerDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
+ mSamplerAllocators;
+
+ std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
+
+ std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
+
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
+
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
+
+ // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
+ // release is called.
+ std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
+
+ // A buffer filled with zeros that is used to copy into other buffers when they need to be
+ // cleared.
+ Ref<Buffer> mZeroBuffer;
+
+ // The number of nanoseconds required for a timestamp query to be incremented by 1
+ float mTimestampPeriod = 1.0f;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_DEVICED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h
new file mode 100644
index 00000000000..a7aedb786ef
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/Forward.h
@@ -0,0 +1,69 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_FORWARD_H_
+#define DAWNNATIVE_D3D12_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::d3d12 {
+
+ class Adapter;
+ class BindGroup;
+ class BindGroupLayout;
+ class Buffer;
+ class CommandBuffer;
+ class ComputePipeline;
+ class Device;
+ class Heap;
+ class PipelineLayout;
+ class QuerySet;
+ class Queue;
+ class RenderPipeline;
+ class Sampler;
+ class ShaderModule;
+ class StagingBuffer;
+ class SwapChain;
+ class Texture;
+ class TextureView;
+
+ struct D3D12BackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = Heap;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+ };
+
+ template <typename T>
+ auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
+ return ToBackendBase<D3D12BackendTraits>(common);
+ }
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
new file mode 100644
index 00000000000..e5d4fb931a8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(
+ D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+ ExecutionSerial lastUsageSerial,
+ HeapVersionID heapSerial)
+ : mBaseDescriptor(baseDescriptor),
+ mLastUsageSerial(lastUsageSerial),
+ mHeapSerial(heapSerial) {
+ }
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+ return mBaseDescriptor;
+ }
+
+ ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
+ return mLastUsageSerial;
+ }
+
+ HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
+ return mHeapSerial;
+ }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
new file mode 100644
index 00000000000..7f7ce1ecb67
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
+#define DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
+
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ // Wrapper for a handle into a GPU-only descriptor heap.
+ class GPUDescriptorHeapAllocation {
+ public:
+ GPUDescriptorHeapAllocation() = default;
+ GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+ ExecutionSerial lastUsageSerial,
+ HeapVersionID heapSerial);
+
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+ ExecutionSerial GetLastUsageSerial() const;
+ HeapVersionID GetHeapSerial() const;
+
+ private:
+ D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+ ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
+ HeapVersionID mHeapSerial = HeapVersionID(0);
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
new file mode 100644
index 00000000000..5a26be305d7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
@@ -0,0 +1,71 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ HeapAllocator::HeapAllocator(Device* device,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_HEAP_FLAGS heapFlags,
+ MemorySegment memorySegment)
+ : mDevice(device),
+ mHeapType(heapType),
+ mHeapFlags(heapFlags),
+ mMemorySegment(memorySegment) {
+ }
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
+ uint64_t size) {
+ D3D12_HEAP_DESC heapDesc;
+ heapDesc.SizeInBytes = size;
+ heapDesc.Properties.Type = mHeapType;
+ heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ heapDesc.Properties.CreationNodeMask = 0;
+ heapDesc.Properties.VisibleNodeMask = 0;
+ // It is preferred to use a size that is a multiple of the alignment.
+ // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
+ // if the heap size is too small, the VMM would fragment.
+ // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
+ heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+ heapDesc.Flags = mHeapFlags;
+
+ // CreateHeap will implicitly make the created heap resident. We must ensure enough free
+ // memory exists before allocating to avoid an out-of-memory error when overcommitted.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
+
+ ComPtr<ID3D12Heap> d3d12Heap;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
+ "ID3D12Device::CreateHeap"));
+
+ std::unique_ptr<ResourceHeapBase> heapBase =
+ std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
+
+ // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
+ // avoid calling MakeResident a second time.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
+ return std::move(heapBase);
+ }
+
+ void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
+ mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h
new file mode 100644
index 00000000000..055f7393403
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapAllocatorD3D12.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ // Wrapper to allocate a D3D12 heap.
+ class HeapAllocator : public ResourceHeapAllocator {
+ public:
+ HeapAllocator(Device* device,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_HEAP_FLAGS heapFlags,
+ MemorySegment memorySegment);
+ ~HeapAllocator() override = default;
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) override;
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+ private:
+ Device* mDevice;
+ D3D12_HEAP_TYPE mHeapType;
+ D3D12_HEAP_FLAGS mHeapFlags;
+ MemorySegment mMemorySegment;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp
new file mode 100644
index 00000000000..7426757dfdd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.cpp
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+namespace dawn::native::d3d12 {
+ Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+ : Pageable(std::move(d3d12Pageable), memorySegment, size) {
+ mD3d12Pageable.As(&mD3d12Heap);
+ }
+
+    // This function should only be used when mD3d12Pageable was initialized from a
+ // ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
+ // ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
+ // use GetD3D12Pageable().
+ ID3D12Heap* Heap::GetD3D12Heap() const {
+ return mD3d12Heap.Get();
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h
new file mode 100644
index 00000000000..c16036659a6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/HeapD3D12.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPD3D12_H_
+
+#include "dawn/native/ResourceHeap.h"
+#include "dawn/native/d3d12/PageableD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
+ // representing a directly allocated resource. It inherits from Pageable because each Heap must
+ // be represented in the ResidencyManager.
+ class Heap : public ResourceHeapBase, public Pageable {
+ public:
+ Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+
+ ID3D12Heap* GetD3D12Heap() const;
+
+ private:
+ ComPtr<ID3D12Heap> mD3d12Heap;
+ };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_HEAPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h b/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h
new file mode 100644
index 00000000000..1e3dbfbd4d6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/IntegerTypes.h
@@ -0,0 +1,31 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_INTEGERTYPES_H_
+#define DAWNNATIVE_D3D12_INTEGERTYPES_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/TypedInteger.h"
+
+#include <cstdint>
+
+namespace dawn::native::d3d12 {
+
+    // An ID used to disambiguate between multiple uses of the same descriptor heap in the
+ // BindGroup allocations.
+ using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_INTEGERTYPES_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
new file mode 100644
index 00000000000..5156af58300
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -0,0 +1,120 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/NativeSwapChainImplD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
+ DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
+ if (allowedUsages & WGPUTextureUsage_TextureBinding) {
+ usage |= DXGI_USAGE_SHADER_INPUT;
+ }
+ if (allowedUsages & WGPUTextureUsage_StorageBinding) {
+ usage |= DXGI_USAGE_UNORDERED_ACCESS;
+ }
+ if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
+ usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ }
+ return usage;
+ }
+
+ static constexpr unsigned int kFrameCount = 3;
+ } // anonymous namespace
+
+ NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
+ : mWindow(window), mDevice(device), mInterval(1) {
+ }
+
+ NativeSwapChainImpl::~NativeSwapChainImpl() {
+ }
+
+ void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+
+ ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
+ ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
+
+ mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
+
+        // Create the D3D12 swapchain with kFrameCount buffers
+ DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+ swapChainDesc.Width = width;
+ swapChainDesc.Height = height;
+ swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
+ swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
+ swapChainDesc.BufferCount = kFrameCount;
+ swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+ swapChainDesc.SampleDesc.Count = 1;
+ swapChainDesc.SampleDesc.Quality = 0;
+
+ ComPtr<IDXGISwapChain1> swapChain1;
+ ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc,
+ nullptr, nullptr, &swapChain1));
+
+ ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
+
+ // Gather the resources that will be used to present to the swapchain
+ mBuffers.resize(kFrameCount);
+ for (uint32_t i = 0; i < kFrameCount; ++i) {
+ ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
+ }
+
+ // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
+ // used
+ mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
+ nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
+
+ // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
+ // with the buffer. Ideally the synchronization should be all done on the GPU.
+ ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Present() {
+ // This assumes the texture has already been transition to the PRESENT state.
+
+ ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
+ // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
+ ASSERT(mDevice->NextSerial().IsSuccess());
+
+ mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
new file mode 100644
index 00000000000..8ed5ee24a53
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
@@ -0,0 +1,60 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
+#define DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <vector>
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class NativeSwapChainImpl {
+ public:
+ using WSIContext = DawnWSIContextD3D12;
+
+ NativeSwapChainImpl(Device* device, HWND window);
+ ~NativeSwapChainImpl();
+
+ void Init(DawnWSIContextD3D12* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
+
+ wgpu::TextureFormat GetPreferredFormat() const;
+
+ private:
+ HWND mWindow = nullptr;
+ Device* mDevice = nullptr;
+ UINT mInterval;
+
+ ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
+ std::vector<ComPtr<ID3D12Resource>> mBuffers;
+ std::vector<ExecutionSerial> mBufferSerials;
+ uint32_t mCurrentBuffer;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp
new file mode 100644
index 00000000000..13942092700
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.cpp
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PageableD3D12.h"
+
+namespace dawn::native::d3d12 {
+ Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
+ MemorySegment memorySegment,
+ uint64_t size)
+ : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
+ }
+
+ // When a pageable is destroyed, it no longer resides in resident memory, so we must evict
+ // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
+ // ResidencyManager will attempt to use it after it has been deallocated.
+ Pageable::~Pageable() {
+ if (IsInResidencyLRUCache()) {
+ RemoveFromList();
+ }
+ }
+
+ ID3D12Pageable* Pageable::GetD3D12Pageable() const {
+ return mD3d12Pageable.Get();
+ }
+
+ ExecutionSerial Pageable::GetLastUsage() const {
+ return mLastUsage;
+ }
+
+ void Pageable::SetLastUsage(ExecutionSerial serial) {
+ mLastUsage = serial;
+ }
+
+ ExecutionSerial Pageable::GetLastSubmission() const {
+ return mLastSubmission;
+ }
+
+ void Pageable::SetLastSubmission(ExecutionSerial serial) {
+ mLastSubmission = serial;
+ }
+
+ MemorySegment Pageable::GetMemorySegment() const {
+ return mMemorySegment;
+ }
+
+ uint64_t Pageable::GetSize() const {
+ return mSize;
+ }
+
+ bool Pageable::IsInResidencyLRUCache() const {
+ return IsInList();
+ }
+
+ void Pageable::IncrementResidencyLock() {
+ mResidencyLockRefCount++;
+ }
+
+ void Pageable::DecrementResidencyLock() {
+ mResidencyLockRefCount--;
+ }
+
+ bool Pageable::IsResidencyLocked() const {
+ return mResidencyLockRefCount != 0;
+ }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h
new file mode 100644
index 00000000000..19355dc2968
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PageableD3D12.h
@@ -0,0 +1,80 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+#define DAWNNATIVE_D3D12_PAGEABLED3D12_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+ // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
+ // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+ // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+ // LRU cache when it is evicted from resident memory due to budget constraints, or when the
+ // pageable allocation is released.
+ class Pageable : public LinkNode<Pageable> {
+ public:
+ Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+ ~Pageable();
+
+ ID3D12Pageable* GetD3D12Pageable() const;
+
+ // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
+ // used. We must check this serial against the current serial when recording usages to
+ // ensure we do not process residency for this pageable multiple times.
+ ExecutionSerial GetLastUsage() const;
+ void SetLastUsage(ExecutionSerial serial);
+
+ // The residency manager must know the last serial that any portion of the pageable was
+ // submitted to be used so that we can ensure this pageable stays resident in memory at
+ // least until that serial has completed.
+ ExecutionSerial GetLastSubmission() const;
+ void SetLastSubmission(ExecutionSerial serial);
+
+ MemorySegment GetMemorySegment() const;
+
+ uint64_t GetSize() const;
+
+ bool IsInResidencyLRUCache() const;
+
+ // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
+ // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
+ // mapped in a single heap, we must track the number of resources currently locked.
+ void IncrementResidencyLock();
+ void DecrementResidencyLock();
+ bool IsResidencyLocked() const;
+
+ protected:
+ ComPtr<ID3D12Pageable> mD3d12Pageable;
+
+ private:
+ // mLastUsage denotes the last time this pageable was recorded for use.
+ ExecutionSerial mLastUsage = ExecutionSerial(0);
+ // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
+ // although this variable often contains the same value as mLastUsage, it can differ in some
+ // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
+ // updated upon the call, but the backend operation is deferred until the next submission
+ // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
+ // accurately identify when a pageable can be evicted.
+ ExecutionSerial mLastSubmission = ExecutionSerial(0);
+ MemorySegment mMemorySegment;
+ uint32_t mResidencyLockRefCount = 0;
+ uint64_t mSize = 0;
+ };
+} // namespace dawn::native::d3d12
+
+#endif
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
new file mode 100644
index 00000000000..794a7634a1e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
@@ -0,0 +1,377 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include <sstream>
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+using Microsoft::WRL::ComPtr;
+
+namespace dawn::native::d3d12 {
+ namespace {
+
+        // Reserve register names for internal use. These registers map to bindings in the shader,
+ // but are not directly related to allocation of the root signature.
+        // In the root signature, it is the index of the root parameter where these registers are
+ // used that determines the layout of the root signature.
+ static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
+ static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
+
+ static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
+ static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
+
+ static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
+ std::numeric_limits<uint32_t>::max();
+
+ D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
+ ASSERT(visibility != wgpu::ShaderStage::None);
+
+ if (visibility == wgpu::ShaderStage::Vertex) {
+ return D3D12_SHADER_VISIBILITY_VERTEX;
+ }
+
+ if (visibility == wgpu::ShaderStage::Fragment) {
+ return D3D12_SHADER_VISIBILITY_PIXEL;
+ }
+
+ // For compute or any two combination of stages, visibility must be ALL
+ return D3D12_SHADER_VISIBILITY_ALL;
+ }
+
+ D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
+ switch (type) {
+ case wgpu::BufferBindingType::Uniform:
+ return D3D12_ROOT_PARAMETER_TYPE_CBV;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ return D3D12_ROOT_PARAMETER_TYPE_UAV;
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ return D3D12_ROOT_PARAMETER_TYPE_SRV;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ } // anonymous namespace
+
+ ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+ DAWN_TRY(layout->Initialize());
+ return layout;
+ }
+
+ MaybeError PipelineLayout::Initialize() {
+ Device* device = ToBackend(GetDevice());
+ // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
+ // descriptor.
+ std::vector<D3D12_ROOT_PARAMETER> rootParameters;
+
+ size_t rangesCount = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+ rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
+ bindGroupLayout->GetSamplerDescriptorRanges().size();
+ }
+
+ // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
+ std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
+
+ uint32_t rangeIndex = 0;
+
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+
+ // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
+ // bind group index Returns whether or not the parameter was set. A root parameter is
+ // not set if the number of ranges is 0
+ auto SetRootDescriptorTable =
+ [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
+ auto rangeCount = descriptorRanges.size();
+ if (rangeCount == 0) {
+ return false;
+ }
+
+ D3D12_ROOT_PARAMETER rootParameter = {};
+ rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+ rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
+ rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
+
+ for (auto& range : descriptorRanges) {
+ ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
+ ranges[rangeIndex] = range;
+ ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
+ rangeIndex++;
+ }
+
+ rootParameters.emplace_back(rootParameter);
+
+ return true;
+ };
+
+ if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
+ mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
+ }
+ if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
+ mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
+ }
+
+ // Init root descriptors in root signatures for dynamic buffer bindings.
+ // These are packed at the beginning of the layout binding info.
+ for (BindingIndex dynamicBindingIndex{0};
+ dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
+ ++dynamicBindingIndex) {
+ const BindingInfo& bindingInfo =
+ bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
+
+ if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+ // Skip dynamic buffers that are not visible. D3D12 does not have None
+ // visibility.
+ continue;
+ }
+
+ D3D12_ROOT_PARAMETER rootParameter = {};
+
+ // Setup root descriptor.
+ D3D12_ROOT_DESCRIPTOR rootDescriptor;
+ rootDescriptor.ShaderRegister =
+ bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
+ rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
+
+ // Set root descriptors in root signatures.
+ rootParameter.Descriptor = rootDescriptor;
+ mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
+
+ // Set parameter types according to bind group layout descriptor.
+ rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
+
+ // Set visibilities according to bind group layout descriptor.
+ rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+
+ rootParameters.emplace_back(rootParameter);
+ }
+ }
+
+ // Make sure that we added exactly the number of elements we expected. If we added more,
+ // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
+ ASSERT(rangeIndex == rangesCount);
+
+ D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
+ renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ // Always allocate 3 constants for either:
+ // - vertex_index and instance_index
+ // - num_workgroups_x, num_workgroups_y and num_workgroups_z
+ // NOTE: We should consider delaying root signature creation until we know how many values
+ // we need
+ renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
+ renderOrComputeInternalConstants.Constants.RegisterSpace =
+ kRenderOrComputeInternalRegisterSpace;
+ renderOrComputeInternalConstants.Constants.ShaderRegister =
+ kRenderOrComputeInternalBaseRegister;
+ mFirstIndexOffsetParameterIndex = rootParameters.size();
+ mNumWorkgroupsParameterIndex = rootParameters.size();
+ // NOTE: We should consider moving this entry to earlier in the root signature since offsets
+ // would need to be updated often
+ rootParameters.emplace_back(renderOrComputeInternalConstants);
+
+ // Loops over all of the dynamic storage buffer bindings in the layout and build
+ // a mapping from the binding to the next offset into the root constant array where
+ // that dynamic storage buffer's binding size will be stored. The next register offset
+ // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
+ // This data will be used by shader translation to emit a load from the root constant
+ // array to use as the binding's size in runtime array calculations.
+ // Each bind group's length data is stored contiguously in the root constant array,
+ // so the loop also computes the first register offset for each group where the
+ // data should start.
+ uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+
+ mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
+ dynamicStorageBufferLengthsShaderRegisterOffset;
+ mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
+ bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+
+ for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+ ++bindingIndex) {
+ if (bgl->IsStorageBufferBinding(bindingIndex)) {
+ mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
+ {bgl->GetBindingInfo(bindingIndex).binding,
+ dynamicStorageBufferLengthsShaderRegisterOffset++});
+ }
+ }
+
+ ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
+ bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+ }
+ ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
+ kMaxDynamicStorageBuffersPerPipelineLayout);
+
+ if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
+ D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
+ dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+ dynamicStorageBufferLengthConstants.ParameterType =
+ D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+ dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
+ dynamicStorageBufferLengthsShaderRegisterOffset;
+ dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
+ kDynamicStorageBufferLengthsRegisterSpace;
+ dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
+ kDynamicStorageBufferLengthsBaseRegister;
+ mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
+ rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
+ } else {
+ mDynamicStorageBufferLengthsParameterIndex =
+ kInvalidDynamicStorageBufferLengthsParameterIndex;
+ }
+
+ D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
+ rootSignatureDescriptor.NumParameters = rootParameters.size();
+ rootSignatureDescriptor.pParameters = rootParameters.data();
+ rootSignatureDescriptor.NumStaticSamplers = 0;
+ rootSignatureDescriptor.pStaticSamplers = nullptr;
+ rootSignatureDescriptor.Flags =
+ D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
+
+ ComPtr<ID3DBlob> signature;
+ ComPtr<ID3DBlob> error;
+ HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
+ &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
+ if (DAWN_UNLIKELY(FAILED(hr))) {
+ std::ostringstream messageStream;
+ if (error) {
+ messageStream << static_cast<const char*>(error->GetBufferPointer());
+
+ // |error| is observed to always end with a \n, but is not
+ // specified to do so, so we add an extra newline just in case.
+ messageStream << std::endl;
+ }
+ messageStream << "D3D12 serialize root signature";
+ DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
+ }
+ DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
+ 0, signature->GetBufferPointer(), signature->GetBufferSize(),
+ IID_PPV_ARGS(&mRootSignature)),
+ "D3D12 create root signature"));
+ return {};
+ }
+
+ uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ return mCbvUavSrvRootParameterInfo[group];
+ }
+
+ uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ return mSamplerRootParameterInfo[group];
+ }
+
+ ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
+ return mRootSignature.Get();
+ }
+
+ const PipelineLayout::DynamicStorageBufferLengthInfo&
+ PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
+ return mDynamicStorageBufferLengthInfo;
+ }
+
+ uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
+ BindingIndex bindingIndex) const {
+ ASSERT(group < kMaxBindGroupsTyped);
+ ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
+ wgpu::ShaderStage::None);
+ return mDynamicRootParameterIndices[group][bindingIndex];
+ }
+
+ uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
+ return kRenderOrComputeInternalRegisterSpace;
+ }
+
+ uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
+ return kRenderOrComputeInternalBaseRegister;
+ }
+
+ uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
+ return mFirstIndexOffsetParameterIndex;
+ }
+
+ uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
+ return kRenderOrComputeInternalRegisterSpace;
+ }
+
+ uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
+ return kRenderOrComputeInternalBaseRegister;
+ }
+
+ uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
+ return mNumWorkgroupsParameterIndex;
+ }
+
+ uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
+ return kDynamicStorageBufferLengthsRegisterSpace;
+ }
+
+ uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
+ return kDynamicStorageBufferLengthsBaseRegister;
+ }
+
+ uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
+ ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
+ kInvalidDynamicStorageBufferLengthsParameterIndex);
+ return mDynamicStorageBufferLengthsParameterIndex;
+ }
+
+ ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
+ // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
+ if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
+ return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+ }
+
+ D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+ argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+ argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
+ argumentDescs[0].Constant.Num32BitValuesToSet = 3;
+ argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+ // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+ // command. That command must come last.
+ argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+ D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+ programDesc.ByteStride = 6 * sizeof(uint32_t);
+ programDesc.NumArgumentDescs = 2;
+ programDesc.pArgumentDescs = argumentDescs;
+
+ // The root signature must be specified if and only if the command signature changes one of
+ // the root arguments.
+ ToBackend(GetDevice())
+ ->GetD3D12Device()
+ ->CreateCommandSignature(
+ &programDesc, GetRootSignature(),
+ IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
+ return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h
new file mode 100644
index 00000000000..d1e845301a8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PipelineLayoutD3D12.h
@@ -0,0 +1,100 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static ResultOrError<Ref<PipelineLayout>> Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor);
+
+ uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
+ uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
+
+ // Returns the index of the root parameter reserved for a dynamic buffer binding
+ uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
+ BindingIndex bindingIndex) const;
+
+ uint32_t GetFirstIndexOffsetRegisterSpace() const;
+ uint32_t GetFirstIndexOffsetShaderRegister() const;
+ uint32_t GetFirstIndexOffsetParameterIndex() const;
+
+ uint32_t GetNumWorkgroupsRegisterSpace() const;
+ uint32_t GetNumWorkgroupsShaderRegister() const;
+ uint32_t GetNumWorkgroupsParameterIndex() const;
+
+ uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
+ uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
+ uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
+
+ ID3D12RootSignature* GetRootSignature() const;
+
+ ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+
+ struct PerBindGroupDynamicStorageBufferLengthInfo {
+ // First register offset for a bind group's dynamic storage buffer lengths.
+ // This is the index into the array of root constants where this bind group's
+ // lengths start.
+ uint32_t firstRegisterOffset;
+
+ struct BindingAndRegisterOffset {
+ BindingNumber binding;
+ uint32_t registerOffset;
+ };
+ // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
+ // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
+ // into the root constant array which holds the dynamic storage buffer lengths.
+ std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
+ };
+
+ // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
+        // Each pair is used in shader translation to map a binding to its register offset.
+ using DynamicStorageBufferLengthInfo =
+ ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
+
+ const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
+
+ private:
+ ~PipelineLayout() override = default;
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
+ ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
+ ityp::array<BindGroupIndex,
+ ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
+ kMaxBindGroups>
+ mDynamicRootParameterIndices;
+ DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
+ uint32_t mFirstIndexOffsetParameterIndex;
+ uint32_t mNumWorkgroupsParameterIndex;
+ uint32_t mDynamicStorageBufferLengthsParameterIndex;
+ ComPtr<ID3D12RootSignature> mRootSignature;
+ ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp
new file mode 100644
index 00000000000..786ae5a33b1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.cpp
@@ -0,0 +1,271 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/PlatformFunctions.h"
+
+#include "dawn/common/DynamicLib.h"
+
+#include <comdef.h>
+#include <array>
+#include <sstream>
+
+namespace dawn::native::d3d12 {
+ namespace {
+ // Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
+ uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
+ constexpr char kPrefix[] = "10.0.";
+ constexpr char kPostfix[] = ".0";
+
+ constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
+ constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
+ const uint32_t directoryNameLen = strlen(directoryName);
+
+ if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
+ return 0;
+ }
+
+ // Check if directoryName starts with "10.0.".
+ if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
+ return 0;
+ }
+
+ // Check if directoryName ends with ".0".
+ if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) !=
+ 0) {
+ return 0;
+ }
+
+ // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
+ return atoi(directoryName + kPrefixLen);
+ }
+
+ class ScopedFileHandle final {
+ public:
+ explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {
+ }
+ ~ScopedFileHandle() {
+ if (mHandle != INVALID_HANDLE_VALUE) {
+ ASSERT(FindClose(mHandle));
+ }
+ }
+ HANDLE GetHandle() const {
+ return mHandle;
+ }
+
+ private:
+ HANDLE mHandle;
+ };
+
+ std::string GetWindowsSDKBasePath() {
+ const char* kDefaultWindowsSDKPath =
+ "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
+ WIN32_FIND_DATAA fileData;
+ ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
+ if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
+ return "";
+ }
+
+ uint32_t highestWindowsSDKVersion = 0;
+ do {
+ if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ continue;
+ }
+
+ highestWindowsSDKVersion =
+ std::max(highestWindowsSDKVersion,
+ GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
+ } while (FindNextFileA(handle.GetHandle(), &fileData));
+
+ if (highestWindowsSDKVersion == 0) {
+ return "";
+ }
+
+ // Currently we only support using DXC on x64.
+ std::ostringstream ostream;
+ ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0."
+ << highestWindowsSDKVersion << ".0\\x64\\";
+
+ return ostream.str();
+ }
+ } // anonymous namespace
+
+ PlatformFunctions::PlatformFunctions() = default;
+ PlatformFunctions::~PlatformFunctions() = default;
+
+ MaybeError PlatformFunctions::LoadFunctions() {
+ DAWN_TRY(LoadD3D12());
+ DAWN_TRY(LoadDXGI());
+ LoadDXCLibraries();
+ DAWN_TRY(LoadFXCompiler());
+ DAWN_TRY(LoadD3D11());
+ LoadPIXRuntime();
+ return {};
+ }
+
+ MaybeError PlatformFunctions::LoadD3D12() {
+#if DAWN_PLATFORM_WINUWP
+ d3d12CreateDevice = &D3D12CreateDevice;
+ d3d12GetDebugInterface = &D3D12GetDebugInterface;
+ d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
+ d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
+ d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
+ d3d12CreateVersionedRootSignatureDeserializer =
+ &D3D12CreateVersionedRootSignatureDeserializer;
+#else
+ std::string error;
+ if (!mD3D12Lib.Open("d3d12.dll", &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
+ !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
+ !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature",
+ &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
+ "D3D12CreateRootSignatureDeserializer", &error) ||
+ !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
+ "D3D12SerializeVersionedRootSignature", &error) ||
+ !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
+ "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
+#endif
+
+ return {};
+ }
+
+ MaybeError PlatformFunctions::LoadD3D11() {
+#if DAWN_PLATFORM_WINUWP
+ d3d11on12CreateDevice = &D3D11On12CreateDevice;
+#else
+ std::string error;
+ if (!mD3D11Lib.Open("d3d11.dll", &error) ||
+ !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
+#endif
+
+ return {};
+ }
+
+ MaybeError PlatformFunctions::LoadDXGI() {
+#if DAWN_PLATFORM_WINUWP
+# if defined(_DEBUG)
+ // DXGIGetDebugInterface1 is tagged as a development-only capability
+ // which implies that linking to this function will cause
+ // the application to fail Windows store certification
+        // But we need it when debugging using VS Graphics Diagnostics or PIX
+ // So we only link to it in debug build
+ dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
+# endif
+ createDxgiFactory2 = &CreateDXGIFactory2;
+#else
+ std::string error;
+ if (!mDXGILib.Open("dxgi.dll", &error) ||
+ !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
+ !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
+#endif
+
+ return {};
+ }
+
+ void PlatformFunctions::LoadDXCLibraries() {
+ // TODO(dawn:766)
+ // Statically linked with dxcompiler.lib in UWP
+ // currently linked with dxcompiler.lib making CoreApp unable to activate
+ // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
+ // successfully executed.
+
+ const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
+
+ LoadDXIL(windowsSDKBasePath);
+ LoadDXCompiler(windowsSDKBasePath);
+ }
+
+ void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
+ const char* dxilDLLName = "dxil.dll";
+ const std::array<std::string, 2> kDxilDLLPaths = {
+ {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
+
+ for (const std::string& dxilDLLPath : kDxilDLLPaths) {
+ if (mDXILLib.Open(dxilDLLPath, nullptr)) {
+ return;
+ }
+ }
+ ASSERT(!mDXILLib.Valid());
+ }
+
+ void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
+ // DXIL must be loaded before DXC, otherwise shader signing is unavailable
+ if (!mDXILLib.Valid()) {
+ return;
+ }
+
+ const char* dxCompilerDLLName = "dxcompiler.dll";
+ const std::array<std::string, 2> kDxCompilerDLLPaths = {
+ {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
+
+ DynamicLib dxCompilerLib;
+ for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
+ if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
+ break;
+ }
+ }
+
+ if (dxCompilerLib.Valid() &&
+ dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
+ mDXCompilerLib = std::move(dxCompilerLib);
+ } else {
+ mDXILLib.Close();
+ }
+ }
+
+ MaybeError PlatformFunctions::LoadFXCompiler() {
+#if DAWN_PLATFORM_WINUWP
+ d3dCompile = &D3DCompile;
+ d3dDisassemble = &D3DDisassemble;
+#else
+ std::string error;
+ if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
+ !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
+ !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
+ return DAWN_INTERNAL_ERROR(error.c_str());
+ }
+#endif
+ return {};
+ }
+
+ bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
+ return mPIXEventRuntimeLib.Valid();
+ }
+
+ bool PlatformFunctions::IsDXCAvailable() const {
+ return mDXILLib.Valid() && mDXCompilerLib.Valid();
+ }
+
+ void PlatformFunctions::LoadPIXRuntime() {
+ // TODO(dawn:766):
+ // In UWP PIX should be statically linked WinPixEventRuntime_UAP.lib
+ // So maybe we should put WinPixEventRuntime as a third party package
+ // Currently PIX is not going to be loaded in UWP since the following
+ // mPIXEventRuntimeLib.Open will fail.
+ if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
+ !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
+ "PIXBeginEventOnCommandList") ||
+ !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
+ !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
+ mPIXEventRuntimeLib.Close();
+ }
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h
new file mode 100644
index 00000000000..a236b1a1566
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/PlatformFunctions.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
+#define DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/Error.h"
+
+#include <d3dcompiler.h>
+
+namespace dawn::native::d3d12 {
+
+ // Loads the functions required from the platform dynamically so that we don't need to rely on
+ // them being present in the system. For example linking against d3d12.lib would prevent
+    // dawn_native from loading on Windows 7 systems where d3d12.dll doesn't exist.
+ class PlatformFunctions {
+ public:
+ PlatformFunctions();
+ ~PlatformFunctions();
+
+ MaybeError LoadFunctions();
+ bool IsPIXEventRuntimeLoaded() const;
+ bool IsDXCAvailable() const;
+
+ // Functions from d3d12.dll
+ PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
+ PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
+
+ PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
+ PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
+ PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
+ PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
+ d3d12CreateVersionedRootSignatureDeserializer = nullptr;
+
+ // Functions from dxgi.dll
+ using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
+ REFIID riid,
+ _COM_Outptr_ void** pDebug);
+ PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
+
+ using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
+ REFIID riid,
+ _COM_Outptr_ void** ppFactory);
+ PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
+
+ // Functions from dxcompiler.dll
+ using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
+ REFIID riid,
+ _COM_Outptr_ void** ppCompiler);
+ PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
+
+        // Functions from d3dcompiler.dll
+ pD3DCompile d3dCompile = nullptr;
+ pD3DDisassemble d3dDisassemble = nullptr;
+
+ // Functions from WinPixEventRuntime.dll
+ using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
+ HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
+
+ PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
+
+ using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
+ WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+
+ PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
+
+ using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(
+ WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+
+ PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
+
+ // Functions from D3D11.dll
+ PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
+
+ private:
+ MaybeError LoadD3D12();
+ MaybeError LoadD3D11();
+ MaybeError LoadDXGI();
+ void LoadDXCLibraries();
+ void LoadDXIL(const std::string& baseWindowsSDKPath);
+ void LoadDXCompiler(const std::string& baseWindowsSDKPath);
+ MaybeError LoadFXCompiler();
+ void LoadPIXRuntime();
+
+ DynamicLib mD3D12Lib;
+ DynamicLib mD3D11Lib;
+ DynamicLib mDXGILib;
+ DynamicLib mDXILLib;
+ DynamicLib mDXCompilerLib;
+ DynamicLib mFXCompilerLib;
+ DynamicLib mPIXEventRuntimeLib;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp
new file mode 100644
index 00000000000..458c23df184
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.cpp
@@ -0,0 +1,75 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/QuerySetD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
+ }
+ }
+ } // anonymous namespace
+
+ // static
+ ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(querySet->Initialize());
+ return querySet;
+ }
+
+ MaybeError QuerySet::Initialize() {
+ D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
+ queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
+ queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
+
+ ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
+ "ID3D12Device::CreateQueryHeap"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
+ return mQueryHeap.Get();
+ }
+
+ QuerySet::~QuerySet() = default;
+
+ void QuerySet::DestroyImpl() {
+ QuerySetBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
+ mQueryHeap = nullptr;
+ }
+
+ void QuerySet::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h
new file mode 100644
index 00000000000..5ace7923845
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QuerySetD3D12.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+#define DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class QuerySet : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ ID3D12QueryHeap* GetQueryHeap() const;
+
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ // Dawn API
+ void DestroyImpl() override;
+ void SetLabelImpl() override;
+
+ ComPtr<ID3D12QueryHeap> mQueryHeap;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_QUERYSETD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp
new file mode 100644
index 00000000000..cb92f2160b2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.cpp
@@ -0,0 +1,54 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/QueueD3D12.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::d3d12 {
+
+ Queue::Queue(Device* device) : QueueBase(device) {
+ }
+
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
+
+ DAWN_TRY(device->Tick());
+
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferD3D12::RecordCommands");
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
+ }
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferD3D12::RecordCommands");
+
+ DAWN_TRY(device->ExecutePendingCommandContext());
+
+ DAWN_TRY(device->NextSerial());
+ return {};
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h
new file mode 100644
index 00000000000..6f15a7dfbd2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/QueueD3D12.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_QUEUED3D12_H_
+#define DAWNNATIVE_D3D12_QUEUED3D12_H_
+
+#include "dawn/native/Queue.h"
+
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+ class Queue final : public QueueBase {
+ public:
+ Queue(Device* device);
+
+ private:
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_QUEUED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
new file mode 100644
index 00000000000..a1ce85367f7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
@@ -0,0 +1,244 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/RenderPassBuilderD3D12.h"
+
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/CommandBufferD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
+ switch (loadOp) {
+ case wgpu::LoadOp::Clear:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
+ case wgpu::LoadOp::Load:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
+ switch (storeOp) {
+ case wgpu::StoreOp::Discard:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
+ case wgpu::StoreOp::Store:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
+
+ resolveParameters.Format = resolveDestination->GetD3D12Format();
+ resolveParameters.pSrcResource =
+ ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
+ resolveParameters.pDstResource =
+ ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
+
+ // Clear or preserve the resolve source.
+ if (storeOp == wgpu::StoreOp::Discard) {
+ resolveParameters.PreserveResolveSource = false;
+ } else if (storeOp == wgpu::StoreOp::Store) {
+ resolveParameters.PreserveResolveSource = true;
+ }
+
+ // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
+ // TODO: Investigate and determine how integer format resolves should work in WebGPU.
+ switch (resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+ case wgpu::TextureComponentType::Sint:
+ case wgpu::TextureComponentType::Uint:
+ resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_MAX;
+ break;
+ case wgpu::TextureComponentType::Float:
+ resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
+ break;
+
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
+ }
+
+ resolveParameters.SubresourceCount = 1;
+
+ return resolveParameters;
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
+ D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
+ Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+ ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
+
+ subresourceParameters.DstX = 0;
+ subresourceParameters.DstY = 0;
+ subresourceParameters.SrcSubresource = 0;
+ subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
+ resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
+ Aspect::Color);
+ // Resolving a specified sub-rect is only valid on hardware that supports sample
+ // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
+ // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
+ // "empty" to resolve the entire region.
+ subresourceParameters.SrcRect = {0, 0, 0, 0};
+
+ return subresourceParameters;
+ }
+ } // anonymous namespace
+
+ RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
+ if (hasUAV) {
+ mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
+ }
+ }
+
+ void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
+ ASSERT(mColorAttachmentCount < kMaxColorAttachmentsTyped);
+ mRenderTargetViews[attachmentIndex] = baseDescriptor;
+ mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
+ mColorAttachmentCount++;
+ }
+
+ void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
+ mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
+ }
+
+ ColorAttachmentIndex RenderPassBuilder::GetColorAttachmentCount() const {
+ return mColorAttachmentCount;
+ }
+
+ bool RenderPassBuilder::HasDepth() const {
+ return mHasDepth;
+ }
+
+ ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+ RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
+ return {mRenderPassRenderTargetDescriptors.data(), mColorAttachmentCount};
+ }
+
+ const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
+ RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
+ return &mRenderPassDepthStencilDesc;
+ }
+
+ D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
+ return mRenderPassFlags;
+ }
+
+ const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
+ return mRenderTargetViews.data();
+ }
+
+ void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+ wgpu::LoadOp loadOp,
+ dawn::native::Color clearColor,
+ DXGI_FORMAT format) {
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
+ D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
+ format;
+ }
+ }
+
+ void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
+ D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
+
+ mSubresourceParams[attachment] =
+ D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
+
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
+ &mSubresourceParams[attachment];
+ }
+
+ void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format) {
+ mHasDepth = true;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
+ clearDepth;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format) {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
+ .Stencil = clearStencil;
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetDepthNoAccess() {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+ }
+
+ void RenderPassBuilder::SetDepthStencilNoAccess() {
+ SetDepthNoAccess();
+ SetStencilNoAccess();
+ }
+
+ void RenderPassBuilder::SetStencilNoAccess() {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
new file mode 100644
index 00000000000..2b926a7c669
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
@@ -0,0 +1,98 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+#define DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_span.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native::d3d12 {
+
+ class TextureView;
+
+ // RenderPassBuilder stores parameters related to render pass load and store operations.
+ // When the D3D12 render pass API is available, the needed descriptors can be fetched
+ // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
+ // descriptors are still fetched and any information necessary to emulate the load and store
+ // operations is extracted from the descriptors.
+ class RenderPassBuilder {
+ public:
+ RenderPassBuilder(bool hasUAV);
+
+ ColorAttachmentIndex GetColorAttachmentCount() const;
+
+ // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
+ // storage if D3D12 render pass API is unavailable.
+ ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+ GetRenderPassRenderTargetDescriptors() const;
+ const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
+
+ D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
+
+ // Returns attachment RTVs to use with OMSetRenderTargets.
+ const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
+
+ bool HasDepth() const;
+
+ // Functions that set the appropriate values in the render pass descriptors.
+ void SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format);
+ void SetDepthNoAccess();
+ void SetDepthStencilNoAccess();
+ void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+ wgpu::LoadOp loadOp,
+ dawn::native::Color clearColor,
+ DXGI_FORMAT format);
+ void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
+ void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination);
+ void SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format);
+ void SetStencilNoAccess();
+
+ void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
+ void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
+
+ private:
+ ColorAttachmentIndex mColorAttachmentCount{uint8_t(0)};
+ bool mHasDepth = false;
+ D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
+ D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
+ ityp::
+ array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
+ mRenderPassRenderTargetDescriptors;
+ ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
+ mRenderTargetViews;
+ ityp::array<ColorAttachmentIndex,
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
+ kMaxColorAttachments>
+ mSubresourceParams;
+ };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
new file mode 100644
index 00000000000..1e40bd6e80b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
@@ -0,0 +1,490 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/RenderPipelineD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include <d3dcompiler.h>
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ return DXGI_FORMAT_R8G8_UINT;
+ case wgpu::VertexFormat::Uint8x4:
+ return DXGI_FORMAT_R8G8B8A8_UINT;
+ case wgpu::VertexFormat::Sint8x2:
+ return DXGI_FORMAT_R8G8_SINT;
+ case wgpu::VertexFormat::Sint8x4:
+ return DXGI_FORMAT_R8G8B8A8_SINT;
+ case wgpu::VertexFormat::Unorm8x2:
+ return DXGI_FORMAT_R8G8_UNORM;
+ case wgpu::VertexFormat::Unorm8x4:
+ return DXGI_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::VertexFormat::Snorm8x2:
+ return DXGI_FORMAT_R8G8_SNORM;
+ case wgpu::VertexFormat::Snorm8x4:
+ return DXGI_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::VertexFormat::Uint16x2:
+ return DXGI_FORMAT_R16G16_UINT;
+ case wgpu::VertexFormat::Uint16x4:
+ return DXGI_FORMAT_R16G16B16A16_UINT;
+ case wgpu::VertexFormat::Sint16x2:
+ return DXGI_FORMAT_R16G16_SINT;
+ case wgpu::VertexFormat::Sint16x4:
+ return DXGI_FORMAT_R16G16B16A16_SINT;
+ case wgpu::VertexFormat::Unorm16x2:
+ return DXGI_FORMAT_R16G16_UNORM;
+ case wgpu::VertexFormat::Unorm16x4:
+ return DXGI_FORMAT_R16G16B16A16_UNORM;
+ case wgpu::VertexFormat::Snorm16x2:
+ return DXGI_FORMAT_R16G16_SNORM;
+ case wgpu::VertexFormat::Snorm16x4:
+ return DXGI_FORMAT_R16G16B16A16_SNORM;
+ case wgpu::VertexFormat::Float16x2:
+ return DXGI_FORMAT_R16G16_FLOAT;
+ case wgpu::VertexFormat::Float16x4:
+ return DXGI_FORMAT_R16G16B16A16_FLOAT;
+ case wgpu::VertexFormat::Float32:
+ return DXGI_FORMAT_R32_FLOAT;
+ case wgpu::VertexFormat::Float32x2:
+ return DXGI_FORMAT_R32G32_FLOAT;
+ case wgpu::VertexFormat::Float32x3:
+ return DXGI_FORMAT_R32G32B32_FLOAT;
+ case wgpu::VertexFormat::Float32x4:
+ return DXGI_FORMAT_R32G32B32A32_FLOAT;
+ case wgpu::VertexFormat::Uint32:
+ return DXGI_FORMAT_R32_UINT;
+ case wgpu::VertexFormat::Uint32x2:
+ return DXGI_FORMAT_R32G32_UINT;
+ case wgpu::VertexFormat::Uint32x3:
+ return DXGI_FORMAT_R32G32B32_UINT;
+ case wgpu::VertexFormat::Uint32x4:
+ return DXGI_FORMAT_R32G32B32A32_UINT;
+ case wgpu::VertexFormat::Sint32:
+ return DXGI_FORMAT_R32_SINT;
+ case wgpu::VertexFormat::Sint32x2:
+ return DXGI_FORMAT_R32G32_SINT;
+ case wgpu::VertexFormat::Sint32x3:
+ return DXGI_FORMAT_R32G32B32_SINT;
+ case wgpu::VertexFormat::Sint32x4:
+ return DXGI_FORMAT_R32G32B32A32_SINT;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+        // Maps a WebGPU vertex step mode to the D3D12 input classification:
+        // per-vertex vs. per-instance advancement of the vertex buffer.
+        D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
+            switch (mode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
+                case wgpu::VertexStepMode::Instance:
+                    return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
+            }
+        }
+
+ D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
+ case wgpu::PrimitiveTopology::LineList:
+ return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
+ }
+ }
+
+ D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
+ wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineStrip:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
+ case wgpu::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+ }
+ }
+
+ D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
+ switch (mode) {
+ case wgpu::CullMode::None:
+ return D3D12_CULL_MODE_NONE;
+ case wgpu::CullMode::Front:
+ return D3D12_CULL_MODE_FRONT;
+ case wgpu::CullMode::Back:
+ return D3D12_CULL_MODE_BACK;
+ }
+ }
+
+ D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return D3D12_BLEND_ZERO;
+ case wgpu::BlendFactor::One:
+ return D3D12_BLEND_ONE;
+ case wgpu::BlendFactor::Src:
+ return D3D12_BLEND_SRC_COLOR;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return D3D12_BLEND_INV_SRC_COLOR;
+ case wgpu::BlendFactor::SrcAlpha:
+ return D3D12_BLEND_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return D3D12_BLEND_INV_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return D3D12_BLEND_DEST_COLOR;
+ case wgpu::BlendFactor::OneMinusDst:
+ return D3D12_BLEND_INV_DEST_COLOR;
+ case wgpu::BlendFactor::DstAlpha:
+ return D3D12_BLEND_DEST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return D3D12_BLEND_INV_DEST_ALPHA;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return D3D12_BLEND_SRC_ALPHA_SAT;
+ case wgpu::BlendFactor::Constant:
+ return D3D12_BLEND_BLEND_FACTOR;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return D3D12_BLEND_INV_BLEND_FACTOR;
+ }
+ }
+
+ // When a blend factor is defined for the alpha channel, any of the factors that don't
+ // explicitly state that they apply to alpha should be treated as their explicitly-alpha
+ // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
+ D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
+ switch (factor) {
+ case wgpu::BlendFactor::Src:
+ return D3D12_BLEND_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return D3D12_BLEND_INV_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return D3D12_BLEND_DEST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDst:
+ return D3D12_BLEND_INV_DEST_ALPHA;
+
+ // Other blend factors translate to the same D3D12 enum as the color blend factors.
+ default:
+ return D3D12Blend(factor);
+ }
+ }
+
+ D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return D3D12_BLEND_OP_ADD;
+ case wgpu::BlendOperation::Subtract:
+ return D3D12_BLEND_OP_SUBTRACT;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return D3D12_BLEND_OP_REV_SUBTRACT;
+ case wgpu::BlendOperation::Min:
+ return D3D12_BLEND_OP_MIN;
+ case wgpu::BlendOperation::Max:
+ return D3D12_BLEND_OP_MAX;
+ }
+ }
+
+ uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
+ D3D12_COLOR_WRITE_ENABLE_RED,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
+ D3D12_COLOR_WRITE_ENABLE_GREEN,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
+ D3D12_COLOR_WRITE_ENABLE_BLUE,
+ "ColorWriteMask values must match");
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
+ D3D12_COLOR_WRITE_ENABLE_ALPHA,
+ "ColorWriteMask values must match");
+ return static_cast<uint8_t>(writeMask);
+ }
+
+ D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
+ D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
+ blendDesc.BlendEnable = state->blend != nullptr;
+ if (blendDesc.BlendEnable) {
+ blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
+ blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
+ blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
+ blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
+ blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
+ blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
+ }
+ blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
+ blendDesc.LogicOpEnable = false;
+ blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
+ return blendDesc;
+ }
+
+ D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
+ switch (op) {
+ case wgpu::StencilOperation::Keep:
+ return D3D12_STENCIL_OP_KEEP;
+ case wgpu::StencilOperation::Zero:
+ return D3D12_STENCIL_OP_ZERO;
+ case wgpu::StencilOperation::Replace:
+ return D3D12_STENCIL_OP_REPLACE;
+ case wgpu::StencilOperation::IncrementClamp:
+ return D3D12_STENCIL_OP_INCR_SAT;
+ case wgpu::StencilOperation::DecrementClamp:
+ return D3D12_STENCIL_OP_DECR_SAT;
+ case wgpu::StencilOperation::Invert:
+ return D3D12_STENCIL_OP_INVERT;
+ case wgpu::StencilOperation::IncrementWrap:
+ return D3D12_STENCIL_OP_INCR;
+ case wgpu::StencilOperation::DecrementWrap:
+ return D3D12_STENCIL_OP_DECR;
+ }
+ }
+
+ D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
+ D3D12_DEPTH_STENCILOP_DESC desc;
+
+ desc.StencilFailOp = StencilOp(descriptor.failOp);
+ desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
+ desc.StencilPassOp = StencilOp(descriptor.passOp);
+ desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
+
+ return desc;
+ }
+
+        // Translates Dawn's DepthStencilState into the D3D12 depth-stencil descriptor.
+        // Depth testing is always enabled here; write-through is gated by depthWriteEnabled,
+        // and stencil is enabled only when StencilTestEnabled() reports the state uses it.
+        // NOTE(review): `mDepthStencilDescriptor` is a local despite the member-style "m"
+        // prefix — kept as-is to stay byte-identical with upstream.
+        D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+            D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
+            mDepthStencilDescriptor.DepthEnable = TRUE;
+            mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
+                                                         ? D3D12_DEPTH_WRITE_MASK_ALL
+                                                         : D3D12_DEPTH_WRITE_MASK_ZERO;
+            mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
+
+            mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
+            mDepthStencilDescriptor.StencilReadMask =
+                static_cast<UINT8>(descriptor->stencilReadMask);
+            mDepthStencilDescriptor.StencilWriteMask =
+                static_cast<UINT8>(descriptor->stencilWriteMask);
+
+            mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
+            mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
+            return mDepthStencilDescriptor;
+        }
+
+        // Computes the D3D12 strip-cut (primitive restart) value. Restart only applies to
+        // strip topologies; for list topologies it is disabled. When enabled, the cut value
+        // must match the index format (0xFFFF for Uint16, 0xFFFFFFFF for Uint32).
+        D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
+            wgpu::PrimitiveTopology primitiveTopology,
+            wgpu::IndexFormat indexFormat) {
+            if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
+                primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
+                return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+            }
+
+            switch (indexFormat) {
+                case wgpu::IndexFormat::Uint16:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
+                case wgpu::IndexFormat::Uint32:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
+                case wgpu::IndexFormat::Undefined:
+                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+            }
+        }
+
+ } // anonymous namespace
+
+ Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+ }
+
+    // Builds the D3D12 graphics pipeline state object (PSO) for this render pipeline:
+    // compiles the vertex/fragment shaders, fills in the D3D12_GRAPHICS_PIPELINE_STATE_DESC
+    // from the frontend state, and creates the PSO on the device.
+    MaybeError RenderPipeline::Initialize() {
+        Device* device = ToBackend(GetDevice());
+        uint32_t compileFlags = 0;
+
+        // Unless the FxcOptimizations toggle (or DXC) is in use, compile at the lowest
+        // optimization level.
+        if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+            !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+            compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
+        }
+
+        if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+            compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
+        }
+
+        // SPIRV-Cross does matrix multiplication expecting row major matrices
+        compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+
+        // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
+        // strictness is not enabled. See crbug.com/tint/976.
+        compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
+
+        D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
+
+        // Compile each present stage and point the PSO descriptor's VS/PS bytecode fields at
+        // the compiled blobs.
+        PerStage<ProgrammableStage> pipelineStages = GetAllStages();
+
+        PerStage<D3D12_SHADER_BYTECODE*> shaders;
+        shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
+        shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
+
+        PerStage<CompiledShader> compiledShader;
+
+        for (auto stage : IterateStages(GetStageMask())) {
+            DAWN_TRY_ASSIGN(
+                compiledShader[stage],
+                ToBackend(pipelineStages[stage].module)
+                    ->Compile(pipelineStages[stage], stage, ToBackend(GetLayout()), compileFlags));
+            *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
+        }
+
+        // Remember the vertex stage's first-index/instance offset info for draw-time use.
+        mFirstOffsetInfo = compiledShader[SingleShaderStage::Vertex].firstOffsetInfo;
+
+        PipelineLayout* layout = ToBackend(GetLayout());
+
+        descriptorD3D12.pRootSignature = layout->GetRootSignature();
+
+        // D3D12 logs warnings if any empty input state is used
+        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
+        if (GetAttributeLocationsUsed().any()) {
+            descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
+        }
+
+        descriptorD3D12.IBStripCutValue =
+            ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
+
+        descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
+        descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
+        descriptorD3D12.RasterizerState.FrontCounterClockwise =
+            (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
+        descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
+        descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
+        descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
+        descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
+        descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
+        descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
+        descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
+        descriptorD3D12.RasterizerState.ConservativeRaster =
+            D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
+
+        if (HasDepthStencilAttachment()) {
+            descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
+        }
+
+        // Per-attachment render target formats and blend state; IndependentBlendEnable is set
+        // so each attachment's blend descriptor is honored individually.
+        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+            descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
+                D3D12TextureFormat(GetColorAttachmentFormat(i));
+            descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
+                ComputeColorDesc(GetColorTargetState(i));
+        }
+        descriptorD3D12.NumRenderTargets = static_cast<uint32_t>(GetColorAttachmentsMask().count());
+
+        descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
+        descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
+
+        descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
+
+        descriptorD3D12.SampleMask = GetSampleMask();
+        descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
+        descriptorD3D12.SampleDesc.Count = GetSampleCount();
+        descriptorD3D12.SampleDesc.Quality = 0;
+
+        // Cached so draw calls can set the (more specific) IA topology without recomputing it.
+        mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
+
+        DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
+                                  &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
+                              "D3D12 create graphics pipeline state"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+ RenderPipeline::~RenderPipeline() = default;
+
+ void RenderPipeline::DestroyImpl() {
+ RenderPipelineBase::DestroyImpl();
+ ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+ }
+
+ D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
+ return mD3d12PrimitiveTopology;
+ }
+
+ ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
+ return mPipelineState.Get();
+ }
+
+ const FirstOffsetInfo& RenderPipeline::GetFirstOffsetInfo() const {
+ return mFirstOffsetInfo;
+ }
+
+ void RenderPipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+ }
+
+ D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
+ std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
+ unsigned int count = 0;
+ for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+ D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
+
+ const VertexAttributeInfo& attribute = GetAttribute(loc);
+
+ // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
+ // SemanticIndex N
+ inputElementDescriptor.SemanticName = "TEXCOORD";
+ inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
+ inputElementDescriptor.Format = VertexFormatType(attribute.format);
+ inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
+
+ const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
+
+ inputElementDescriptor.AlignedByteOffset = attribute.offset;
+ inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
+ if (inputElementDescriptor.InputSlotClass ==
+ D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
+ inputElementDescriptor.InstanceDataStepRate = 0;
+ } else {
+ inputElementDescriptor.InstanceDataStepRate = 1;
+ }
+ }
+
+ D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
+ inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
+ inputLayoutDescriptor.NumElements = count;
+ return inputLayoutDescriptor;
+ }
+
+ void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h
new file mode 100644
index 00000000000..13d4a1af25b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/RenderPipelineD3D12.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
+#define DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+    // D3D12 backend implementation of a Dawn render pipeline. Wraps an
+    // ID3D12PipelineState plus the ancillary state (primitive topology, vertex-offset
+    // info) needed at draw time. Created uninitialized; Initialize() (or the async
+    // path) builds the PSO.
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        // Allocates the backend object without creating the PSO yet.
+        static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+        // Runs Initialize() on a worker task and reports the result via `callback`.
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+        RenderPipeline() = delete;
+
+        MaybeError Initialize() override;
+
+        D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
+        ID3D12PipelineState* GetPipelineState() const;
+
+        const FirstOffsetInfo& GetFirstOffsetInfo() const;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~RenderPipeline() override;
+
+        void DestroyImpl() override;
+
+        using RenderPipelineBase::RenderPipelineBase;
+        // Fills `inputElementDescriptors` from the pipeline's vertex attributes and returns
+        // the input-layout descriptor referencing that storage (caller keeps it alive).
+        D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
+            std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
+
+        // Topology value cached by Initialize() for IASetPrimitiveTopology at draw time.
+        D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
+        ComPtr<ID3D12PipelineState> mPipelineState;
+        // First vertex/instance offset info produced by the vertex shader compilation.
+        FirstOffsetInfo mFirstOffsetInfo;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
new file mode 100644
index 00000000000..b7aab2ccfdd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
@@ -0,0 +1,371 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+#include "dawn/native/d3d12/AdapterD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ ResidencyManager::ResidencyManager(Device* device)
+ : mDevice(device),
+ mResidencyManagementEnabled(
+ device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
+ UpdateVideoMemoryInfo();
+ }
+
+ // Increments number of locks on a heap to ensure the heap remains resident.
+ MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ // If the heap isn't already resident, make it resident.
+ if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
+ ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
+ uint64_t size = pageable->GetSize();
+
+ DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
+ size, 1, &d3d12Pageable));
+ }
+
+ // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
+ if (pageable->IsInResidencyLRUCache()) {
+ pageable->RemoveFromList();
+ }
+
+ pageable->IncrementResidencyLock();
+
+ return {};
+ }
+
+ // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
+ // inserted into the LRU cache and becomes eligible for eviction.
+ void ResidencyManager::UnlockAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ ASSERT(pageable->IsResidencyLocked());
+ ASSERT(!pageable->IsInResidencyLRUCache());
+ pageable->DecrementResidencyLock();
+
+ // If another lock still exists on the heap, nothing further should be done.
+ if (pageable->IsResidencyLocked()) {
+ return;
+ }
+
+ // When all locks have been removed, the resource remains resident and becomes tracked in
+ // the corresponding LRU.
+ TrackResidentAllocation(pageable);
+ }
+
+ // Returns the appropriate MemorySegmentInfo for a given MemorySegment.
+ ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
+ MemorySegment memorySegment) {
+ switch (memorySegment) {
+ case MemorySegment::Local:
+ return &mVideoMemoryInfo.local;
+ case MemorySegment::NonLocal:
+ ASSERT(!mDevice->GetDeviceInfo().isUMA);
+ return &mVideoMemoryInfo.nonLocal;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+    // Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
+    // competition for device memory. Returns the amount of memory reserved, which may be less
+    // than the requested reservation when under pressure.
+    uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
+                                                            uint64_t requestedReservationSize) {
+        MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
+
+        segmentInfo->externalRequest = requestedReservationSize;
+
+        // Re-query the OS budget so the granted reservation reflects current memory pressure.
+        UpdateMemorySegmentInfo(segmentInfo);
+
+        return segmentInfo->externalReservation;
+    }
+
+ void ResidencyManager::UpdateVideoMemoryInfo() {
+ UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
+ if (!mDevice->GetDeviceInfo().isUMA) {
+ UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
+ }
+ }
+
+    // Refreshes a segment's usage/budget/externalReservation from the OS-reported video
+    // memory info for that DXGI segment group.
+    void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
+        DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
+
+        // NOTE(review): the HRESULT of QueryVideoMemoryInfo is ignored; on failure
+        // queryVideoMemoryInfo would be read uninitialized below — confirm the call cannot
+        // fail for a valid adapter/segment before relying on that.
+        ToBackend(mDevice->GetAdapter())
+            ->GetHardwareAdapter()
+            ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
+
+        // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
+        // system, and may be lower than expected in certain scenarios. Under memory pressure, we
+        // cap the external reservation to half the available budget, which prevents the external
+        // component from consuming a disproportionate share of memory and ensures that Dawn can
+        // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
+        // and subject to future experimentation.
+        segmentInfo->externalReservation =
+            std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
+
+        segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
+
+        // If we're restricting the budget for testing, leave the budget as is.
+        if (mRestrictBudgetForTesting) {
+            return;
+        }
+
+        // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
+        // decreases fluctuations in the operating-system-defined budget, which improves stability
+        // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
+        // chosen and subject to future experimentation.
+        static constexpr float kBudgetCap = 0.95;
+        segmentInfo->budget =
+            (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
+    }
+
+ // Removes a heap from the LRU and returns the least recently used heap when possible. Returns
+ // nullptr when nothing further can be evicted.
+ ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
+ MemorySegmentInfo* memorySegment) {
+ // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
+ // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
+ // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
+ // the process budget and starving Dawn, which will cause thrash.
+ if (memorySegment->lruCache.empty()) {
+ return nullptr;
+ }
+
+ Pageable* pageable = memorySegment->lruCache.head()->value();
+
+ ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
+
+ // If the next candidate for eviction was inserted into the LRU during the current serial,
+ // it is because more memory is being used in a single command list than is available.
+ // In this scenario, we cannot make any more resources resident and thrashing must occur.
+ if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
+ return nullptr;
+ }
+
+ // We must ensure that any previous use of a resource has completed before the resource can
+ // be evicted.
+ if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
+ DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
+ }
+
+ pageable->RemoveFromList();
+ return pageable;
+ }
+
+ MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
+ MemorySegment memorySegment) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted,
+ EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
+ DAWN_UNUSED(bytesEvicted);
+
+ return {};
+ }
+
+ // Any time we need to make something resident, we must check that we have enough free memory to
+ // make the new object resident while also staying within budget. If there isn't enough
+ // memory, we should evict until there is. Returns the number of bytes evicted.
+ ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
+ uint64_t sizeToMakeResident,
+ MemorySegmentInfo* memorySegment) {
+ ASSERT(mResidencyManagementEnabled);
+
+ UpdateMemorySegmentInfo(memorySegment);
+
+ uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
+
+ // Return when we can call MakeResident and remain under budget.
+ if (memoryUsageAfterMakeResident < memorySegment->budget) {
+ return 0;
+ }
+
+ std::vector<ID3D12Pageable*> resourcesToEvict;
+ uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
+ uint64_t sizeEvicted = 0;
+ while (sizeEvicted < sizeNeededToBeUnderBudget) {
+ Pageable* pageable;
+ DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
+
+ // If no heap was returned, then nothing more can be evicted.
+ if (pageable == nullptr) {
+ break;
+ }
+
+ sizeEvicted += pageable->GetSize();
+ resourcesToEvict.push_back(pageable->GetD3D12Pageable());
+ }
+
+ if (resourcesToEvict.size() > 0) {
+ DAWN_TRY(CheckHRESULT(
+ mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
+ "Evicting resident heaps to free memory"));
+ }
+
+ return sizeEvicted;
+ }
+
+ // Given a list of heaps that are pending usage, this function will estimate memory needed,
+ // evict resources until enough space is available, then make resident any heaps scheduled for
+ // usage.
+ MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ std::vector<ID3D12Pageable*> localHeapsToMakeResident;
+ std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
+ uint64_t localSizeToMakeResident = 0;
+ uint64_t nonLocalSizeToMakeResident = 0;
+
+ ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
+ for (size_t i = 0; i < heapCount; i++) {
+ Heap* heap = heaps[i];
+
+ // Heaps that are locked resident are not tracked in the LRU cache.
+ if (heap->IsResidencyLocked()) {
+ continue;
+ }
+
+ if (heap->IsInResidencyLRUCache()) {
+ // If the heap is already in the LRU, we must remove it and append again below to
+ // update its position in the LRU.
+ heap->RemoveFromList();
+ } else {
+ if (heap->GetMemorySegment() == MemorySegment::Local) {
+ localSizeToMakeResident += heap->GetSize();
+ localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+ } else {
+ nonLocalSizeToMakeResident += heap->GetSize();
+ nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+ }
+ }
+
+ // If we submit a command list to the GPU, we must ensure that heaps referenced by that
+ // command list stay resident at least until that command list has finished execution.
+ // Setting this serial unnecessarily can leave the LRU in a state where nothing is
+ // eligible for eviction, even though some evictions may be possible.
+ heap->SetLastSubmission(pendingCommandSerial);
+
+ // Insert the heap into the appropriate LRU.
+ TrackResidentAllocation(heap);
+ }
+
+ if (localSizeToMakeResident > 0) {
+ return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
+ localHeapsToMakeResident.size(),
+ localHeapsToMakeResident.data());
+ }
+
+ if (nonLocalSizeToMakeResident > 0) {
+ ASSERT(!mDevice->GetDeviceInfo().isUMA);
+ return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
+ nonLocalHeapsToMakeResident.size(),
+ nonLocalHeapsToMakeResident.data());
+ }
+
+ return {};
+ }
+
+ MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
+ uint64_t sizeToMakeResident,
+ uint64_t numberOfObjectsToMakeResident,
+ ID3D12Pageable** allocations) {
+ uint64_t bytesEvicted;
+ DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
+ DAWN_UNUSED(bytesEvicted);
+
+ // Note that MakeResident is a synchronous function and can add a significant
+ // overhead to command recording. In the future, it may be possible to decrease this
+ // overhead by using MakeResident on a secondary thread, or by instead making use of
+ // the EnqueueMakeResident function (which is not available on all Windows 10
+ // platforms).
+ HRESULT hr =
+ mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+
+ // A MakeResident call can fail if there's not enough available memory. This
+ // could occur when there's significant fragmentation or if the allocation size
+ // estimates are incorrect. We may be able to continue execution by evicting some
+ // more memory and calling MakeResident again.
+ while (FAILED(hr)) {
+ constexpr uint32_t kAdditonalSizeToEvict = 50000000; // 50MB
+
+ uint64_t sizeEvicted = 0;
+
+ DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
+
+ // If nothing can be evicted after MakeResident has failed, we cannot continue
+ // execution and must throw a fatal error.
+ if (sizeEvicted == 0) {
+ return DAWN_OUT_OF_MEMORY_ERROR(
+ "MakeResident has failed due to excessive video memory usage.");
+ }
+
+ hr =
+ mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+ }
+
+ return {};
+ }
+
+ // Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
+ // become resident within the current serial. Failing to call this function when an allocation
+ // is implicitly made resident will cause the residency manager to view the allocation as
+ // non-resident and call MakeResident - which will make D3D12's internal residency refcount on
+ // the allocation out of sync with Dawn.
+ void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ ASSERT(pageable->IsInList() == false);
+ GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+ }
+
+    // Places an artificial cap on Dawn's budget so we can test in a predictable manner. If used,
+    // this function must be called before any resources have been created.
+    void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
+        ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
+        ASSERT(!mRestrictBudgetForTesting);
+
+        mRestrictBudgetForTesting = true;
+        UpdateVideoMemoryInfo();
+
+        // Dawn has a non-zero memory usage even before any resources have been created, and this
+        // value can vary depending on the environment Dawn is running in. By adding this in
+        // addition to the artificial budget cap, we can create a predictable and reproducible
+        // budget for testing.
+        mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
+        if (!mDevice->GetDeviceInfo().isUMA) {
+            mVideoMemoryInfo.nonLocal.budget =
+                mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
+        }
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h
new file mode 100644
index 00000000000..26d9cf06e22
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResidencyManagerD3D12.h
@@ -0,0 +1,82 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/native/D3D12Backend.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+ class Heap;
+ class Pageable;
+
+    // Tracks which pageable D3D12 allocations are resident in GPU-accessible memory,
+    // keeping usage within the adapter's video memory budget. Resident allocations are
+    // kept in per-segment LRU lists so the least-recently-used ones can be evicted
+    // first when budget headroom is needed.
+    class ResidencyManager {
+      public:
+        ResidencyManager(Device* device);
+
+        // Pins an allocation resident so it cannot be evicted until UnlockAllocation.
+        MaybeError LockAllocation(Pageable* pageable);
+        void UnlockAllocation(Pageable* pageable);
+
+        // Ensures enough budget headroom exists before an allocation of the given size
+        // is (implicitly) made resident in the given segment.
+        MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
+        // Makes every heap in |heaps| resident before GPU use.
+        MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
+
+        // Reserves budget on behalf of an external user; returns the reservation
+        // actually granted.
+        uint64_t SetExternalMemoryReservation(MemorySegment segment,
+                                              uint64_t requestedReservationSize);
+
+        // Registers an already-resident (or scheduled-to-be-resident) allocation with
+        // the LRU. See the definition for why this must not be skipped.
+        void TrackResidentAllocation(Pageable* pageable);
+
+        // Testing-only: caps the budget so eviction behavior is reproducible.
+        void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
+
+      private:
+        // Per-segment bookkeeping: DXGI-reported budget/usage, external reservations,
+        // and the LRU of resident allocations eligible for eviction.
+        struct MemorySegmentInfo {
+            const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
+            LinkedList<Pageable> lruCache = {};
+            uint64_t budget = 0;
+            uint64_t usage = 0;
+            uint64_t externalReservation = 0;
+            uint64_t externalRequest = 0;
+        };
+
+        struct VideoMemoryInfo {
+            MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
+            MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
+        };
+
+        MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
+        ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
+                                                      MemorySegmentInfo* memorySegment);
+        ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+        MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
+                                           uint64_t sizeToMakeResident,
+                                           uint64_t numberOfObjectsToMakeResident,
+                                           ID3D12Pageable** allocations);
+        void UpdateVideoMemoryInfo();
+        void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
+
+        Device* mDevice;
+        bool mResidencyManagementEnabled = false;
+        bool mRestrictBudgetForTesting = false;
+        VideoMemoryInfo mVideoMemoryInfo = {};
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
new file mode 100644
index 00000000000..41733c476d6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -0,0 +1,414 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+ namespace {
+        // Returns which memory segment a heap of the given type lives in. On UMA
+        // adapters everything is local; otherwise the heap's preferred memory pool
+        // decides (L1 is dedicated video memory, i.e. local).
+        MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
+            if (device->GetDeviceInfo().isUMA) {
+                return MemorySegment::Local;
+            }
+
+            D3D12_HEAP_PROPERTIES heapProperties =
+                device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
+
+            if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
+                return MemorySegment::Local;
+            }
+
+            return MemorySegment::NonLocal;
+        }
+
+        // Maps a ResourceHeapKind to the D3D12 heap type it allocates from.
+        D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
+            switch (resourceHeapKind) {
+                case Readback_OnlyBuffers:
+                case Readback_AllBuffersAndTextures:
+                    return D3D12_HEAP_TYPE_READBACK;
+                case Default_AllBuffersAndTextures:
+                case Default_OnlyBuffers:
+                case Default_OnlyNonRenderableOrDepthTextures:
+                case Default_OnlyRenderableOrDepthTextures:
+                    return D3D12_HEAP_TYPE_DEFAULT;
+                case Upload_OnlyBuffers:
+                case Upload_AllBuffersAndTextures:
+                    return D3D12_HEAP_TYPE_UPLOAD;
+                case EnumCount:
+                    UNREACHABLE();
+            }
+            // Guards against out-of-range enum values: without this, control could fall
+            // off the end of a non-void function (undefined behavior, MSVC C4715).
+            UNREACHABLE();
+        }
+
+        // Maps a ResourceHeapKind to the D3D12 heap flags restricting what the heap may
+        // contain (tier 1 heaps are limited to a single resource category).
+        D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
+            switch (resourceHeapKind) {
+                case Default_AllBuffersAndTextures:
+                case Readback_AllBuffersAndTextures:
+                case Upload_AllBuffersAndTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+                case Default_OnlyBuffers:
+                case Readback_OnlyBuffers:
+                case Upload_OnlyBuffers:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+                case Default_OnlyNonRenderableOrDepthTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+                case Default_OnlyRenderableOrDepthTextures:
+                    return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+                case EnumCount:
+                    UNREACHABLE();
+            }
+            // Guards against out-of-range enum values: without this, control could fall
+            // off the end of a non-void function (undefined behavior, MSVC C4715).
+            UNREACHABLE();
+        }
+
+        // Picks the ResourceHeapKind bucket a resource must be allocated from, based on
+        // its dimension, heap type, flags, and the device's resource heap support tier.
+        ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
+                                             D3D12_HEAP_TYPE heapType,
+                                             D3D12_RESOURCE_FLAGS flags,
+                                             uint32_t resourceHeapTier) {
+            // Tier 2+ heaps may hold any mix of buffers and textures, so only the heap
+            // type matters.
+            if (resourceHeapTier >= 2) {
+                switch (heapType) {
+                    case D3D12_HEAP_TYPE_UPLOAD:
+                        return Upload_AllBuffersAndTextures;
+                    case D3D12_HEAP_TYPE_DEFAULT:
+                        return Default_AllBuffersAndTextures;
+                    case D3D12_HEAP_TYPE_READBACK:
+                        return Readback_AllBuffersAndTextures;
+                    default:
+                        UNREACHABLE();
+                }
+            }
+
+            // Tier 1: buffers, renderable/depth textures, and other textures each need
+            // a dedicated heap kind.
+            switch (dimension) {
+                case D3D12_RESOURCE_DIMENSION_BUFFER: {
+                    switch (heapType) {
+                        case D3D12_HEAP_TYPE_UPLOAD:
+                            return Upload_OnlyBuffers;
+                        case D3D12_HEAP_TYPE_DEFAULT:
+                            return Default_OnlyBuffers;
+                        case D3D12_HEAP_TYPE_READBACK:
+                            return Readback_OnlyBuffers;
+                        default:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+                case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+                case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+                case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
+                    switch (heapType) {
+                        case D3D12_HEAP_TYPE_DEFAULT: {
+                            // Only DEFAULT-heap textures are expected here (any other
+                            // heap type hits UNREACHABLE). Renderable/depth textures get
+                            // their own kind so tier-1 heap flags can be satisfied.
+                            if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+                                (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
+                                return Default_OnlyRenderableOrDepthTextures;
+                            }
+                            return Default_OnlyNonRenderableOrDepthTextures;
+                        }
+
+                        default:
+                            UNREACHABLE();
+                    }
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // Chooses the placement alignment for a placed resource. Small non-renderable,
+        // non-depth textures may use the "small resource" alignments (4KB, or 64KB for
+        // MSAA) instead of the default ones (64KB / 4MB); every other kind keeps the
+        // caller-requested alignment.
+        //
+        // Note: MSDN suggests small-resource placement could be attempted more broadly;
+        // for now it is only used for these textures.
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+        uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
+                                               uint32_t sampleCount,
+                                               uint64_t requestedAlignment) {
+            if (resourceHeapKind != Default_OnlyNonRenderableOrDepthTextures) {
+                return requestedAlignment;
+            }
+            if (sampleCount > 1) {
+                return D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+            }
+            return D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
+        }
+
+        // True when D3D12 accepts an optimized clear value for this resource: it must
+        // be a non-typeless texture with render-target and/or depth-stencil usage.
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+        bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
+            if (IsTypeless(resourceDescriptor.Format)) {
+                return false;
+            }
+            if (resourceDescriptor.Dimension == D3D12_RESOURCE_DIMENSION_BUFFER) {
+                return false;
+            }
+            constexpr D3D12_RESOURCE_FLAGS kClearableFlags =
+                D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET | D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+            return (resourceDescriptor.Flags & kClearableFlags) != 0;
+        }
+
+ } // namespace
+
+    // Builds one allocator chain (heap allocator -> pooled allocator -> buddy
+    // sub-allocator) per ResourceHeapKind.
+    ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+        // Only honor the device's heap tier when the tier-2 toggle is enabled;
+        // otherwise behave as tier 1 (separate heaps per resource category).
+        mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
+                                ? mDevice->GetDeviceInfo().resourceHeapTier
+                                : 1;
+
+        for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
+            const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
+            mHeapAllocators[i] = std::make_unique<HeapAllocator>(
+                mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
+                GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
+            mPooledHeapAllocators[i] =
+                std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
+            mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
+                kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+        }
+    }
+
+    // Allocates GPU memory for a resource: first tries sub-allocation (a placed
+    // resource inside a shared heap), then falls back to a dedicated committed
+    // resource, and finally reports out-of-memory if both fail.
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        D3D12_RESOURCE_STATES initialUsage) {
+        // In order to suppress a warning in the D3D12 debug layer, we need to specify an
+        // optimized clear value. As there are no negative consequences when picking a mismatched
+        // clear value, we use zero as the optimized clear value. This also enables fast clears on
+        // some architectures.
+        D3D12_CLEAR_VALUE zero{};
+        D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
+        if (IsClearValueOptimizable(resourceDescriptor)) {
+            zero.Format = resourceDescriptor.Format;
+            optimizedClearValue = &zero;
+        }
+
+        // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
+        // For very large resources, there is no benefit to suballocate.
+        // For very small resources, it is inefficent to suballocate given the min. heap
+        // size could be much larger then the resource allocation.
+        // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
+        ResourceHeapAllocation subAllocation;
+        DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
+                                                            optimizedClearValue, initialUsage));
+        // kInvalid signals "could not sub-allocate" rather than a hard error.
+        if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+            return std::move(subAllocation);
+        }
+
+        // If sub-allocation fails, fall-back to direct allocation (committed resource).
+        ResourceHeapAllocation directAllocation;
+        DAWN_TRY_ASSIGN(directAllocation,
+                        CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
+                                                initialUsage));
+        if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+            return std::move(directAllocation);
+        }
+
+        // If direct allocation fails, the system is probably out of memory.
+        return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
+    }
+
+    // Frees deferred allocations and heaps whose last GPU use has completed.
+    void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
+        for (ResourceHeapAllocation& allocation :
+             mAllocationsToDelete.IterateUpTo(completedSerial)) {
+            // Only sub-allocations are returned to the buddy allocator here; direct
+            // allocations release their heap via mHeapsToDelete below.
+            if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
+                FreeMemory(allocation);
+            }
+        }
+        mAllocationsToDelete.ClearUpTo(completedSerial);
+        mHeapsToDelete.ClearUpTo(completedSerial);
+    }
+
+    // Schedules an allocation for destruction once pending commands complete, then
+    // invalidates it so double-deallocation is harmless. No-op for already-invalid
+    // allocations.
+    void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
+        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return;
+        }
+
+        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+
+        // Directly allocated ResourceHeapAllocations are created with a heap object that must be
+        // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
+        // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
+        // to delete. It cannot be deleted immediately because it may be in use by in-flight or
+        // pending commands.
+        if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
+            mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
+                                   mDevice->GetPendingCommandSerial());
+        }
+
+        // Invalidate the allocation immediately in case one accidentally
+        // calls DeallocateMemory again using the same allocation.
+        allocation.Invalidate();
+
+        ASSERT(allocation.GetD3D12Resource() == nullptr);
+    }
+
+    // Returns a sub-allocated allocation to its buddy allocator. The heap kind is
+    // recomputed from the live resource's heap properties and descriptor rather than
+    // stored, so the allocation itself stays small.
+    void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
+        ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+
+        D3D12_HEAP_PROPERTIES heapProp;
+        allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+
+        const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
+
+        const size_t resourceHeapKindIndex =
+            GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
+                                resourceDescriptor.Flags, mResourceHeapTier);
+
+        mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
+    }
+
+    // Attempts to sub-allocate the resource as a placed resource inside a shared heap.
+    // Returns an allocation with mMethod == kInvalid (not an error) when the buddy
+    // allocator cannot satisfy the request, so the caller can fall back to a committed
+    // resource.
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage) {
+        const ResourceHeapKind resourceHeapKind =
+            GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
+                                requestedResourceDescriptor.Flags, mResourceHeapTier);
+
+        D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
+        resourceDescriptor.Alignment = GetResourcePlacementAlignment(
+            resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
+            requestedResourceDescriptor.Alignment);
+
+        // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
+        // twice.
+        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+            mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+
+        // If the requested resource alignment was rejected, let D3D tell us what the
+        // required alignment is for this resource.
+        if (resourceDescriptor.Alignment != 0 &&
+            resourceDescriptor.Alignment != resourceInfo.Alignment) {
+            resourceDescriptor.Alignment = 0;
+            resourceInfo =
+                mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+        }
+
+        // If d3d tells us the resource size is invalid, treat the error as OOM.
+        // Otherwise, creating the resource could cause a device loss (too large).
+        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+        // incorrectly allocate a mismatched size.
+        if (resourceInfo.SizeInBytes == 0 ||
+            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+        }
+
+        BuddyMemoryAllocator* allocator =
+            mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
+
+        ResourceMemoryAllocation allocation;
+        DAWN_TRY_ASSIGN(allocation,
+                        allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
+        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return ResourceHeapAllocation{};  // invalid
+        }
+
+        Heap* heap = ToBackend(allocation.GetResourceHeap());
+
+        // Before calling CreatePlacedResource, we must ensure the target heap is resident.
+        // CreatePlacedResource will fail if it is not.
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
+
+        // With placed resources, a single heap can be reused.
+        // The resource placed at an offset is only reclaimed
+        // upon Tick or after the last command list using the resource has completed
+        // on the GPU. This means the same physical memory is not reused
+        // within the same command-list and does not require additional synchronization (aliasing
+        // barrier).
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+        ComPtr<ID3D12Resource> placedResource;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            mDevice->GetD3D12Device()->CreatePlacedResource(
+                heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
+                optimizedClearValue, IID_PPV_ARGS(&placedResource)),
+            "ID3D12Device::CreatePlacedResource"));
+
+        // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
+        // will insert it into the residency LRU.
+        mDevice->GetResidencyManager()->UnlockAllocation(heap);
+
+        return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
+                                      std::move(placedResource), heap};
+    }
+
+    // Allocates a standalone committed resource (resource plus its own implicit heap).
+    // Returns an allocation with mMethod == kInvalid when the resource exceeds
+    // kMaxHeapSize, letting the caller surface out-of-memory.
+    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage) {
+        D3D12_HEAP_PROPERTIES heapProperties;
+        heapProperties.Type = heapType;
+        heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+        heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+        heapProperties.CreationNodeMask = 0;
+        heapProperties.VisibleNodeMask = 0;
+
+        // If d3d tells us the resource size is invalid, treat the error as OOM.
+        // Otherwise, creating the resource could cause a device loss (too large).
+        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+        // incorrectly allocate a mismatched size.
+        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+            mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+        if (resourceInfo.SizeInBytes == 0 ||
+            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+        }
+
+        if (resourceInfo.SizeInBytes > kMaxHeapSize) {
+            return ResourceHeapAllocation{};  // Invalid
+        }
+
+        // CreateCommittedResource will implicitly make the created resource resident. We must
+        // ensure enough free memory exists before allocating to avoid an out-of-memory error when
+        // overcommitted.
+        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
+            resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
+
+        // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
+        // provided to CreateCommittedResource.
+        ComPtr<ID3D12Resource> committedResource;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(
+            mDevice->GetD3D12Device()->CreateCommittedResource(
+                &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
+                optimizedClearValue, IID_PPV_ARGS(&committedResource)),
+            "ID3D12Device::CreateCommittedResource"));
+
+        // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
+        // resource allocation. Because Dawn's memory residency management occurs at the resource
+        // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
+        // object. This object is created manually, and must be deleted manually upon deallocation
+        // of the committed resource.
+        Heap* heap = new Heap(committedResource, GetMemorySegment(mDevice, heapType),
+                              resourceInfo.SizeInBytes);
+
+        // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
+        // track this to avoid calling MakeResident a second time.
+        mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
+
+        AllocationInfo info;
+        info.mMethod = AllocationMethod::kDirect;
+
+        return ResourceHeapAllocation{info,
+                                      /*offset*/ 0, std::move(committedResource), heap};
+    }
+
+    // Releases every heap cached by the pooled allocators.
+    void ResourceAllocatorManager::DestroyPool() {
+        for (size_t i = 0; i < mPooledHeapAllocators.size(); ++i) {
+            mPooledHeapAllocators[i]->DestroyPool();
+        }
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
new file mode 100644
index 00000000000..331c982c7d1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/BuddyMemoryAllocator.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+
+#include <array>
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+    // Resource heap types + flags combinations are named after the D3D constants.
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
+    enum ResourceHeapKind {
+
+        // Resource heap tier 2
+        // Allows resource heaps to contain all buffer and textures types.
+        // This enables better heap re-use by avoiding the need for separate heaps and
+        // also reduces fragmentation.
+        Readback_AllBuffersAndTextures,
+        Upload_AllBuffersAndTextures,
+        Default_AllBuffersAndTextures,
+
+        // Resource heap tier 1
+        // Resource heaps only support types from a single resource category.
+        Readback_OnlyBuffers,
+        Upload_OnlyBuffers,
+        Default_OnlyBuffers,
+
+        Default_OnlyNonRenderableOrDepthTextures,
+        Default_OnlyRenderableOrDepthTextures,
+
+        // Number of valid kinds; also used to size the per-kind allocator arrays.
+        EnumCount,
+        InvalidEnum = EnumCount,
+    };
+
+    // Manages a list of resource allocators used by the device to create resources using
+    // multiple allocation methods: sub-allocation in shared heaps (buddy allocator over
+    // pooled heaps) with committed resources as the fallback.
+    class ResourceAllocatorManager {
+      public:
+        ResourceAllocatorManager(Device* device);
+
+        // Allocates memory for a resource; see the definition for the fallback order.
+        ResultOrError<ResourceHeapAllocation> AllocateMemory(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& resourceDescriptor,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        // Defers destruction until pending commands finish, then invalidates.
+        void DeallocateMemory(ResourceHeapAllocation& allocation);
+
+        // Frees deferred allocations/heaps whose GPU work has completed.
+        void Tick(ExecutionSerial lastCompletedSerial);
+
+        void DestroyPool();
+
+      private:
+        void FreeMemory(ResourceHeapAllocation& allocation);
+
+        ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+            const D3D12_CLEAR_VALUE* optimizedClearValue,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
+            D3D12_HEAP_TYPE heapType,
+            const D3D12_RESOURCE_DESC& resourceDescriptor,
+            const D3D12_CLEAR_VALUE* optimizedClearValue,
+            D3D12_RESOURCE_STATES initialUsage);
+
+        Device* mDevice;
+        uint32_t mResourceHeapTier;
+
+        // Bounds for the buddy sub-allocator: resources larger than kMaxHeapSize cannot
+        // be allocated; heaps are never smaller than kMinHeapSize.
+        static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll;  // 32GB
+        static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll;            // 4MB
+
+        // One allocator chain per ResourceHeapKind.
+        std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
+            mSubAllocatedResourceAllocators;
+        std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
+
+        std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
+            mPooledHeapAllocators;
+
+        // Deferred-destruction queues keyed by the serial after which freeing is safe.
+        SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
+        SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
new file mode 100644
index 00000000000..910e4fb1245
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -0,0 +1,43 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+
+#include <utility>
+
+namespace dawn::native::d3d12 {
+    ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
+                                                   uint64_t offset,
+                                                   ComPtr<ID3D12Resource> resource,
+                                                   Heap* heap)
+        : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
+        // Externally-allocated resources are exactly the ones without a backing Heap.
+        ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
+    }
+
+    // Marks the allocation invalid and drops this object's reference to the resource.
+    void ResourceHeapAllocation::Invalidate() {
+        ResourceMemoryAllocation::Invalidate();
+        mResource.Reset();
+    }
+
+    // Returns the underlying resource without adding a reference; null after
+    // Invalidate().
+    ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
+        return mResource.Get();
+    }
+
+    // Returns the GPU virtual address of the resource. Must not be called on an
+    // invalidated allocation (mResource would be null).
+    D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
+        return mResource->GetGPUVirtualAddress();
+    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
new file mode 100644
index 00000000000..c9de601e280
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
+#define DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Heap;
+
+    // A resource memory allocation that additionally holds a reference to the
+    // ID3D12Resource created in it. Copyable; copies reference the same underlying
+    // resource via the shared ComPtr.
+    class ResourceHeapAllocation : public ResourceMemoryAllocation {
+      public:
+        ResourceHeapAllocation() = default;
+        ResourceHeapAllocation(const AllocationInfo& info,
+                               uint64_t offset,
+                               ComPtr<ID3D12Resource> resource,
+                               Heap* heap);
+        ~ResourceHeapAllocation() override = default;
+        ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
+        ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
+
+        // Also releases this object's reference to the resource.
+        void Invalidate() override;
+
+        ID3D12Resource* GetD3D12Resource() const;
+        D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
+
+      private:
+        ComPtr<ID3D12Resource> mResource;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp
new file mode 100644
index 00000000000..c656931947b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.cpp
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SamplerD3D12.h"
+
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+        // Converts a WebGPU address mode to the equivalent D3D12 texture address mode.
+        D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
+                case wgpu::AddressMode::ClampToEdge:
+                    return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+            }
+            // Guards against out-of-range enum values: without this, control could fall
+            // off the end of a non-void function (undefined behavior, MSVC C4715).
+            UNREACHABLE();
+        }
+ } // namespace
+
+    // static
+    // Samplers are immutable: the D3D12 sampler descriptor is fully computed in the
+    // constructor.
+    Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+        return AcquireRef(new Sampler(device, descriptor));
+    }
+
+    // Translates the WebGPU sampler descriptor into an equivalent D3D12_SAMPLER_DESC.
+    Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+        : SamplerBase(device, descriptor) {
+        D3D12_FILTER_TYPE minFilter;
+        switch (descriptor->minFilter) {
+            case wgpu::FilterMode::Nearest:
+                minFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                minFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        D3D12_FILTER_TYPE magFilter;
+        switch (descriptor->magFilter) {
+            case wgpu::FilterMode::Nearest:
+                magFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                magFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        D3D12_FILTER_TYPE mipmapFilter;
+        switch (descriptor->mipmapFilter) {
+            case wgpu::FilterMode::Nearest:
+                mipmapFilter = D3D12_FILTER_TYPE_POINT;
+                break;
+            case wgpu::FilterMode::Linear:
+                mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
+                break;
+        }
+
+        // A comparison sampler uses a comparison-reduction filter instead of standard
+        // filtering.
+        D3D12_FILTER_REDUCTION_TYPE reduction =
+            descriptor->compare == wgpu::CompareFunction::Undefined
+                ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
+                : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
+
+        // Clamp to D3D12's maximum anisotropy of 16.
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
+        mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+        // Anisotropic filtering replaces the individual min/mag/mip filter types.
+        if (mSamplerDesc.MaxAnisotropy > 1) {
+            mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
+        } else {
+            mSamplerDesc.Filter =
+                D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
+        }
+
+        mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
+        mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
+        mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
+        mSamplerDesc.MipLODBias = 0.f;
+
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
+        } else {
+            // Still set the function so it's not garbage.
+            mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
+        }
+        mSamplerDesc.MinLOD = descriptor->lodMinClamp;
+        mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+    }
+
+    // Returns the D3D12 sampler descriptor computed at construction time.
+    const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
+        return mSamplerDesc;
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h
new file mode 100644
index 00000000000..e296afbf3e1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerD3D12.h
@@ -0,0 +1,40 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SAMPLERD3D12_H_
+#define DAWNNATIVE_D3D12_SAMPLERD3D12_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
    // D3D12 backend implementation of a WebGPU sampler. The frontend descriptor is
    // translated once into a D3D12_SAMPLER_DESC at construction time.
    class Sampler final : public SamplerBase {
      public:
        static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);

        // Read-only view of the translated descriptor, consumed when populating
        // sampler descriptor heaps.
        const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;

      private:
        Sampler(Device* device, const SamplerDescriptor* descriptor);
        ~Sampler() override = default;
        // Fully derived from the frontend SamplerDescriptor in the constructor.
        D3D12_SAMPLER_DESC mSamplerDesc = {};
    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SAMPLERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
new file mode 100644
index 00000000000..4659b36e95c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -0,0 +1,166 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SamplerHeapCacheD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/d3d12/BindGroupD3D12.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/Forward.h"
+#include "dawn/native/d3d12/SamplerD3D12.h"
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+
    // Blueprint constructor: wraps only the sampler list so the entry can act as a lookup
    // key into the cache without owning any descriptor heap allocation.
    SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
        : mSamplers(std::move(samplers)) {
    }

    // Real-entry constructor: ties the sampler list to a live CPU descriptor allocation and
    // to the owning cache/allocator so the destructor can remove and deallocate it.
    SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
                                                 StagingDescriptorAllocator* allocator,
                                                 std::vector<Sampler*> samplers,
                                                 CPUDescriptorHeapAllocation allocation)
        : mCPUAllocation(std::move(allocation)),
          mSamplers(std::move(samplers)),
          mAllocator(allocator),
          mCache(cache) {
        ASSERT(mCache != nullptr);
        ASSERT(mCPUAllocation.IsValid());
        ASSERT(!mSamplers.empty());
    }

    // Moves the sampler list out of the entry. Used to recycle a blueprint's vector when
    // promoting it to a real cache entry, avoiding a copy.
    std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
        return std::move(mSamplers);
    }

    SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
        // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
        if (mCPUAllocation.IsValid()) {
            mCache->RemoveCacheEntry(this);
            mAllocator->Deallocate(&mCPUAllocation);
        }

        // Deallocate() must have invalidated the CPU-side allocation.
        ASSERT(!mCPUAllocation.IsValid());
    }
+
    // Ensures this entry has a valid GPU-visible descriptor allocation, (re)allocating and
    // copying the CPU-side descriptors over if the previous GPU allocation was invalidated
    // by a shader-visible heap switch. Returns false when GPU allocation fails so the
    // caller can switch heaps and retry.
    bool SamplerHeapCacheEntry::Populate(Device* device,
                                         ShaderVisibleDescriptorAllocator* allocator) {
        if (allocator->IsAllocationStillValid(mGPUAllocation)) {
            return true;
        }

        ASSERT(!mSamplers.empty());

        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
        // If either failed, return early to re-allocate and switch the heaps.
        const uint32_t descriptorCount = mSamplers.size();
        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
        if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
                                               &baseCPUDescriptor, &mGPUAllocation)) {
            return false;
        }

        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
        // simple copies per bindgroup, a single non-simple copy could be issued.
        // TODO(dawn:155): Consider doing this optimization.
        device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
                                                        mCPUAllocation.GetBaseDescriptor(),
                                                        D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);

        return true;
    }
+
    // GPU handle of the first sampler descriptor; only meaningful after a successful
    // Populate() for the currently bound shader-visible heap.
    D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
        return mGPUAllocation.GetBaseDescriptor();
    }
+
+ ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
+ const BindGroup* group,
+ StagingDescriptorAllocator* samplerAllocator) {
+ const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+
+ // If a previously created bindgroup used the same samplers, the backing sampler heap
+ // allocation can be reused. The packed list of samplers acts as the key to lookup the
+ // allocation in a cache.
+ // TODO(dawn:155): Avoid re-allocating the vector each lookup.
+ std::vector<Sampler*> samplers;
+ samplers.reserve(bgl->GetSamplerDescriptorCount());
+
+ for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+ bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+ if (bindingInfo.bindingType == BindingInfoType::Sampler) {
+ samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
+ }
+ }
+
+ // Check the cache if there exists a sampler heap allocation that corresponds to the
+ // samplers.
+ SamplerHeapCacheEntry blueprint(std::move(samplers));
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return Ref<SamplerHeapCacheEntry>(*iter);
+ }
+
+ // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
+ // real entry below.
+ samplers = std::move(blueprint.AcquireSamplers());
+
+ CPUDescriptorHeapAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
+
+ const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
+ ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
+
+ for (uint32_t i = 0; i < samplers.size(); ++i) {
+ const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
+ d3d12Device->CreateSampler(&samplerDesc,
+ allocation.OffsetFrom(samplerSizeIncrement, i));
+ }
+
+ Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
+ this, samplerAllocator, std::move(samplers), std::move(allocation)));
+ mCache.insert(entry.Get());
+ return std::move(entry);
+ }
+
    SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
    }

    SamplerHeapCache::~SamplerHeapCache() {
        // Entries remove themselves in ~SamplerHeapCacheEntry, so every entry must have
        // been released before the cache is destroyed.
        ASSERT(mCache.empty());
    }

    // Called from ~SamplerHeapCacheEntry once the entry's last reference is dropped.
    void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
        ASSERT(entry->GetRefCountForTesting() == 0);
        size_t removedCount = mCache.erase(entry);
        ASSERT(removedCount == 1);
    }
+
    // Hashes the packed sampler pointer list. Pointer identity is sufficient because the
    // device already de-duplicates Sampler objects (see the note in the header).
    size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
        size_t hash = 0;
        for (const Sampler* sampler : entry->mSamplers) {
            HashCombine(&hash, sampler);
        }
        return hash;
    }

    // Two entries are equal iff they reference the same samplers in the same order.
    bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
                                                         const SamplerHeapCacheEntry* b) const {
        return a->mSamplers == b->mSamplers;
    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
new file mode 100644
index 00000000000..be38d219cf4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
@@ -0,0 +1,107 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+#define DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+
+#include <unordered_set>
+
+// |SamplerHeapCacheEntry| maintains a cache of sampler descriptor heap allocations.
+// Each entry represents one or more sampler descriptors that co-exist in a CPU and
+// GPU descriptor heap. The CPU-side allocation is deallocated once the final reference
+// has been released while the GPU-side allocation is deallocated when the GPU is finished.
+//
+// The BindGroupLayout hands out these entries upon constructing the bindgroup. If the entry is not
+// invalid, it will allocate and initialize so it may be reused by another bindgroup.
+//
// The cache is primarily needed for the GPU sampler heap, which is much smaller than the view
// heap, and heap switches incur expensive pipeline flushes.
+namespace dawn::native::d3d12 {
+
+ class BindGroup;
+ class Device;
+ class Sampler;
+ class SamplerHeapCache;
+ class StagingDescriptorAllocator;
+ class ShaderVisibleDescriptorAllocator;
+
+ // Wraps sampler descriptor heap allocations in a cache.
    // Wraps sampler descriptor heap allocations in a cache.
    class SamplerHeapCacheEntry : public RefCounted {
      public:
        SamplerHeapCacheEntry() = default;
        // Blueprint constructor: key-only entry used for cache lookups, owns no allocation.
        SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
        // Real entry: owns a CPU descriptor allocation and unregisters itself from |cache|
        // on destruction.
        SamplerHeapCacheEntry(SamplerHeapCache* cache,
                              StagingDescriptorAllocator* allocator,
                              std::vector<Sampler*> samplers,
                              CPUDescriptorHeapAllocation allocation);
        ~SamplerHeapCacheEntry() override;

        // GPU handle of the first descriptor; valid only after a successful Populate().
        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;

        // Moves the sampler list out of this entry (used to recycle a blueprint's vector).
        std::vector<Sampler*>&& AcquireSamplers();

        // (Re)creates the GPU-visible allocation if a heap switch invalidated it.
        bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);

        // Functors necessary for the unordered_set<SamplerHeapCacheEntry*>-based cache.
        struct HashFunc {
            size_t operator()(const SamplerHeapCacheEntry* entry) const;
        };

        struct EqualityFunc {
            bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
        };

      private:
        CPUDescriptorHeapAllocation mCPUAllocation;
        GPUDescriptorHeapAllocation mGPUAllocation;

        // Storing raw pointer because the sampler object will be already hashed
        // by the device and will already be unique.
        std::vector<Sampler*> mSamplers;

        // Both null for blueprint entries; non-null for real cached entries.
        StagingDescriptorAllocator* mAllocator = nullptr;
        SamplerHeapCache* mCache = nullptr;
    };
+
+ // Cache descriptor heap allocations so that we don't create duplicate ones for every
+ // BindGroup.
    // Cache descriptor heap allocations so that we don't create duplicate ones for every
    // BindGroup.
    class SamplerHeapCache {
      public:
        SamplerHeapCache(Device* device);
        ~SamplerHeapCache();

        // Looks up (or creates) the entry keyed by the bindgroup's packed sampler list.
        ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
            const BindGroup* group,
            StagingDescriptorAllocator* samplerAllocator);

        // Called by ~SamplerHeapCacheEntry; the cache holds raw (non-owning) pointers.
        void RemoveCacheEntry(SamplerHeapCacheEntry* entry);

      private:
        Device* mDevice;

        // Set of non-owning entry pointers, keyed by the entries' sampler lists.
        using Cache = std::unordered_set<SamplerHeapCacheEntry*,
                                         SamplerHeapCacheEntry::HashFunc,
                                         SamplerHeapCacheEntry::EqualityFunc>;

        Cache mCache;
    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
new file mode 100644
index 00000000000..0dea76e1d6b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
@@ -0,0 +1,846 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ShaderModuleD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/WindowsUtils.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/d3d12/BindGroupLayoutD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/PipelineLayoutD3D12.h"
+#include "dawn/native/d3d12/PlatformFunctions.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <d3dcompiler.h>
+
+#include <tint/tint.h>
+#include <map>
+#include <sstream>
+#include <unordered_map>
+
+namespace dawn::native::d3d12 {
+
+ namespace {
        // Queries the DXC validator for its (major, minor) version and packs it into one
        // uint64_t (major in the high 32 bits). Used as a shader cache key component.
        ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
            ComPtr<IDxcVersionInfo> versionInfo;
            DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
                                  "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));

            uint32_t compilerMajor, compilerMinor;
            DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
                                  "IDxcVersionInfo::GetVersion"));

            // Pack both into a single version number.
            return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
        }

        // FXC version comes from the d3dcompiler header the build was compiled against.
        uint64_t GetD3DCompilerVersion() {
            return D3D_COMPILER_VERSION;
        }
+
+ struct CompareBindingPoint {
+ constexpr bool operator()(const tint::transform::BindingPoint& lhs,
+ const tint::transform::BindingPoint& rhs) const {
+ if (lhs.group != rhs.group) {
+ return lhs.group < rhs.group;
+ } else {
+ return lhs.binding < rhs.binding;
+ }
+ }
+ };
+
        // Serialize() overloads write a stable textual form of each compilation input into
        // the cache-key stream. The output only needs to be deterministic and injective
        // enough for keying; it is never parsed back.
        void Serialize(std::stringstream& output, const tint::ast::Access& access) {
            output << access;
        }

        void Serialize(std::stringstream& output,
                       const tint::transform::BindingPoint& binding_point) {
            output << "(BindingPoint";
            output << " group=" << binding_point.group;
            output << " binding=" << binding_point.binding;
            output << ")";
        }

        // Fallback for fundamental types (integers, bools, ...).
        template <typename T,
                  typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
        void Serialize(std::stringstream& output, const T& val) {
            output << val;
        }

        // Unordered maps are sorted first so the serialized form is deterministic.
        template <typename T>
        void Serialize(std::stringstream& output,
                       const std::unordered_map<tint::transform::BindingPoint, T>& map) {
            output << "(map";

            std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
                                                                                   map.end());
            for (auto& [bindingPoint, value] : sorted) {
                output << " ";
                Serialize(output, bindingPoint);
                output << "=";
                Serialize(output, value);
            }
            output << ")";
        }

        void Serialize(std::stringstream& output,
                       const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
            output << "(ArrayLengthFromUniformOptions";
            output << " ubo_binding=";
            Serialize(output, arrayLengthFromUniform.ubo_binding);
            output << " bindpoint_to_size_index=";
            Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
            output << ")";
        }
+
+ // 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
+ std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
+ std::ostringstream out;
+ out.precision(n);
+ out << std::fixed << v;
+ return out.str();
+ }
+
        // Renders an overridable-constant value as HLSL source text. When |entry| is
        // non-null its stored default is used; otherwise |value| (the pipeline override,
        // carried as a double) is converted to the constant's declared type first.
        std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
                                       const OverridableConstantScalar* entry,
                                       double value = 0) {
            switch (dawnType) {
                case EntryPointMetadata::OverridableConstant::Type::Boolean:
                    // Booleans are emitted as 0/1, which HLSL accepts for bool defines.
                    return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
                case EntryPointMetadata::OverridableConstant::Type::Float32:
                    // Fixed-point formatting preserves enough digits to round-trip a float.
                    return FloatToStringWithPrecision(entry ? entry->f32
                                                            : static_cast<float>(value));
                case EntryPointMetadata::OverridableConstant::Type::Int32:
                    return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
                case EntryPointMetadata::OverridableConstant::Type::Uint32:
                    return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
                default:
                    UNREACHABLE();
            }
        }
+
        // Prefix of the preprocessor defines Tint generates for overridable constants.
        constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";

        // Builds the (name, value) define pairs for every overridable constant of the entry
        // point: pipeline-supplied overrides first, then shader-declared defaults for the
        // rest. Defines are keyed by the constant's numeric id, not its WGSL name.
        void GetOverridableConstantsDefines(
            std::vector<std::pair<std::string, std::string>>* defineStrings,
            const PipelineConstantEntries* pipelineConstantEntries,
            const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
            std::unordered_set<std::string> overriddenConstants;

            // Set pipeline overridden values
            for (const auto& [name, value] : *pipelineConstantEntries) {
                overriddenConstants.insert(name);

                // This is already validated so `name` must exist
                const auto& moduleConstant = shaderEntryPointConstants->at(name);

                defineStrings->emplace_back(
                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
                    GetHLSLValueString(moduleConstant.type, nullptr, value));
            }

            // Set shader initialized default values
            for (const auto& iter : *shaderEntryPointConstants) {
                const std::string& name = iter.first;
                if (overriddenConstants.count(name) != 0) {
                    // This constant already has an overridden value
                    continue;
                }

                const auto& moduleConstant = shaderEntryPointConstants->at(name);

                // Uninitialized default values are okay since they are only defined to pass
                // compilation but not used
                defineStrings->emplace_back(
                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
                    GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
            }
        }
+
+ // The inputs to a shader compilation. These have been intentionally isolated from the
+ // device to help ensure that the pipeline cache key contains all inputs for compilation.
        // The inputs to a shader compilation. These have been intentionally isolated from the
        // device to help ensure that the pipeline cache key contains all inputs for compilation.
        struct ShaderCompilationRequest {
            enum Compiler { FXC, DXC };

            // Common inputs
            Compiler compiler;
            const tint::Program* program;
            const char* entryPointName;
            SingleShaderStage stage;
            uint32_t compileFlags;
            bool disableSymbolRenaming;
            tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
            tint::transform::BindingRemapper::AccessControls remappedAccessControls;
            bool isRobustnessEnabled;
            bool usesNumWorkgroups;
            uint32_t numWorkgroupsRegisterSpace;
            uint32_t numWorkgroupsShaderRegister;
            tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
            std::vector<std::pair<std::string, std::string>> defineStrings;

            // FXC/DXC common inputs
            bool disableWorkgroupInit;

            // FXC inputs
            uint64_t fxcVersion;

            // DXC inputs
            uint64_t dxcVersion;
            const D3D12DeviceInfo* deviceInfo;
            bool hasShaderFloat16Feature;

            // Gathers every compilation input from the device, layout, and entry point into a
            // self-contained request. Also computes the binding remappings needed to match
            // d3d12::BindGroupLayout's register assignment.
            static ResultOrError<ShaderCompilationRequest> Create(
                const char* entryPointName,
                SingleShaderStage stage,
                const PipelineLayout* layout,
                uint32_t compileFlags,
                const Device* device,
                const tint::Program* program,
                const EntryPointMetadata& entryPoint,
                const ProgrammableStage& programmableStage) {
                Compiler compiler;
                uint64_t dxcVersion = 0;
                if (device->IsToggleEnabled(Toggle::UseDXC)) {
                    compiler = Compiler::DXC;
                    DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
                } else {
                    compiler = Compiler::FXC;
                }

                using tint::transform::BindingPoint;
                using tint::transform::BindingRemapper;

                BindingRemapper::BindingPoints remappedBindingPoints;
                BindingRemapper::AccessControls remappedAccessControls;

                tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
                arrayLengthFromUniform.ubo_binding = {
                    layout->GetDynamicStorageBufferLengthsRegisterSpace(),
                    layout->GetDynamicStorageBufferLengthsShaderRegister()};

                const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
                for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
                    const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
                    const auto& groupBindingInfo = moduleBindingInfo[group];

                    // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
                    // the Tint AST to make the "bindings" decoration match the offset chosen by
                    // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
                    // assigned to each interface variable.
                    for (const auto& [binding, bindingInfo] : groupBindingInfo) {
                        BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
                        BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
                                                     static_cast<uint32_t>(binding)};
                        BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
                                                     bgl->GetShaderRegister(bindingIndex)};
                        if (srcBindingPoint != dstBindingPoint) {
                            remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
                        }

                        // Declaring a read-only storage buffer in HLSL but specifying a storage
                        // buffer in the BGL produces the wrong output. Force read-only storage
                        // buffer bindings to be treated as UAV instead of SRV. Internal storage
                        // buffer is a storage buffer used in the internal pipeline.
                        const bool forceStorageBufferAsUAV =
                            (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
                             (bgl->GetBindingInfo(bindingIndex).buffer.type ==
                                  wgpu::BufferBindingType::Storage ||
                              bgl->GetBindingInfo(bindingIndex).buffer.type ==
                                  kInternalStorageBufferBinding));
                        if (forceStorageBufferAsUAV) {
                            remappedAccessControls.emplace(srcBindingPoint,
                                                           tint::ast::Access::kReadWrite);
                        }
                    }

                    // Add arrayLengthFromUniform options
                    {
                        for (const auto& bindingAndRegisterOffset :
                             layout->GetDynamicStorageBufferLengthInfo()[group]
                                 .bindingAndRegisterOffsets) {
                            BindingNumber binding = bindingAndRegisterOffset.binding;
                            uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;

                            BindingPoint bindingPoint{static_cast<uint32_t>(group),
                                                      static_cast<uint32_t>(binding)};
                            // Get the renamed binding point if it was remapped.
                            auto it = remappedBindingPoints.find(bindingPoint);
                            if (it != remappedBindingPoints.end()) {
                                bindingPoint = it->second;
                            }

                            arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
                                                                                   registerOffset);
                        }
                    }
                }

                ShaderCompilationRequest request;
                request.compiler = compiler;
                request.program = program;
                request.entryPointName = entryPointName;
                request.stage = stage;
                request.compileFlags = compileFlags;
                request.disableSymbolRenaming =
                    device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
                request.remappedBindingPoints = std::move(remappedBindingPoints);
                request.remappedAccessControls = std::move(remappedAccessControls);
                request.isRobustnessEnabled = device->IsRobustnessEnabled();
                request.disableWorkgroupInit =
                    device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
                request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
                request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
                request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
                request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
                // Only the active compiler's version is recorded; the other stays 0 so it
                // doesn't perturb the cache key.
                request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
                request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
                request.deviceInfo = &device->GetDeviceInfo();
                request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);

                GetOverridableConstantsDefines(
                    &request.defineStrings, &programmableStage.constants,
                    &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
                         .overridableConstants);

                return std::move(request);
            }

            // Serializes every compilation input (including the normalized WGSL source) into
            // a persistent-cache key for the compiled shader blob.
            ResultOrError<PersistentCacheKey> CreateCacheKey() const {
                // Generate the WGSL from the Tint program so it's normalized.
                // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
                // compact representation.
                auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
                if (!result.success) {
                    std::ostringstream errorStream;
                    errorStream << "Tint WGSL failure:" << std::endl;
                    errorStream << "Generator: " << result.error << std::endl;
                    return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
                }

                std::stringstream stream;

                // Prefix the key with the type to avoid collisions from another type that could
                // have the same key.
                stream << static_cast<uint32_t>(PersistentKeyType::Shader);
                stream << "\n";

                // Length-prefix the WGSL so it cannot run into the fields that follow.
                stream << result.wgsl.length();
                stream << "\n";

                stream << result.wgsl;
                stream << "\n";

                stream << "(ShaderCompilationRequest";
                stream << " compiler=" << compiler;
                stream << " entryPointName=" << entryPointName;
                stream << " stage=" << uint32_t(stage);
                stream << " compileFlags=" << compileFlags;
                stream << " disableSymbolRenaming=" << disableSymbolRenaming;

                stream << " remappedBindingPoints=";
                Serialize(stream, remappedBindingPoints);

                stream << " remappedAccessControls=";
                Serialize(stream, remappedAccessControls);

                stream << " useNumWorkgroups=" << usesNumWorkgroups;
                stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
                stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;

                stream << " arrayLengthFromUniform=";
                Serialize(stream, arrayLengthFromUniform);

                stream << " shaderModel=" << deviceInfo->shaderModel;
                stream << " disableWorkgroupInit=" << disableWorkgroupInit;
                stream << " isRobustnessEnabled=" << isRobustnessEnabled;
                stream << " fxcVersion=" << fxcVersion;
                stream << " dxcVersion=" << dxcVersion;
                stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;

                stream << " defines={";
                for (const auto& [name, value] : defineStrings) {
                    stream << " <" << name << "," << value << ">";
                }
                stream << " }";

                stream << ")";
                stream << "\n";

                return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
                                          std::istreambuf_iterator<char>{});
            }
        };
+
        // Translates FXC-style D3DCOMPILE_* flags into the equivalent DXC command-line
        // arguments. Returned pointers are string literals, so the vector never dangles.
        std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
            std::vector<const wchar_t*> arguments;
            if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
                arguments.push_back(L"/Gec");
            }
            if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
                arguments.push_back(L"/Gis");
            }
            // The optimization level occupies two bits; D3DCOMPILE_OPTIMIZATION_LEVEL2 is the
            // mask covering both (LEVEL1 is the all-zero default and emits no argument).
            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
            if (compileFlags & d3dCompileFlagsBits) {
                switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
                    case D3DCOMPILE_OPTIMIZATION_LEVEL0:
                        arguments.push_back(L"/O0");
                        break;
                    case D3DCOMPILE_OPTIMIZATION_LEVEL2:
                        arguments.push_back(L"/O2");
                        break;
                    case D3DCOMPILE_OPTIMIZATION_LEVEL3:
                        arguments.push_back(L"/O3");
                        break;
                }
            }
            if (compileFlags & D3DCOMPILE_DEBUG) {
                arguments.push_back(L"/Zi");
            }
            if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
                arguments.push_back(L"/Zpr");
            }
            if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
                arguments.push_back(L"/Zpc");
            }
            if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
                arguments.push_back(L"/Gfa");
            }
            if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
                arguments.push_back(L"/Gfp");
            }
            if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
                arguments.push_back(L"/res_may_alias");
            }

            if (enable16BitTypes) {
                // enable-16bit-types are only allowed in -HV 2018 (default)
                arguments.push_back(L"/enable-16bit-types");
            }

            arguments.push_back(L"-HV");
            arguments.push_back(L"2018");

            return arguments;
        }
+
        // Compiles |hlslSource| with DXC for the request's stage/entry point. Returns the
        // compiled DXIL blob, or a validation error carrying DXC's diagnostic output.
        ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
                                                         IDxcCompiler* dxcCompiler,
                                                         const ShaderCompilationRequest& request,
                                                         const std::string& hlslSource) {
            ComPtr<IDxcBlobEncoding> sourceBlob;
            DAWN_TRY(
                CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
                                 hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
                             "DXC create blob"));

            // DXC takes wide strings for the entry point and defines.
            std::wstring entryPointW;
            DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));

            std::vector<const wchar_t*> arguments =
                GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);

            // Build defines for overridable constants. The wide strings are kept alive in
            // |defineStrings| while |dxcDefines| holds raw pointers into them.
            std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
            defineStrings.reserve(request.defineStrings.size());
            for (const auto& [name, value] : request.defineStrings) {
                defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
            }

            std::vector<DxcDefine> dxcDefines;
            dxcDefines.reserve(defineStrings.size());
            for (const auto& [name, value] : defineStrings) {
                dxcDefines.push_back({name.c_str(), value.c_str()});
            }

            ComPtr<IDxcOperationResult> result;
            DAWN_TRY(CheckHRESULT(
                dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
                                     request.deviceInfo->shaderProfiles[request.stage].c_str(),
                                     arguments.data(), arguments.size(), dxcDefines.data(),
                                     dxcDefines.size(), nullptr, &result),
                "DXC compile"));

            // Compile() succeeding only means the operation ran; the compile status is
            // reported separately.
            HRESULT hr;
            DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));

            if (FAILED(hr)) {
                ComPtr<IDxcBlobEncoding> errors;
                DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));

                return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
                                                    static_cast<char*>(errors->GetBufferPointer()));
            }

            ComPtr<IDxcBlob> compiledShader;
            DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
            return std::move(compiledShader);
        }
+
        // Renders FXC compile flags as a newline-separated list of flag names, for error
        // messages and logging.
        std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
            struct Flag {
                uint32_t value;
                const char* name;
            };
            constexpr Flag flags[] = {
            // Populated from d3dcompiler.h
#define F(f) Flag{f, #f}
                F(D3DCOMPILE_DEBUG),
                F(D3DCOMPILE_SKIP_VALIDATION),
                F(D3DCOMPILE_SKIP_OPTIMIZATION),
                F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
                F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
                F(D3DCOMPILE_PARTIAL_PRECISION),
                F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
                F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
                F(D3DCOMPILE_NO_PRESHADER),
                F(D3DCOMPILE_AVOID_FLOW_CONTROL),
                F(D3DCOMPILE_PREFER_FLOW_CONTROL),
                F(D3DCOMPILE_ENABLE_STRICTNESS),
                F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
                F(D3DCOMPILE_IEEE_STRICTNESS),
                F(D3DCOMPILE_RESERVED16),
                F(D3DCOMPILE_RESERVED17),
                F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
                F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
                F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
                F(D3DCOMPILE_ALL_RESOURCES_BOUND),
                F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
                F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
#undef F
            };

            std::string result;
            for (const Flag& f : flags) {
                if ((compileFlags & f.value) != 0) {
                    result += f.name + std::string("\n");
                }
            }

            // Optimization level must be handled separately as two bits are used, and the values
            // don't map neatly to 0-3.
            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
            switch (compileFlags & d3dCompileFlagsBits) {
                case D3DCOMPILE_OPTIMIZATION_LEVEL0:
                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
                    break;
                case D3DCOMPILE_OPTIMIZATION_LEVEL1:
                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
                    break;
                case D3DCOMPILE_OPTIMIZATION_LEVEL2:
                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
                    break;
                case D3DCOMPILE_OPTIMIZATION_LEVEL3:
                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
                    break;
            }
            result += std::string("\n");

            return result;
        }
+
        // Compiles |hlslSource| with the legacy FXC compiler (d3dCompile) using shader
        // model 5.1 profiles. Returns the compiled DXBC blob or a validation error with
        // FXC's diagnostics.
        ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
                                                         const ShaderCompilationRequest& request,
                                                         const std::string& hlslSource) {
            const char* targetProfile = nullptr;
            switch (request.stage) {
                case SingleShaderStage::Vertex:
                    targetProfile = "vs_5_1";
                    break;
                case SingleShaderStage::Fragment:
                    targetProfile = "ps_5_1";
                    break;
                case SingleShaderStage::Compute:
                    targetProfile = "cs_5_1";
                    break;
            }

            ComPtr<ID3DBlob> compiledShader;
            ComPtr<ID3DBlob> errors;

            // Build defines for overridable constants
            const D3D_SHADER_MACRO* pDefines = nullptr;
            std::vector<D3D_SHADER_MACRO> fxcDefines;
            if (request.defineStrings.size() > 0) {
                fxcDefines.reserve(request.defineStrings.size() + 1);
                for (const auto& [name, value] : request.defineStrings) {
                    fxcDefines.push_back({name.c_str(), value.c_str()});
                }
                // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
                fxcDefines.push_back({nullptr, nullptr});
                pDefines = fxcDefines.data();
            }

            // NOTE(review): this assumes |errors| is always non-null when d3dCompile fails;
            // D3DCompile can fail without producing an error blob (e.g. E_INVALIDARG), in
            // which case GetBufferPointer() below would dereference null — confirm upstream.
            DAWN_INVALID_IF(FAILED(functions->d3dCompile(
                                hlslSource.c_str(), hlslSource.length(), nullptr, pDefines, nullptr,
                                request.entryPointName, targetProfile, request.compileFlags, 0,
                                &compiledShader, &errors)),
                            "D3D compile failed with: %s",
                            static_cast<char*>(errors->GetBufferPointer()));

            return std::move(compiledShader);
        }
+
+    // Runs the Tint transform pipeline (robustness, binding remapping, single
+    // entry point, renaming) on |request.program| and generates HLSL text.
+    // On success, writes the (possibly renamed) entry point name to
+    // |*remappedEntryPointName| and returns the generated HLSL source.
+    ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
+                                               const ShaderCompilationRequest& request,
+                                               std::string* remappedEntryPointName) {
+        // NOTE(review): |errorStream| is written once here but never read —
+        // this looks like dead code left over from an earlier error path.
+        std::ostringstream errorStream;
+        errorStream << "Tint HLSL failure:" << std::endl;
+
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        if (request.isRobustnessEnabled) {
+            transformManager.Add<tint::transform::Robustness>();
+        }
+
+        transformManager.Add<tint::transform::BindingRemapper>();
+
+        transformManager.Add<tint::transform::SingleEntryPoint>();
+        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
+
+        transformManager.Add<tint::transform::Renamer>();
+
+        if (request.disableSymbolRenaming) {
+            // We still need to rename HLSL reserved keywords
+            transformInputs.Add<tint::transform::Renamer::Config>(
+                tint::transform::Renamer::Target::kHlslKeywords);
+        }
+
+        // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
+        // the remapping but should not be considered a collision because they have
+        // different types.
+        const bool mayCollide = true;
+        // NOTE(review): |request| is const here, so std::move on its members
+        // silently degrades to a copy — confirm whether a real move was intended.
+        transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
+            std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
+            mayCollide);
+
+        tint::Program transformedProgram;
+        tint::transform::DataMap transformOutputs;
+        {
+            TRACE_EVENT0(platform, General, "RunTransforms");
+            DAWN_TRY_ASSIGN(transformedProgram,
+                            RunTransforms(&transformManager, request.program, transformInputs,
+                                          &transformOutputs, nullptr));
+        }
+
+        // Recover the post-Renamer name of the entry point; when renaming is
+        // disabled the original name may legitimately be absent from the map.
+        if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+            auto it = data->remappings.find(request.entryPointName);
+            if (it != data->remappings.end()) {
+                *remappedEntryPointName = it->second;
+            } else {
+                DAWN_INVALID_IF(!request.disableSymbolRenaming,
+                                "Could not find remapped name for entry point.");
+
+                *remappedEntryPointName = request.entryPointName;
+            }
+        } else {
+            return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+        }
+
+        tint::writer::hlsl::Options options;
+        options.disable_workgroup_init = request.disableWorkgroupInit;
+        if (request.usesNumWorkgroups) {
+            options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
+            options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
+        }
+        // TODO(dawn:549): HLSL generation outputs the indices into the
+        // array_length_from_uniform buffer that were actually used. When the blob cache can
+        // store more than compiled shaders, we should reflect these used indices and store
+        // them as well. This would allow us to only upload root constants that are actually
+        // read by the shader.
+        options.array_length_from_uniform = request.arrayLengthFromUniform;
+        TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
+        auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
+        DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s",
+                        result.error);
+
+        return std::move(result.hlsl);
+    }
+
+    // End-to-end compilation helper: translates the Tint program to HLSL, then
+    // compiles it with either DXC or FXC depending on |request.compiler|,
+    // storing the result in |compiledShader|. When |dumpShaders| is true, the
+    // generated HLSL (and for FXC, the compile flags and DXBC disassembly) are
+    // emitted through the |DumpShadersEmitLog| callable.
+    template <typename F>
+    MaybeError CompileShader(dawn::platform::Platform* platform,
+                             const PlatformFunctions* functions,
+                             IDxcLibrary* dxcLibrary,
+                             IDxcCompiler* dxcCompiler,
+                             ShaderCompilationRequest&& request,
+                             bool dumpShaders,
+                             F&& DumpShadersEmitLog,
+                             CompiledShader* compiledShader) {
+        // Compile the source shader to HLSL.
+        std::string hlslSource;
+        std::string remappedEntryPoint;
+        DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
+        if (dumpShaders) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
+            DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+        // |entryPointName| now aliases the local |remappedEntryPoint| string; it
+        // is only valid for the remainder of this function.
+        request.entryPointName = remappedEntryPoint.c_str();
+        switch (request.compiler) {
+            case ShaderCompilationRequest::Compiler::DXC: {
+                TRACE_EVENT0(platform, General, "CompileShaderDXC");
+                DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
+                                CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
+                break;
+            }
+            case ShaderCompilationRequest::Compiler::FXC: {
+                TRACE_EVENT0(platform, General, "CompileShaderFXC");
+                DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
+                                CompileShaderFXC(functions, request, hlslSource));
+                break;
+            }
+        }
+
+        // FXC-only debug dump: compile flags plus a disassembly of the DXBC.
+        if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "/* FXC compile flags */ " << std::endl
+                      << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
+
+            dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
+
+            ComPtr<ID3DBlob> disassembly;
+            if (FAILED(functions->d3dDisassemble(
+                    compiledShader->compiledFXCShader->GetBufferPointer(),
+                    compiledShader->compiledFXCShader->GetBufferSize(), 0, nullptr,
+                    &disassembly))) {
+                dumpedMsg << "D3D disassemble failed" << std::endl;
+            } else {
+                dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
+            }
+            DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+
+        return {};
+    }
+
+ } // anonymous namespace
+
+    // static
+    // Creates and initializes a ShaderModule; returns an error (and drops the
+    // partially-built module) if initialization fails.
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    // Construction only forwards to the frontend base; all D3D12-specific work
+    // happens later in Initialize()/Compile().
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor) {
+    }
+
+    // Runs the frontend initialization with a Tint internal-compiler-error
+    // handler installed so Tint ICEs are reported as device errors.
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+        return InitializeBase(parseResult);
+    }
+
+    // Compiles one entry point of this module for |stage| against |layout|.
+    // Applies backend Tint transforms (external textures, and FirstIndexOffset
+    // for vertex shaders), reflects the first-vertex/instance offsets, and then
+    // fetches the shader blob from the persistent cache, compiling on a miss.
+    ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
+                                                        SingleShaderStage stage,
+                                                        const PipelineLayout* layout,
+                                                        uint32_t compileFlags) {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
+        ASSERT(!IsError());
+
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        Device* device = ToBackend(GetDevice());
+
+        CompiledShader compiledShader = {};
+
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        const tint::Program* program = GetTintProgram();
+        tint::Program programAsValue;
+
+        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+        // Vertex shaders additionally get the FirstIndexOffset transform so that
+        // non-zero base vertex/instance can be emulated via root constants.
+        if (stage == SingleShaderStage::Vertex) {
+            transformManager.Add<tint::transform::FirstIndexOffset>();
+            transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
+                layout->GetFirstIndexOffsetShaderRegister(),
+                layout->GetFirstIndexOffsetRegisterSpace());
+        }
+
+        tint::transform::DataMap transformOutputs;
+        DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
+                                                      &transformOutputs, nullptr));
+        program = &programAsValue;
+
+        // Reflect which of vertex_index/instance_index the shader actually uses
+        // and where the corresponding offsets live.
+        if (stage == SingleShaderStage::Vertex) {
+            if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
+                // TODO(dawn:549): Consider adding this information to the pipeline cache once we
+                // can store more than the shader blob in it.
+                compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
+                if (compiledShader.firstOffsetInfo.usesVertexIndex) {
+                    compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
+                }
+                compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
+                if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
+                    compiledShader.firstOffsetInfo.instanceIndexOffset =
+                        data->first_instance_offset;
+                }
+            }
+        }
+
+        ShaderCompilationRequest request;
+        DAWN_TRY_ASSIGN(
+            request, ShaderCompilationRequest::Create(
+                         programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
+                         program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
+
+        PersistentCacheKey shaderCacheKey;
+        DAWN_TRY_ASSIGN(shaderCacheKey, request.CreateCacheKey());
+
+        // Cache hit: |cachedShader| holds the blob. Cache miss: the lambda
+        // compiles (DXC when the UseDXC toggle is on, otherwise FXC) and stores
+        // the resulting bytecode via |doCache|.
+        DAWN_TRY_ASSIGN(
+            compiledShader.cachedShader,
+            device->GetPersistentCache()->GetOrCreate(
+                shaderCacheKey, [&](auto doCache) -> MaybeError {
+                    DAWN_TRY(CompileShader(
+                        device->GetPlatform(), device->GetFunctions(),
+                        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get()
+                                                                : nullptr,
+                        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get()
+                                                                : nullptr,
+                        std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
+                        [&](WGPULoggingType loggingType, const char* message) {
+                            GetDevice()->EmitLog(loggingType, message);
+                        },
+                        &compiledShader));
+                    const D3D12_SHADER_BYTECODE shader = compiledShader.GetD3D12ShaderBytecode();
+                    doCache(shader.pShaderBytecode, shader.BytecodeLength);
+                    return {};
+                }));
+
+        return std::move(compiledShader);
+    }
+
+    // Returns a view of whichever representation of the shader blob is present,
+    // preferring cached bytes over a fresh FXC blob over a fresh DXC blob.
+    // Exactly one representation is expected to be set; otherwise UNREACHABLE.
+    D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
+        if (cachedShader.buffer != nullptr) {
+            return {cachedShader.buffer.get(), cachedShader.bufferSize};
+        } else if (compiledFXCShader != nullptr) {
+            return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
+        } else if (compiledDXCShader != nullptr) {
+            return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+        }
+        UNREACHABLE();
+        return {};
+    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h
new file mode 100644
index 00000000000..2fd3a80380c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderModuleD3D12.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
+#define DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
+
+#include "dawn/native/PersistentCache.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native {
+ struct ProgrammableStage;
+} // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+ class PipelineLayout;
+
+    // Reflection data from the FirstIndexOffset transform: which builtin
+    // indices the shader reads and the byte offsets of their base values.
+    struct FirstOffsetInfo {
+        bool usesVertexIndex;       // Shader reads vertex_index.
+        uint32_t vertexIndexOffset;  // Valid only when usesVertexIndex.
+        bool usesInstanceIndex;     // Shader reads instance_index.
+        uint32_t instanceIndexOffset;  // Valid only when usesInstanceIndex.
+    };
+
+    // Manages a ref to one of the various representations of shader blobs and information used to
+    // emulate vertex/instance index starts
+    struct CompiledShader {
+        ScopedCachedBlob cachedShader;       // Bytes loaded from the persistent cache.
+        ComPtr<ID3DBlob> compiledFXCShader;  // Freshly-compiled FXC output.
+        ComPtr<IDxcBlob> compiledDXCShader;  // Freshly-compiled DXC output.
+        // Returns a view of whichever blob above is populated.
+        D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
+
+        FirstOffsetInfo firstOffsetInfo;
+    };
+
+    // D3D12 backend shader module. Construction is two-phase via Create();
+    // Compile() produces per-entry-point, per-layout bytecode on demand.
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        // Compiles |programmableStage| for |stage| against |layout| with the
+        // given FXC/DXC compile flags.
+        ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
+                                              SingleShaderStage stage,
+                                              const PipelineLayout* layout,
+                                              uint32_t compileFlags);
+
+      private:
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override = default;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
new file mode 100644
index 00000000000..32d6cd6cb9c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -0,0 +1,254 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    // Limits the min/max heap size to always be some known value for testing.
+    // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
+    // We change the value from {1024, 512} to {32, 16} because we use blending
+    // for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
+    // and low precision at big integer.
+    // NOTE(review): this array is indexed directly by D3D12_DESCRIPTOR_HEAP_TYPE,
+    // so it assumes CBV_SRV_UAV == 0 and SAMPLER == 1 — confirm against d3d12.h.
+    static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
+
+    // Returns the initial (minimum) descriptor count for a shader-visible heap
+    // of |heapType|; |useSmallSize| selects the tiny sizes used by tests.
+    uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+                                              bool useSmallSize) {
+        if (useSmallSize) {
+            return kShaderVisibleSmallHeapSizes[heapType];
+        }
+
+        // Minimum heap size must be large enough to satisfy the largest descriptor allocation
+        // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
+        // memory should only a tiny fraction ever be used.
+        // TODO(dawn:155): Figure out these values.
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+                return 4096;
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return 256;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // Returns the descriptor-count growth cap for a shader-visible heap of
+    // |heapType|, using the D3D12 tier-1 hardware limits in the normal case.
+    uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+                                              bool useSmallSize) {
+        if (useSmallSize) {
+            return kShaderVisibleSmallHeapSizes[heapType];
+        }
+
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+                return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // Maps |heapType| to its creation flags; only the two shader-visible heap
+    // types are supported by this allocator.
+    D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+        switch (heapType) {
+            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+                return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // static
+    // Creates an allocator and eagerly switches in its first heap so it is
+    // usable immediately after creation.
+    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
+    ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+        std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
+            std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
+        DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
+        return std::move(allocator);
+    }
+
+    // Seeds the growth strategy at the heap type's minimum size; the handle
+    // increment is queried once since it is constant per device and heap type.
+    ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
+        Device* device,
+        D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+        : mHeapType(heapType),
+          mDevice(device),
+          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+          mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
+              heapType,
+              mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
+        ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+               heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+    }
+
+    // Sub-allocates |descriptorCount| contiguous descriptors from the current
+    // heap's ring buffer, tagged with |pendingSerial| for later reclamation.
+    // Returns false when the ring buffer is full — the caller must then call
+    // AllocateAndSwitchShaderVisibleHeap() and retry.
+    bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
+        uint32_t descriptorCount,
+        ExecutionSerial pendingSerial,
+        D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+        GPUDescriptorHeapAllocation* allocation) {
+        ASSERT(mHeap != nullptr);
+        const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
+        if (startOffset == RingBufferAllocator::kInvalidOffset) {
+            return false;
+        }
+
+        ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
+
+        // Convert the ring-buffer slot index into a byte offset into the heap.
+        const uint64_t heapOffset = mSizeIncrement * startOffset;
+
+        // Check for 32-bit overflow since CPU heap start handle uses size_t.
+        const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
+
+        ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
+
+        *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
+
+        const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
+            descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
+
+        // Record both the device and heap serials to determine later if the allocations are
+        // still valid.
+        *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
+
+        return true;
+    }
+
+    // Returns the currently switched-in heap (the one bindable to the pipeline).
+    ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
+        return mHeap->GetD3D12DescriptorHeap();
+    }
+
+    // Reclaims ring-buffer space whose GPU work finished at |completedSerial|.
+    void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+        mAllocator.Deallocate(completedSerial);
+    }
+
+    // Creates a new shader-visible descriptor heap of |descriptorCount|
+    // descriptors, ensuring residency budget first and registering the heap
+    // with the residency manager's LRU on success.
+    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
+    ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
+        // The size in bytes of a descriptor heap is best calculated by the increment size
+        // multiplied by the number of descriptors. In practice, this is only an estimate and
+        // the actual size may vary depending on the driver.
+        // NOTE(review): both operands are uint32_t, so this multiply happens in
+        // 32 bits before widening — confirm the product cannot overflow for the
+        // maximum tier-1 heap sizes.
+        const uint64_t kSize = mSizeIncrement * descriptorCount;
+
+        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+
+        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
+        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+        heapDescriptor.Type = mHeapType;
+        heapDescriptor.NumDescriptors = descriptorCount;
+        heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
+        heapDescriptor.NodeMask = 0;
+        DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+                                             &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+                                         "ID3D12Device::CreateDescriptorHeap"));
+
+        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
+            std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
+
+        // We must track the allocation in the LRU when it is created, otherwise the residency
+        // manager will see the allocation as non-resident in the later call to LockAllocation.
+        mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
+
+        return std::move(descriptorHeap);
+    }
+
+    // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
+    // Switches out the current heap (if any) and switches in a fresh one:
+    // either a bigger new heap (growth phase), a recycled heap from the pool,
+    // or a newly allocated one. Bumps the heap serial so stale bindgroup
+    // allocations can be detected.
+    MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
+        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
+        // Dynamically allocate using a two-phase allocation strategy.
+        // The first phase increasingly grows a small heap in binary sizes for light users while the
+        // second phase pool-allocates largest sized heaps for heavy users.
+        if (mHeap != nullptr) {
+            mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
+
+            const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
+                mHeapType,
+                mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+            if (mDescriptorCount < maxDescriptorCount) {
+                // Phase #1. Grow the heaps in powers-of-two.
+                mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
+                mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
+            } else {
+                // Phase #2. Pool-allocate heaps.
+                // Return the switched out heap to the pool and retrieve the oldest heap that is no
+                // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
+                // heaps for heavy users.
+                // TODO(dawn:256): Consider periodically triming to avoid OOM.
+                mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
+                if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+                    descriptorHeap = std::move(mPool.front().heap);
+                    mPool.pop_front();
+                }
+            }
+        }
+
+        if (descriptorHeap == nullptr) {
+            DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
+        }
+
+        // Lock residency: the switched-in heap may be used by the GPU at any time.
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
+
+        // Create a FIFO buffer from the recently created heap.
+        mHeap = std::move(descriptorHeap);
+        mAllocator = RingBufferAllocator(mDescriptorCount);
+
+        // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
+        // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
+        // heap serial.
+        mHeapSerial++;
+
+        return {};
+    }
+
+    // ---- Accessors below are for tests only (see *ForTesting in the header). ----
+
+    HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
+        return mHeapSerial;
+    }
+
+    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
+        return mAllocator.GetSize();
+    }
+
+    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
+        return mPool.size();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
+        return mHeap->IsResidencyLocked();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
+        ASSERT(!mPool.empty());
+        return mPool.back().heap->IsInResidencyLRUCache();
+    }
+
+    bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
+        const GPUDescriptorHeapAllocation& allocation) const {
+        // Consider valid if allocated for the pending submit and the shader visible heaps
+        // have not switched over.
+        return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
+                allocation.GetHeapSerial() == mHeapSerial);
+    }
+
+    // The ComPtr is intentionally copied into the Pageable base (base class
+    // initializers run before member initializers) and then moved into the
+    // member, so both end up holding a reference.
+    ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
+        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+        uint64_t size)
+        : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
+          mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
+    }
+
+    // Non-owning accessor; the heap stays alive as long as this object does.
+    ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
+        return mD3d12DescriptorHeap.Get();
+    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
new file mode 100644
index 00000000000..dca8b2995dc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -0,0 +1,105 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+#define DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/PageableD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+#include <list>
+
+// |ShaderVisibleDescriptorAllocator| allocates a variable-sized block of descriptors from a GPU
+// descriptor heap pool.
+// Internally, it manages a list of heaps using a ringbuffer block allocator. The heap is in one
+// of two states: switched in or out. Only a switched in heap can be bound to the pipeline. If
+// the heap is full, the caller must switch-in a new heap before re-allocating and the old one
+// is returned to the pool.
+namespace dawn::native::d3d12 {
+
+ class Device;
+ class GPUDescriptorHeapAllocation;
+
+    // A single shader-visible ID3D12DescriptorHeap wrapped as a Pageable so the
+    // residency manager can lock/evict it.
+    class ShaderVisibleDescriptorHeap : public Pageable {
+      public:
+        ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+                                    uint64_t size);
+        ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
+
+      private:
+        ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+    };
+
+    // See the file-level comment: ring-buffer sub-allocation over a growable,
+    // pooled set of shader-visible descriptor heaps of a single heap type.
+    class ShaderVisibleDescriptorAllocator {
+      public:
+        static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
+            Device* device,
+            D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+        ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+        // Returns true if the allocation was successful, when false is returned the current heap is
+        // full and AllocateAndSwitchShaderVisibleHeap() must be called.
+        bool AllocateGPUDescriptors(uint32_t descriptorCount,
+                                    ExecutionSerial pendingSerial,
+                                    D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+                                    GPUDescriptorHeapAllocation* allocation);
+
+        // Reclaims ring-buffer space for work completed at |completedSerial|.
+        void Tick(ExecutionSerial completedSerial);
+
+        ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
+        MaybeError AllocateAndSwitchShaderVisibleHeap();
+
+        // For testing purposes only.
+        HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
+        uint64_t GetShaderVisibleHeapSizeForTesting() const;
+        uint64_t GetShaderVisiblePoolSizeForTesting() const;
+        bool IsShaderVisibleHeapLockedResidentForTesting() const;
+        bool IsLastShaderVisibleHeapInLRUForTesting() const;
+
+        // True if |allocation| was made against the current heap for the pending submit.
+        bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
+
+      private:
+        struct SerialDescriptorHeap {
+            ExecutionSerial heapSerial;
+            std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
+        };
+
+        ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
+            uint32_t descriptorCount) const;
+
+        std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
+        RingBufferAllocator mAllocator;
+        std::list<SerialDescriptorHeap> mPool;
+        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+
+        Device* mDevice;
+
+        // The serial value of 0 means the shader-visible heaps have not been allocated.
+        // This value is never returned in the GPUDescriptorHeapAllocation after
+        // AllocateGPUDescriptors() is called.
+        HeapVersionID mHeapSerial = HeapVersionID(0);
+
+        uint32_t mSizeIncrement;
+
+        // The descriptor count is the current size of the heap in number of descriptors.
+        // This is stored on the allocator to avoid extra conversions.
+        uint32_t mDescriptorCount = 0;
+    };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp
new file mode 100644
index 00000000000..e608a14e45d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.cpp
@@ -0,0 +1,77 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    // Records size and device only; the D3D12 resource is created in Initialize().
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    // Allocates an upload-heap buffer, residency-locks it (a persistently
+    // mapped buffer must never be evicted), names it for debugging, and maps
+    // it so CPU writes can begin immediately.
+    MaybeError StagingBuffer::Initialize() {
+        D3D12_RESOURCE_DESC resourceDescriptor;
+        resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+        resourceDescriptor.Alignment = 0;
+        resourceDescriptor.Width = GetSize();
+        resourceDescriptor.Height = 1;
+        resourceDescriptor.DepthOrArraySize = 1;
+        resourceDescriptor.MipLevels = 1;
+        resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+        resourceDescriptor.SampleDesc.Count = 1;
+        resourceDescriptor.SampleDesc.Quality = 0;
+        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+        resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
+
+        DAWN_TRY_ASSIGN(mUploadHeap,
+                        mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
+                                                D3D12_RESOURCE_STATE_GENERIC_READ));
+
+        // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+        // evicted. This buffer should already have been made resident when it was created.
+        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
+            ToBackend(mUploadHeap.GetResourceHeap())));
+
+        SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
+
+        return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
+    }
+
+    // Tears down in reverse order of Initialize(): unlock residency, unmap,
+    // then release the allocation. Safe to run after a failed Initialize().
+    StagingBuffer::~StagingBuffer() {
+        // Always check if the allocation is valid before Unmap.
+        // The resource would not exist had it failed to allocate.
+        if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
+            return;
+        }
+
+        // The underlying heap was locked in residency upon creation. We must unlock it when this
+        // buffer becomes unmapped.
+        mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
+
+        // Invalidate the CPU virtual address & flush cache (if needed).
+        GetResource()->Unmap(0, nullptr);
+        mMappedPointer = nullptr;
+
+        mDevice->DeallocateMemory(mUploadHeap);
+    }
+
+    // Non-owning accessor for the underlying upload-heap resource.
+    ID3D12Resource* StagingBuffer::GetResource() const {
+        return mUploadHeap.GetD3D12Resource();
+    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h
new file mode 100644
index 00000000000..b8105416493
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingBufferD3D12.h
@@ -0,0 +1,41 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERD3D12_H_
+#define DAWNNATIVE_STAGINGBUFFERD3D12_H_
+
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+    // CPU-writable staging buffer backed by a D3D12 upload heap. After a successful
+    // Initialize() the resource is persistently mapped and its heap is locked resident
+    // for the lifetime of the buffer.
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+        ~StagingBuffer() override;
+
+        // Returns the ID3D12Resource backing the upload-heap allocation.
+        ID3D12Resource* GetResource() const;
+
+        // Allocates the upload heap, locks it resident, and maps the resource.
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        ResourceHeapAllocation mUploadHeap;
+    };
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_STAGINGBUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
new file mode 100644
index 00000000000..b64da3007a1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
@@ -0,0 +1,152 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Math.h"
+
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+    // |descriptorCount| is the number of descriptors in one allocation block;
+    // |heapSize| is the requested number of descriptors per backing heap, rounded up
+    // to a multiple of |descriptorCount| so each heap divides evenly into blocks.
+    StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
+                                                           uint32_t descriptorCount,
+                                                           uint32_t heapSize,
+                                                           D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+        : mDevice(device),
+          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+          mBlockSize(descriptorCount * mSizeIncrement),
+          mHeapSize(RoundUp(heapSize, descriptorCount)),
+          mHeapType(heapType) {
+        // A single block must fit within one heap.
+        ASSERT(descriptorCount <= heapSize);
+    }
+
+    StagingDescriptorAllocator::~StagingDescriptorAllocator() {
+        // On destruction every block must have been returned to its heap's free-list
+        // and every heap must be back in the available list (i.e. nothing leaked).
+        const Index expectedFreeBlocks = GetFreeBlockIndicesSize();
+        for (size_t i = 0; i < mPool.size(); ++i) {
+            ASSERT(mPool[i].freeBlockIndices.size() == expectedFreeBlocks);
+        }
+        ASSERT(mAvailableHeaps.size() == mPool.size());
+    }
+
+    // Pops one fixed-size block from the free-list of the most recently available
+    // heap, growing the pool with a new heap if none has room.
+    ResultOrError<CPUDescriptorHeapAllocation>
+    StagingDescriptorAllocator::AllocateCPUDescriptors() {
+        if (mAvailableHeaps.empty()) {
+            DAWN_TRY(AllocateCPUHeap());
+        }
+
+        ASSERT(!mAvailableHeaps.empty());
+
+        const uint32_t heapIndex = mAvailableHeaps.back();
+        NonShaderVisibleBuffer& buffer = mPool[heapIndex];
+
+        ASSERT(!buffer.freeBlockIndices.empty());
+
+        const Index blockIndex = buffer.freeBlockIndices.back();
+
+        buffer.freeBlockIndices.pop_back();
+
+        // The heap is now fully allocated; remove it from the available list.
+        // Deallocate() re-adds it once a block is returned.
+        if (buffer.freeBlockIndices.empty()) {
+            mAvailableHeaps.pop_back();
+        }
+
+        // Descriptor handle = heap start + (block index * block size in bytes).
+        const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
+            buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
+
+        return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+    }
+
+    // Creates a new non-shader-visible descriptor heap, seeds its free-list with
+    // every block index, and appends it to the pool as an available heap.
+    MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
+        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+        heapDescriptor.Type = mHeapType;
+        heapDescriptor.NumDescriptors = mHeapSize;
+        heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+        heapDescriptor.NodeMask = 0;
+
+        ComPtr<ID3D12DescriptorHeap> heap;
+        DAWN_TRY(CheckHRESULT(
+            mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
+            "ID3D12Device::CreateDescriptorHeap"));
+
+        NonShaderVisibleBuffer newBuffer;
+        newBuffer.heap = std::move(heap);
+
+        // Every block starts out free: populate the free-list with all block indices.
+        const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+        newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
+
+        for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
+            newBuffer.freeBlockIndices.push_back(blockIndex);
+        }
+
+        // The new heap's pool index is the current pool size.
+        mAvailableHeaps.push_back(mPool.size());
+        mPool.emplace_back(std::move(newBuffer));
+
+        return {};
+    }
+
+    // Returns |allocation|'s block to its heap's free-list and invalidates the handle.
+    void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
+        ASSERT(allocation->IsValid());
+
+        const uint32_t heapIndex = allocation->GetHeapIndex();
+
+        ASSERT(heapIndex < mPool.size());
+
+        // Insert the deallocated block back into the free-list. Order does not matter. However,
+        // having blocks be non-contiguous could slow down future allocations due to poor cache
+        // locality.
+        // TODO(dawn:155): Consider more optimization.
+        std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
+        if (freeBlockIndices.empty()) {
+            // The heap was fully allocated (and thus absent from the available list);
+            // make it available again now that it has a free block.
+            mAvailableHeaps.emplace_back(heapIndex);
+        }
+
+        const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
+            mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
+
+        const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
+
+        // Recover the block index from the byte distance to the heap start.
+        const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
+
+        freeBlockIndices.emplace_back(blockIndex);
+
+        // Invalidate the handle in case the developer accidentally uses it again.
+        allocation->Invalidate();
+    }
+
+    // Size in bytes of a single descriptor for this allocator's heap type.
+    uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
+        const uint32_t descriptorSizeInBytes = mSizeIncrement;
+        return descriptorSizeInBytes;
+    }
+
+    // Number of fixed-size blocks that fit in one heap:
+    // (heap size in bytes) / (block size in bytes).
+    StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
+        const uint32_t heapSizeInBytes = mHeapSize * mSizeIncrement;
+        return heapSizeInBytes / mBlockSize;
+    }
+
+    // Allocates descriptors that live until the pending command serial completes;
+    // the allocation is queued for automatic Deallocate() in Tick().
+    ResultOrError<CPUDescriptorHeapAllocation>
+    StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
+        CPUDescriptorHeapAllocation allocation;
+        DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
+        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+        return allocation;
+    }
+
+    // Deallocates all transient allocations whose submission serial has completed.
+    void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+        for (CPUDescriptorHeapAllocation& allocation :
+             mAllocationsToDelete.IterateUpTo(completedSerial)) {
+            Deallocate(&allocation);
+        }
+
+        mAllocationsToDelete.ClearUpTo(completedSerial);
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
new file mode 100644
index 00000000000..454aa09add8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
@@ -0,0 +1,85 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
+#define DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
+
+#include "dawn/native/Error.h"
+
+#include "dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
+
+#include <vector>
+
+// |StagingDescriptorAllocator| allocates a fixed-size block of descriptors from a CPU
+// descriptor heap pool.
+// Internally, it manages a list of heaps using a fixed-size block allocator. The fixed-size
+// block allocator is backed by a list of free blocks (free-list). The heap is in one of two
+// states: AVAILABLE or not. To allocate, the next free block is removed from the free-list
+// and the corresponding heap offset is returned. The AVAILABLE heap always has room for
+// at least one free block. If no AVAILABLE heap exists, a new heap is created and inserted
+// back into the pool to be immediately used. To deallocate, the block corresponding to the
+// offset is inserted back into the free-list.
+namespace dawn::native::d3d12 {
+
+ class Device;
+
+    class StagingDescriptorAllocator {
+      public:
+        // NOTE(review): the default constructor leaves mDevice and the size/heap
+        // members uninitialized — confirm it is only used for placeholder objects.
+        StagingDescriptorAllocator() = default;
+        StagingDescriptorAllocator(Device* device,
+                                   uint32_t descriptorCount,
+                                   uint32_t heapSize,
+                                   D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+        ~StagingDescriptorAllocator();
+
+        // Allocates one fixed-size block of CPU descriptors, growing the pool with a
+        // new heap if none has a free block.
+        ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
+
+        // Will call Deallocate when the serial is passed.
+        ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
+
+        // Returns the allocation's block to its heap's free-list and invalidates it.
+        void Deallocate(CPUDescriptorHeapAllocation* allocation);
+
+        uint32_t GetSizeIncrement() const;
+
+        // Deallocates transient allocations whose serial has completed.
+        void Tick(ExecutionSerial completedSerial);
+
+      private:
+        using Index = uint16_t;
+
+        struct NonShaderVisibleBuffer {
+            ComPtr<ID3D12DescriptorHeap> heap;
+            std::vector<Index> freeBlockIndices;
+        };
+
+        MaybeError AllocateCPUHeap();
+
+        // Number of fixed-size blocks per heap.
+        Index GetFreeBlockIndicesSize() const;
+
+        std::vector<uint32_t> mAvailableHeaps; // Indices into the pool.
+        std::vector<NonShaderVisibleBuffer> mPool;
+
+        Device* mDevice;
+
+        uint32_t mSizeIncrement; // Size of the descriptor (in bytes).
+        uint32_t mBlockSize;     // Size of the block of descriptors (in bytes).
+        uint32_t mHeapSize;      // Size of the heap (in number of descriptors).
+
+        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+
+        // Transient allocations pending deallocation, keyed by submission serial.
+        SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp
new file mode 100644
index 00000000000..f37f2f7ed86
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.cpp
@@ -0,0 +1,377 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/SwapChainD3D12.h"
+
+#include "dawn/native/Surface.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include <dawn/dawn_wsi.h>
+
+#include <windows.ui.xaml.media.dxinterop.h>
+
+namespace dawn::native::d3d12 {
+ namespace {
+
+        // Maps a wgpu::PresentMode to the DXGI swapchain buffer count:
+        // 2 for Immediate/Fifo, 3 for Mailbox.
+        uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
+            switch (mode) {
+                case wgpu::PresentMode::Immediate:
+                case wgpu::PresentMode::Fifo:
+                    return 2;
+                case wgpu::PresentMode::Mailbox:
+                    return 3;
+            }
+            // All enum values are handled above; reaching here means a corrupted
+            // enum value. Also avoids falling off the end of a non-void function
+            // (UB; MSVC warning C4715).
+            UNREACHABLE();
+        }
+
+        // Maps a wgpu::PresentMode to the DXGI Present sync interval:
+        // 0 (no vsync wait) for Immediate/Mailbox, 1 for Fifo.
+        uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
+            switch (mode) {
+                case wgpu::PresentMode::Immediate:
+                case wgpu::PresentMode::Mailbox:
+                    return 0;
+                case wgpu::PresentMode::Fifo:
+                    return 1;
+            }
+            // All enum values are handled above; reaching here means a corrupted
+            // enum value. Also avoids falling off the end of a non-void function
+            // (UB; MSVC warning C4715).
+            UNREACHABLE();
+        }
+
+        // DXGI swapchain creation flags for the given present mode. Tearing is
+        // only allowed for Immediate presentation.
+        UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
+            const bool allowTearing = (mode == wgpu::PresentMode::Immediate);
+
+            UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
+            if (allowTearing) {
+                flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
+            }
+            return flags;
+        }
+
+        // Converts the wgpu::TextureUsage bits that have DXGI equivalents into a
+        // DXGI_USAGE mask; usage bits without an equivalent here are dropped.
+        DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
+            DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
+            if (usage & wgpu::TextureUsage::TextureBinding) {
+                dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
+            }
+            if (usage & wgpu::TextureUsage::StorageBinding) {
+                dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
+            }
+            if (usage & wgpu::TextureUsage::RenderAttachment) {
+                dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+            }
+            return dxgiUsage;
+        }
+
+ } // namespace
+
+ // OldSwapChain
+
+    // static
+    // Factory helper: constructs an OldSwapChain and hands ownership to a Ref.
+    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+        OldSwapChain* swapChain = new OldSwapChain(device, descriptor);
+        return AcquireRef(swapChain);
+    }
+
+    // Initializes the user-provided WSI implementation with a D3D12 context and
+    // caches the texture usage it requires for presentation.
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        DawnWSIContextD3D12 wsiContext = {};
+        wsiContext.device = ToAPI(GetDevice());
+        im.Init(im.userData, &wsiContext);
+
+        // The implementation must declare the usage its textures need.
+        ASSERT(im.textureUsage != WGPUTextureUsage_None);
+        mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+    }
+
+    OldSwapChain::~OldSwapChain() = default;  // Out-of-line defaulted destructor.
+
+    // Asks the WSI implementation for the next texture and wraps the returned
+    // ID3D12Resource in a Dawn Texture. Reports the error to the device and
+    // returns nullptr on failure.
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        DeviceBase* device = GetDevice();
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+        if (error) {
+            device->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+
+        ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
+        Ref<Texture> dawnTexture;
+        if (device->ConsumedError(
+                Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
+                &dawnTexture)) {
+            return nullptr;
+        }
+
+        // Caller takes ownership of the reference.
+        return dawnTexture.Detach();
+    }
+
+    // Transitions the about-to-be-presented texture to the usage the WSI
+    // implementation requested, then flushes the pending command list.
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+        Device* device = ToBackend(GetDevice());
+
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+        // Perform the necessary transition for the texture to be presented.
+        ToBackend(view->GetTexture())
+            ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
+                                         view->GetSubresourceRange());
+
+        DAWN_TRY(device->ExecutePendingCommandContext());
+
+        return {};
+    }
+
+ // SwapChain
+
+    // static
+    // Creates the swapchain and runs Initialize(), which may reuse the DXGI
+    // swapchain and buffers from |previousSwapChain|.
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    SwapChain::~SwapChain() = default;  // Teardown happens via DestroyImpl()/DetachFromSurface().
+
+    // Destruction of the API object also detaches from the surface so the DXGI
+    // swapchain and buffer references are released.
+    void SwapChain::DestroyImpl() {
+        SwapChainBase::DestroyImpl();
+        DetachFromSurface();
+    }
+
+    // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
+    // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
+    // surface and that we have a chance to reuse its underlying IDXGISwapChain and "buffers".
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        // NOTE(review): InitializeSwapChainFromScratch also handles CoreWindow and
+        // SwapChainPanel surfaces; this HWND-only assert may be stale — confirm.
+        ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
+
+        // Precompute the configuration parameters we want for the DXGI swapchain.
+        mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
+        mConfig.format = D3D12TextureFormat(GetFormat());
+        mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
+        mConfig.usage = ToDXGIUsage(GetUsage());
+
+        // There is no previous swapchain so we can create one directly and don't have anything else
+        // to do.
+        if (previousSwapChain == nullptr) {
+            return InitializeSwapChainFromScratch();
+        }
+
+        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+        // multiple backends one after the other. It probably needs to block until the backend
+        // and GPU are completely finished with the previous swapchain.
+        DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
+                        "D3D12 SwapChain cannot switch backend types from %s to %s.",
+                        previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
+
+        // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+        SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+        // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
+        // require just losing the reference to the swapchain, but might also need to wait for
+        // all previous operations to complete.
+        DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
+                        "D3D12 SwapChain cannot switch between D3D Devices");
+
+        // The previous swapchain is on the same device so we want to reuse it but it is still not
+        // always possible. Because DXGI requires that a new swapchain be created if the
+        // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
+        bool canReuseSwapChain =
+            ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
+             DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
+
+        // We can't reuse the previous swapchain, so we destroy it and wait for all of its
+        // references to be forgotten (otherwise DXGI complains that there are outstanding
+        // references).
+        if (!canReuseSwapChain) {
+            DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+            return InitializeSwapChainFromScratch();
+        }
+
+        // After all this we know we can reuse the swapchain, see if it is possible to also reuse
+        // the buffers.
+        mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
+
+        bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
+                               GetHeight() == previousSwapChain->GetHeight() &&
+                               GetFormat() == previousSwapChain->GetFormat() &&
+                               GetPresentMode() == previousSwapChain->GetPresentMode();
+        if (canReuseBuffers) {
+            mBuffers = std::move(previousD3D12SwapChain->mBuffers);
+            mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
+            mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
+            return {};
+        }
+
+        // We can't reuse the buffers so we need to resize. IDXGISwapChain->ResizeBuffers requires
+        // that all references to buffers are lost before it is called. Contrary to D3D11, the
+        // application is responsible for keeping references to the buffers until the GPU is done
+        // using them so we have no choice but to synchronously wait for all operations to complete
+        // on the previous swapchain and then lose references to its buffers.
+        DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+        DAWN_TRY(
+            CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
+                                                       mConfig.format, mConfig.swapChainFlags),
+                         "IDXGISwapChain::ResizeBuffers"));
+        return CollectSwapChainBuffers();
+    }
+
+    // Creates a brand-new IDXGISwapChain1 for the surface (HWND, CoreWindow, or
+    // SwapChainPanel), promotes it to IDXGISwapChain3, and gathers its buffers.
+    MaybeError SwapChain::InitializeSwapChainFromScratch() {
+        ASSERT(mDXGISwapChain == nullptr);
+
+        Device* device = ToBackend(GetDevice());
+
+        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+        swapChainDesc.Width = GetWidth();
+        swapChainDesc.Height = GetHeight();
+        swapChainDesc.Format = mConfig.format;
+        swapChainDesc.Stereo = false;
+        swapChainDesc.SampleDesc.Count = 1;
+        swapChainDesc.SampleDesc.Quality = 0;
+        swapChainDesc.BufferUsage = mConfig.usage;
+        swapChainDesc.BufferCount = mConfig.bufferCount;
+        swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
+        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+        swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+        swapChainDesc.Flags = mConfig.swapChainFlags;
+
+        // CreateSwapChainFor* live on IDXGIFactory2, so query it from the device's factory.
+        ComPtr<IDXGIFactory2> factory2 = nullptr;
+        DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
+                              "Getting IDXGIFactory2"));
+
+        ComPtr<IDXGISwapChain1> swapChain1;
+        switch (GetSurface()->GetType()) {
+            case Surface::Type::WindowsHWND: {
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
+                                                     static_cast<HWND>(GetSurface()->GetHWND()),
+                                                     &swapChainDesc, nullptr, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                break;
+            }
+            case Surface::Type::WindowsCoreWindow: {
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
+                                                           GetSurface()->GetCoreWindow(),
+                                                           &swapChainDesc, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                break;
+            }
+            case Surface::Type::WindowsSwapChainPanel: {
+                // Composition swapchains must be attached to the panel explicitly.
+                DAWN_TRY(CheckHRESULT(
+                    factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
+                                                            &swapChainDesc, nullptr, &swapChain1),
+                    "Creating the IDXGISwapChain1"));
+                ComPtr<ISwapChainPanelNative> swapChainPanelNative;
+                DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
+                                          IID_PPV_ARGS(&swapChainPanelNative)),
+                                      "Getting ISwapChainPanelNative"));
+                DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
+                                      "Setting SwapChain"));
+                break;
+            }
+            default:
+                UNREACHABLE();
+        }
+
+        // mDXGISwapChain is a ComPtr<IDXGISwapChain3>, so As() queries IDXGISwapChain3.
+        DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Getting IDXGISwapChain3"));
+
+        return CollectSwapChainBuffers();
+    }
+
+    // Queries every backbuffer from the DXGI swapchain into mBuffers and resets
+    // the per-buffer last-used serials.
+    MaybeError SwapChain::CollectSwapChainBuffers() {
+        ASSERT(mDXGISwapChain != nullptr);
+        ASSERT(mBuffers.empty());
+
+        mBuffers.resize(mConfig.bufferCount);
+        for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
+            DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
+                                  "Getting IDXGISwapChain buffer"));
+        }
+
+        // Pretend all the buffers were last used at the beginning of time.
+        mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+        return {};
+    }
+
+    // Transitions the current backbuffer texture to the present state, flushes
+    // pending commands, presents, and records the serial of this buffer's last use.
+    MaybeError SwapChain::PresentImpl() {
+        Device* device = ToBackend(GetDevice());
+
+        // Transition the texture to the present state as required by IDXGISwapChain1::Present()
+        // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+        // presentable texture to present at the end of submits that use them.
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+        mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
+                                                mApiTexture->GetAllSubresources());
+        DAWN_TRY(device->ExecutePendingCommandContext());
+
+        // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
+        // message to the application that it could stop rendering.
+        HRESULT presentResult =
+            mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
+        if (presentResult != DXGI_STATUS_OCCLUDED) {
+            DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
+        }
+
+        // Advance the serial and record it as the last use of the presented buffer, so
+        // GetCurrentTextureViewImpl can wait on it before the buffer is reused.
+        DAWN_TRY(device->NextSerial());
+        mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+
+        // The per-frame texture wrapper is done; a new one is created on the next frame.
+        mApiTexture->APIDestroy();
+        mApiTexture = nullptr;
+
+        return {};
+    }
+
+    // Waits until the GPU is done with the next backbuffer, then wraps it in a
+    // Dawn texture and returns a view of it.
+    ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+        Device* device = ToBackend(GetDevice());
+
+        // Synchronously wait until previous operations on the next swapchain buffer are finished.
+        // This is the logic that performs frame pacing.
+        // TODO(crbug.com/dawn/269): Consider whether this should be lifted for Mailbox so that
+        // there is no frame pacing.
+        mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
+        DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
+
+        // Create the API side objects for this use of the swapchain's buffer.
+        TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
+        DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
+                                                     mBuffers[mCurrentBuffer]));
+
+        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+        return mApiTexture->APICreateView();
+    }
+
+    // Detaches from the surface and synchronously waits until all GPU work on the
+    // swapchain buffers is finished, as required before DXGI resize/recreate.
+    MaybeError SwapChain::DetachAndWaitForDeallocation() {
+        DetachFromSurface();
+
+        // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
+        // SerialQueue with the current "pending serial" so that we don't destroy the texture
+        // before it is finished being used. Flush the commands and wait for that serial to be
+        // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(device->NextSerial());
+        DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
+        return device->TickImpl();
+    }
+
+    // Destroys the API-facing texture (if any) and drops all references to the
+    // DXGI swapchain and its buffers.
+    void SwapChain::DetachFromSurfaceImpl() {
+        if (mApiTexture != nullptr) {
+            mApiTexture->APIDestroy();
+            mApiTexture = nullptr;
+        }
+
+        mDXGISwapChain = nullptr;
+        mBuffers.clear();
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h
new file mode 100644
index 00000000000..eb99f1c88cf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/SwapChainD3D12.h
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
+#define DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class Device;
+ class Texture;
+
+    // Legacy swapchain path backed by a user-provided DawnWSI implementation.
+    class OldSwapChain final : public OldSwapChainBase {
+      public:
+        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        ~OldSwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* view) override;
+
+        // Texture usage required by the WSI implementation, cached at construction.
+        wgpu::TextureUsage mTextureUsage;
+    };
+
+    // Surface-based swapchain backed by an IDXGISwapChain3. May reuse the DXGI
+    // swapchain and buffers of a previous SwapChain on the same surface/device.
+    class SwapChain final : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+
+      private:
+        ~SwapChain() override;
+
+        void DestroyImpl() override;
+
+        using NewSwapChainBase::NewSwapChainBase;
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+        struct Config {
+            // Information that's passed to the D3D12 swapchain creation call.
+            UINT bufferCount;
+            UINT swapChainFlags;
+            DXGI_FORMAT format;
+            DXGI_USAGE usage;
+        };
+
+        // NewSwapChainBase implementation
+        MaybeError PresentImpl() override;
+        ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+
+        // Does the swapchain initialization steps assuming there is nothing we can reuse.
+        MaybeError InitializeSwapChainFromScratch();
+        // Does the swapchain initialization step of gathering the buffers.
+        MaybeError CollectSwapChainBuffers();
+        // Calls DetachFromSurface but also synchronously waits until all references to the
+        // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
+        MaybeError DetachAndWaitForDeallocation();
+
+        Config mConfig;
+
+        ComPtr<IDXGISwapChain3> mDXGISwapChain;
+        std::vector<ComPtr<ID3D12Resource>> mBuffers;
+        // Serial at which each buffer was last used; waited on for frame pacing.
+        std::vector<ExecutionSerial> mBufferLastUsedSerials;
+        uint32_t mCurrentBuffer = 0;
+
+        // Texture wrapping the current backbuffer, alive between
+        // GetCurrentTextureViewImpl() and PresentImpl().
+        Ref<Texture> mApiTexture;
+    };
+
+} // namespace dawn::native::d3d12
+
+#endif  // DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp
new file mode 100644
index 00000000000..83e55fd18f0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.cpp
@@ -0,0 +1,539 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+ Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
+ uint32_t offset,
+ uint32_t bytesPerRow) {
+ ASSERT(bytesPerRow != 0);
+ uint32_t byteOffsetX = offset % bytesPerRow;
+ uint32_t byteOffsetY = offset - byteOffsetX;
+
+ return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
+ byteOffsetY / bytesPerRow * blockInfo.height, 0};
+ }
+
+ uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
+ uint32_t bytesPerRow,
+ uint64_t alignedOffset,
+ Origin3D bufferOffset) {
+ ASSERT(bufferOffset.z == 0);
+ return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
+ bufferOffset.y * bytesPerRow / blockInfo.height;
+ }
+
+ uint64_t AlignDownForDataPlacement(uint32_t offset) {
+ return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
+ }
+ } // namespace
+
+ TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
+ ASSERT(this->count < kMaxTextureCopyRegions);
+ return &this->copies[this->count++];
+ }
+
    // Computes how to copy one 2D subresource given D3D12's requirement that the buffer
    // offset passed to CopyTextureRegion be aligned to
    // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512): either a single copy (when the offset
    // is already aligned, or when the re-interpreted rows still fit inside bytesPerRow),
    // or two copies (when the re-interpreted rows straddle the row pitch).
    TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
                                                           Extent3D copySize,
                                                           const TexelBlockInfo& blockInfo,
                                                           uint64_t offset,
                                                           uint32_t bytesPerRow) {
        TextureCopySubresource copy;

        ASSERT(bytesPerRow % blockInfo.byteSize == 0);

        // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
        // preceding our data.
        uint64_t alignedOffset = AlignDownForDataPlacement(offset);

        // If the provided offset to the data was already 512-aligned, we can simply copy the data
        // without further translation.
        if (offset == alignedOffset) {
            copy.count = 1;

            copy.copies[0].alignedOffset = alignedOffset;
            copy.copies[0].textureOffset = origin;
            copy.copies[0].copySize = copySize;
            copy.copies[0].bufferOffset = {0, 0, 0};
            copy.copies[0].bufferSize = copySize;

            return copy;
        }

        ASSERT(alignedOffset < offset);
        ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);

        // We must reinterpret our aligned offset into X and Y offsets with respect to the row
        // pitch.
        //
        // You can visualize the data in the buffer like this:
        // |-----------------------++++++++++++++++++++++++++++++++|
        // ^ 512-aligned address   ^ Aligned offset               ^ End of copy data
        //
        // Now when you consider the row pitch, you can visualize the data like this:
        // |~~~~~~~~~~~~~~~~|
        // |~~~~~+++++++++++|
        // |++++++++++++++++|
        // |+++++~~~~~~~~~~~|
        // |<---row pitch-->|
        //
        // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
        // |YYYYYYYYYYYYYYYY|
        // |XXXXXX++++++++++|
        // |++++++++++++++++|
        // |++++++~~~~~~~~~~|
        // |<---row pitch-->|
        Origin3D texelOffset = ComputeTexelOffsets(
            blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);

        ASSERT(texelOffset.y <= blockInfo.height);
        ASSERT(texelOffset.z == 0);

        uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
        uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
        if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
            // The region's rows fit inside the bytes per row. In this case, extend the width of the
            // PlacedFootprint and copy the buffer with an offset location
            //  |<------------- bytes per row ------------->|
            //
            //  |-------------------------------------------|
            //  |                                           |
            //  |                 +++++++++++++++++~~~~~~~~~|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++         |
            //  |-------------------------------------------|

            // Copy 0:
            //  |----------------------------------|
            //  |                                  |
            //  |                 +++++++++++++++++|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
            //  |----------------------------------|

            copy.count = 1;

            copy.copies[0].alignedOffset = alignedOffset;
            copy.copies[0].textureOffset = origin;
            copy.copies[0].copySize = copySize;
            copy.copies[0].bufferOffset = texelOffset;

            copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
            copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
            copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;

            return copy;
        }

        // The region's rows straddle the bytes per row. Split the copy into two copies
        //  |<------------- bytes per row ------------->|
        //
        //  |-------------------------------------------|
        //  |                                           |
        //  |                                   ++++++++|
        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |+++++++++                                  |
        //  |-------------------------------------------|

        //  Copy 0:
        //  |-------------------------------------------|
        //  |                                           |
        //  |                                   ++++++++|
        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
        //  |-------------------------------------------|

        //  Copy 1:
        //  |---------|
        //  |         |
        //  |         |
        //  |+++++++++|
        //  |+++++++++|
        //  |+++++++++|
        //  |+++++++++|
        //  |+++++++++|
        //  |---------|

        copy.count = 2;

        copy.copies[0].alignedOffset = alignedOffset;
        copy.copies[0].textureOffset = origin;

        ASSERT(bytesPerRow > byteOffsetInRowPitch);
        uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
        copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
        copy.copies[0].copySize.height = copySize.height;
        copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;

        copy.copies[0].bufferOffset = texelOffset;
        copy.copies[0].bufferSize.width = texelsPerRow;
        copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
        copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;

        uint64_t offsetForCopy1 =
            offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
        uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
        Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
            blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);

        ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
        ASSERT(texelOffsetForCopy1.z == 0);

        copy.copies[1].alignedOffset = alignedOffsetForCopy1;
        copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
        copy.copies[1].textureOffset.y = origin.y;
        copy.copies[1].textureOffset.z = origin.z;

        ASSERT(copySize.width > copy.copies[0].copySize.width);
        copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
        copy.copies[1].copySize.height = copySize.height;
        copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;

        copy.copies[1].bufferOffset = texelOffsetForCopy1;
        copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
        copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
        copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;

        return copy;
    }
+
+ TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ TextureCopySplits copies;
+
+ const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+ // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
+ // Each layer of a 2D array might need to be split, but because of the WebGPU
+ // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
+ // will be at an offset multiple of 512 of each other, which means they will all result in
+ // the same 2D split. Thus we can just compute the copy splits for the first and second
+ // layers, and reuse them for the remaining layers by adding the related offset of each
+ // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
+ // share the same copy split, so in this situation we just need to compute copy split once
+ // and reuse it for all the layers.
+ Extent3D copyOneLayerSize = copySize;
+ Origin3D copyFirstLayerOrigin = origin;
+ copyOneLayerSize.depthOrArrayLayers = 1;
+ copyFirstLayerOrigin.z = 0;
+
+ copies.copySubresources[0] = Compute2DTextureCopySubresource(
+ copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
+
+ // When the copy only refers one texture 2D array layer,
+ // copies.copySubresources[1] will never be used so we can safely early return here.
+ if (copySize.depthOrArrayLayers == 1) {
+ return copies;
+ }
+
+ if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
+ copies.copySubresources[1] = copies.copySubresources[0];
+ copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
+ copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
+ } else {
+ const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
+ copies.copySubresources[1] =
+ Compute2DTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
+ bufferOffsetNextLayer, bytesPerRow);
+ }
+
+ return copies;
+ }
+
    // Fixes up copy region `i` of `copy` for a 3D copy whose buffer footprint contains an
    // empty leading row, in the case where the copy height is even. The region is split
    // into three copies (see the step-by-step figures below) so that every depth slice
    // starts at the correct row and the last row of the last image does not read past the
    // end of the buffer.
    void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
        Origin3D origin,
        Extent3D copySize,
        const TexelBlockInfo& blockInfo,
        uint32_t bytesPerRow,
        uint32_t rowsPerImage,
        TextureCopySubresource& copy,
        uint32_t i) {
        // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
        // is incorrect if there is an empty row at the beginning of the copy block.
        // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
        // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
        // as below:
        //
        //               |<----- bytes per row ------>|
        //
        //               |----------------------------|
        //  row (N - 1)  |                            |
        //  row N        |                 ++~~~~~~~~~|
        //  row (N + 1)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 2)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 3)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 4)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 5)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 6)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 7)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 8)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 9)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
        //  row (N + 11) |~~~~~~~~~~~~~~~~~++         |
        //               |----------------------------|

        // The copy we mean to do is the following:
        //
        //   - image 0: row N to row (N + 3),
        //   - image 1: row (N + 4) to row (N + 7),
        //   - image 2: row (N + 8) to row (N + 11).
        //
        // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
        // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
        //
        // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
        // the following:
        //
        //              |-------------------|
        //  row (N - 1) |                   |
        //  row N       |                 ++|
        //  row (N + 1) |~~~~~~~~~~~~~~~~~++|
        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|
        //  row (N + 3) |~~~~~~~~~~~~~~~~~++|
        //              |-------------------|
        //
        // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
        // texture, we will copy 5 rows every time, and every first row of each slice will be
        // skipped. As a result, the copied data will be:
        //
        //   - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
        //   - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
        //
        // Likewise, all other image followed will be incorrect because we wrongly keep skipping
        // one row for each depth slice.
        //
        // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
        // expand to all depth slices in the first copy. 3 rows + one skipped rows = 4 rows, which
        // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
        // block of the last row of the last image may out-of-bound (see the details below), so
        // we need an extra copy for the very last row.

        // Copy 0: copy 3 rows, not 4 rows.
        //                _____________________
        //               /                    /|
        //              /                    / |
        //              |-------------------|  |
        //  row (N - 1) |                   |  |
        //  row N       |                 ++|  |
        //  row (N + 1) |~~~~~~~~~~~~~~~~~++| /
        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|/
        //              |-------------------|

        // Copy 1: move down two rows and copy the last row on image 0, and expand to
        // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
        // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
        // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
        //                _____________________
        //               /                    /|
        //              /                    / |
        //              |-------------------|  |
        //  row (N + 1) |                   |  |
        //  row (N + 2) |                   |  |
        //  row (N + 3) |                 ++| /
        //  row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
        //              |-------------------|
        //
        //  copy 2: copy the last row of the last image.
        //              |-------------------|
        //  row (N + 11)|                 ++|
        //              |-------------------|

        // Copy 0: copy copySize.height - 1 rows
        TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
        copy0.copySize.height = copySize.height - blockInfo.height;
        copy0.bufferSize.height = rowsPerImage * blockInfo.height;  // rowsPerImageInTexels

        // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
        // but the last one.
        TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
        *copy1 = copy0;
        copy1->alignedOffset += 2 * bytesPerRow;
        copy1->textureOffset.y += copySize.height - blockInfo.height;
        // Offset two rows from the copy height for the bufferOffset (See the figure above):
        //   - one for the row we advanced in the buffer: row (N + 4).
        //   - one for the last row we want to copy: row (N + 3) itself.
        copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
        copy1->copySize.height = blockInfo.height;
        copy1->copySize.depthOrArrayLayers--;
        copy1->bufferSize.depthOrArrayLayers--;

        // Copy 2: copy the last row of the last image.
        uint64_t offsetForCopy0 = OffsetToFirstCopiedTexel(blockInfo, bytesPerRow,
                                                           copy0.alignedOffset, copy0.bufferOffset);
        uint64_t offsetForLastRowOfLastImage =
            offsetForCopy0 + bytesPerRow * (copy0.copySize.height +
                                            rowsPerImage * (copySize.depthOrArrayLayers - 1));
        uint64_t alignedOffsetForLastRowOfLastImage =
            AlignDownForDataPlacement(offsetForLastRowOfLastImage);
        Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
            blockInfo,
            static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
            bytesPerRow);

        TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
        copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
        copy2->textureOffset = copy1->textureOffset;
        copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
        copy2->copySize = copy1->copySize;
        copy2->copySize.depthOrArrayLayers = 1;
        copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
        copy2->bufferSize.width = copy1->bufferSize.width;
        ASSERT(copy2->copySize.height == 1);
        copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
        copy2->bufferSize.depthOrArrayLayers = 1;
    }
+
+ void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
+ uint32_t bytesPerRow,
+ TextureCopySubresource& copy,
+ uint32_t i) {
+ // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
+ // the reason why it is incorrect if we simply extend the copy region to all depth slices
+ // when there is an empty first row at the copy region.
+ //
+ // If the copy height is odd, we can use two copies to make it correct:
+ // - copy 0: only copy the first depth slice. Keep other arguments the same.
+ // - copy 1: copy all rest depth slices because it will start without an empty row if
+ // copy height is odd. Odd height + one (empty row) is even. An even row number times
+ // bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+
+ // Copy 0: copy the first depth slice (image 0)
+ TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+ copy0.copySize.depthOrArrayLayers = 1;
+ copy0.bufferSize.depthOrArrayLayers = 1;
+
+ // Copy 1: copy the rest depth slices in one shot
+ TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+ *copy1 = copy0;
+ ASSERT(copySize.height % 2 == 1);
+ copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
+ ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
+ // textureOffset.z should add one because the first slice has already been copied in copy0.
+ copy1->textureOffset.z++;
+ // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
+ // row in this copy region.
+ copy1->bufferOffset.y = 0;
+ copy1->copySize.height = copySize.height;
+ copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+ copy1->bufferSize.height = copySize.height;
+ copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+ }
+
    TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
                                                      Extent3D copySize,
                                                      const TexelBlockInfo& blockInfo,
                                                      uint64_t offset,
                                                      uint32_t bytesPerRow,
                                                      uint32_t rowsPerImage) {
        // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
        // and get copy region(s) for the first slice of the copy, then extend to all depth slices
        // and become a 3D copy. However, this doesn't work as easily as that due to some corner
        // cases.
        //
        // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
        // region and we simply extend the 2D copy region to all copied depth slices, copied data
        // will be incorrectly offset for each depth slice except the first one.
        //
        // For these special cases, we need to recompute the copy regions for 3D textures by
        // splitting the incorrect copy region into a couple more copy regions.

        // Call Compute2DTextureCopySubresource and get copy regions. This function has already
        // forwarded "copySize.depthOrArrayLayers" to all depth slices.
        TextureCopySubresource copySubresource =
            Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);

        ASSERT(copySubresource.count <= 2);
        // If copySize.depthOrArrayLayers is 1, we can return copySubresource. Because we don't
        // need to extend the copy region(s) to other depth slice(s).
        if (copySize.depthOrArrayLayers == 1) {
            return copySubresource;
        }

        uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
        // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
        // However, we may append a couple more copy regions in the for loop below. We don't need
        // to revise these new added copy regions.
        uint32_t originalCopyCount = copySubresource.count;
        for (uint32_t i = 0; i < originalCopyCount; ++i) {
            // There can be one empty row at most in a copy region.
            ASSERT(copySubresource.copies[i].bufferSize.height <=
                   rowsPerImageInTexels + blockInfo.height);
            Extent3D& bufferSize = copySubresource.copies[i].bufferSize;

            if (bufferSize.height == rowsPerImageInTexels) {
                // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
                // this copy region without any modification.
                continue;
            }

            if (bufferSize.height < rowsPerImageInTexels) {
                // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
                // for each slice even though we only copy partial rows in each slice sometimes.
                bufferSize.height = rowsPerImageInTexels;
            } else {
                // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
                // region due to alignment adjustment.

                // bytesPerRow is definitely 256, and it is definitely a full copy on height.
                // Otherwise, bufferSize.height won't be greater than rowsPerImageInTexels and
                // there won't be an empty row at the beginning of this copy region.
                ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
                ASSERT(copySize.height == rowsPerImageInTexels);

                if (copySize.height % 2 == 0) {
                    // If copySize.height is even and there is an empty row at the beginning of the
                    // first slice of the copy region, the offset of all depth slices will never be
                    // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
                    // an empty row at each depth slice. We need a totally different approach to
                    // split the copy region.
                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
                        origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
                } else {
                    // If copySize.height is odd and there is an empty row at the beginning of the
                    // first slice of the copy region, we can split the copy region into two copies:
                    // copy0 to copy the first slice, copy1 to copy the rest slices because the
                    // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
                    // without an empty row. This is an easier case relative to cases with even copy
                    // height.
                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(
                        copySize, bytesPerRow, copySubresource, i);
                }
            }
        }

        return copySubresource;
    }
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h
new file mode 100644
index 00000000000..d549b906dad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureCopySplitter.h
@@ -0,0 +1,97 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
+#define DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn::native {
+
+ struct TexelBlockInfo;
+
+} // namespace dawn::native
+
+namespace dawn::native::d3d12 {
+
    // Description of how one texture subresource copy is split into up to
    // kMaxTextureCopyRegions CopyTextureRegion calls so that every region's buffer
    // offset satisfies D3D12's placement alignment requirement.
    struct TextureCopySubresource {
        static constexpr unsigned int kMaxTextureCopyRegions = 4;

        struct CopyInfo {
            // Byte offset in the buffer where this region's footprint starts, aligned to
            // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT.
            uint64_t alignedOffset = 0;
            // Texel origin of the region in the texture subresource.
            Origin3D textureOffset;
            // Texel origin of the copied data inside the buffer footprint.
            Origin3D bufferOffset;
            // Texel extent of the buffer footprint (may exceed copySize).
            Extent3D bufferSize;

            // Texel extent actually copied by this region.
            Extent3D copySize;
        };

        // Appends a new region and returns it. At most kMaxTextureCopyRegions regions
        // may be added.
        CopyInfo* AddCopy();

        // Number of valid entries at the front of `copies`.
        uint32_t count = 0;
        std::array<CopyInfo, kMaxTextureCopyRegions> copies;
    };
+
    // Copy splits for a 2D array texture copy. At most two distinct per-subresource
    // splits are needed — one for even layers and one for odd layers — because
    // "bytesPerRow" is a multiple of 256; see Compute2DTextureCopySplits.
    struct TextureCopySplits {
        static constexpr uint32_t kMaxTextureCopySubresources = 2;

        std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
    };
+
+ // This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
+ // 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
+ // details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
+ // and 3D textures based on this function.
+ // The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
+    // regions defined by the arguments of TextureCopySubresource returned by this function and its
+ // counterparts. These arguments should strictly conform to particular invariants. Otherwise,
+ // D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
+ // invariants are listed below. For more details
+ // of these invariants, see src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
+ // - Inside each copy region: 1) its buffer offset plus copy size should be less than its
+ // buffer size, 2) its buffer offset on y-axis should be less than copy format's
+ // blockInfo.height, 3) its buffer offset on z-axis should be 0.
+ // - Each copy region has an offset (aka alignedOffset) aligned to
+ // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
+ // - The buffer footprint of each copy region should be entirely within the copied buffer,
+ // which means that the last "texel" of the buffer footprint doesn't go past the end of
+ // the buffer even though the last "texel" might not be copied.
+ // - If there are multiple copy regions, each copy region should not overlap with the others.
+ // - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
+ // - Every pixel accessed by every copy region should not be out of the bound of the copied
+ // texture and buffer.
+ TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow);
+
+ TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+ TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp
new file mode 100644
index 00000000000..95f9cb9b3d1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -0,0 +1,1381 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/TextureD3D12.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D11on12Util.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+#include "dawn/native/d3d12/HeapD3D12.h"
+#include "dawn/native/d3d12/ResourceAllocatorManagerD3D12.h"
+#include "dawn/native/d3d12/StagingBufferD3D12.h"
+#include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+namespace dawn::native::d3d12 {
+
+ namespace {
+
+ D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
+ D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+
+ if (usage & kPresentTextureUsage) {
+ // The present usage is only used internally by the swapchain and is never used in
+ // combination with other usages.
+ ASSERT(usage == kPresentTextureUsage);
+ return D3D12_RESOURCE_STATE_PRESENT;
+ }
+
+ if (usage & wgpu::TextureUsage::CopySrc) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+ }
+ if (usage & wgpu::TextureUsage::CopyDst) {
+ resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+ }
+ if (usage & (wgpu::TextureUsage::TextureBinding)) {
+ resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+ }
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+ }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
+ } else {
+ resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
+ }
+ }
+
+ if (usage & kReadOnlyRenderAttachment) {
+ // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
+ resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
+ }
+
+ return resourceState;
+ }
+
+ D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
+ const Format& format,
+ bool isMultisampledTexture) {
+ D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+ }
+
+ // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
+ // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
+ if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
+ if (format.HasDepthOrStencil()) {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+ } else {
+ flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
+ }
+ }
+
+ ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+ flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
+ return flags;
+ }
+
+ D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureDimension::e1D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
+ case wgpu::TextureDimension::e2D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+ case wgpu::TextureDimension::e3D:
+ return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
+ }
+ }
+
+ DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ return DXGI_FORMAT_R8_TYPELESS;
+
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::Depth16Unorm:
+ return DXGI_FORMAT_R16_TYPELESS;
+
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ return DXGI_FORMAT_R8G8_TYPELESS;
+
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ return DXGI_FORMAT_R32_TYPELESS;
+
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ return DXGI_FORMAT_R16G16_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ return DXGI_FORMAT_R8G8B8A8_TYPELESS;
+
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return DXGI_FORMAT_B8G8R8A8_TYPELESS;
+
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return DXGI_FORMAT_R10G10B10A2_TYPELESS;
+
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ return DXGI_FORMAT_R32G32_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ return DXGI_FORMAT_R16G16B16A16_TYPELESS;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ return DXGI_FORMAT_R32G32B32A32_TYPELESS;
+
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ return DXGI_FORMAT_R32_TYPELESS;
+
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DXGI_FORMAT_R24G8_TYPELESS;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return DXGI_FORMAT_R32G8X24_TYPELESS;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return DXGI_FORMAT_BC1_TYPELESS;
+
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return DXGI_FORMAT_BC2_TYPELESS;
+
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return DXGI_FORMAT_BC3_TYPELESS;
+
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
+ return DXGI_FORMAT_BC4_TYPELESS;
+
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return DXGI_FORMAT_BC5_TYPELESS;
+
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return DXGI_FORMAT_BC6H_TYPELESS;
+
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return DXGI_FORMAT_BC7_TYPELESS;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ } // namespace
+
+ DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R8Unorm:
+ return DXGI_FORMAT_R8_UNORM;
+ case wgpu::TextureFormat::R8Snorm:
+ return DXGI_FORMAT_R8_SNORM;
+ case wgpu::TextureFormat::R8Uint:
+ return DXGI_FORMAT_R8_UINT;
+ case wgpu::TextureFormat::R8Sint:
+ return DXGI_FORMAT_R8_SINT;
+
+ case wgpu::TextureFormat::R16Uint:
+ return DXGI_FORMAT_R16_UINT;
+ case wgpu::TextureFormat::R16Sint:
+ return DXGI_FORMAT_R16_SINT;
+ case wgpu::TextureFormat::R16Float:
+ return DXGI_FORMAT_R16_FLOAT;
+ case wgpu::TextureFormat::RG8Unorm:
+ return DXGI_FORMAT_R8G8_UNORM;
+ case wgpu::TextureFormat::RG8Snorm:
+ return DXGI_FORMAT_R8G8_SNORM;
+ case wgpu::TextureFormat::RG8Uint:
+ return DXGI_FORMAT_R8G8_UINT;
+ case wgpu::TextureFormat::RG8Sint:
+ return DXGI_FORMAT_R8G8_SINT;
+
+ case wgpu::TextureFormat::R32Uint:
+ return DXGI_FORMAT_R32_UINT;
+ case wgpu::TextureFormat::R32Sint:
+ return DXGI_FORMAT_R32_SINT;
+ case wgpu::TextureFormat::R32Float:
+ return DXGI_FORMAT_R32_FLOAT;
+ case wgpu::TextureFormat::RG16Uint:
+ return DXGI_FORMAT_R16G16_UINT;
+ case wgpu::TextureFormat::RG16Sint:
+ return DXGI_FORMAT_R16G16_SINT;
+ case wgpu::TextureFormat::RG16Float:
+ return DXGI_FORMAT_R16G16_FLOAT;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return DXGI_FORMAT_R8G8B8A8_UNORM;
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return DXGI_FORMAT_R8G8B8A8_SNORM;
+ case wgpu::TextureFormat::RGBA8Uint:
+ return DXGI_FORMAT_R8G8B8A8_UINT;
+ case wgpu::TextureFormat::RGBA8Sint:
+ return DXGI_FORMAT_R8G8B8A8_SINT;
+ case wgpu::TextureFormat::BGRA8Unorm:
+ return DXGI_FORMAT_B8G8R8A8_UNORM;
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return DXGI_FORMAT_R10G10B10A2_UNORM;
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+ case wgpu::TextureFormat::RG32Uint:
+ return DXGI_FORMAT_R32G32_UINT;
+ case wgpu::TextureFormat::RG32Sint:
+ return DXGI_FORMAT_R32G32_SINT;
+ case wgpu::TextureFormat::RG32Float:
+ return DXGI_FORMAT_R32G32_FLOAT;
+ case wgpu::TextureFormat::RGBA16Uint:
+ return DXGI_FORMAT_R16G16B16A16_UINT;
+ case wgpu::TextureFormat::RGBA16Sint:
+ return DXGI_FORMAT_R16G16B16A16_SINT;
+ case wgpu::TextureFormat::RGBA16Float:
+ return DXGI_FORMAT_R16G16B16A16_FLOAT;
+
+ case wgpu::TextureFormat::RGBA32Uint:
+ return DXGI_FORMAT_R32G32B32A32_UINT;
+ case wgpu::TextureFormat::RGBA32Sint:
+ return DXGI_FORMAT_R32G32B32A32_SINT;
+ case wgpu::TextureFormat::RGBA32Float:
+ return DXGI_FORMAT_R32G32B32A32_FLOAT;
+
+ case wgpu::TextureFormat::Depth16Unorm:
+ return DXGI_FORMAT_D16_UNORM;
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ return DXGI_FORMAT_D32_FLOAT;
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ return DXGI_FORMAT_D24_UNORM_S8_UINT;
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ return DXGI_FORMAT_BC1_UNORM;
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ return DXGI_FORMAT_BC1_UNORM_SRGB;
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ return DXGI_FORMAT_BC2_UNORM;
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ return DXGI_FORMAT_BC2_UNORM_SRGB;
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ return DXGI_FORMAT_BC3_UNORM;
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ return DXGI_FORMAT_BC3_UNORM_SRGB;
+ case wgpu::TextureFormat::BC4RSnorm:
+ return DXGI_FORMAT_BC4_SNORM;
+ case wgpu::TextureFormat::BC4RUnorm:
+ return DXGI_FORMAT_BC4_UNORM;
+ case wgpu::TextureFormat::BC5RGSnorm:
+ return DXGI_FORMAT_BC5_SNORM;
+ case wgpu::TextureFormat::BC5RGUnorm:
+ return DXGI_FORMAT_BC5_UNORM;
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ return DXGI_FORMAT_BC6H_SF16;
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ return DXGI_FORMAT_BC6H_UF16;
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ return DXGI_FORMAT_BC7_UNORM;
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return DXGI_FORMAT_BC7_UNORM_SRGB;
+
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ return DXGI_FORMAT_NV12;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
+
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+ "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
+
+ return {};
+ }
+
+ MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+ const TextureDescriptor* dawnDescriptor) {
+ const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
+ DAWN_INVALID_IF(
+ (dawnDescriptor->size.width != d3dDescriptor.Width) ||
+ (dawnDescriptor->size.height != d3dDescriptor.Height) ||
+ (dawnDescriptor->size.depthOrArrayLayers != 1),
+ "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
+ "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
+ d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
+ dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
+
+ const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
+ DAWN_INVALID_IF(
+ dxgiFormatFromDescriptor != d3dDescriptor.Format,
+ "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
+ d3dDescriptor.Format, dawnDescriptor->format);
+
+ DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
+ "D3D12 texture number of miplevels (%u) is not 1.",
+ d3dDescriptor.MipLevels);
+
+ DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1,
+ "D3D12 texture array size (%u) is not 1.", d3dDescriptor.DepthOrArraySize);
+
+ // Shared textures cannot be multi-sample so no need to check those.
+ ASSERT(d3dDescriptor.SampleDesc.Count == 1);
+ ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
+
+ return {};
+ }
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
+ MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
+ const bool supportsSharedResourceCapabilityTier1 =
+ device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
+ switch (textureFormat) {
+ // MSDN docs are not correct, NV12 requires at-least tier 1.
+ case DXGI_FORMAT_NV12:
+ if (supportsSharedResourceCapabilityTier1) {
+ return {};
+ }
+ break;
+ default:
+ break;
+ }
+
+ return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
+ }
+
+ // static
+ ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+
+ DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
+ "Cannot create a multi-planar formatted texture directly");
+
+ DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
+ return std::move(dawnTexture);
+ }
+
+ // static
+ ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
+ Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture,
+ bool isInitialized) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
+ descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
+
+ // Importing a multi-planar format must be initialized. This is required because
+ // a shared multi-planar format cannot be initialized by Dawn.
+ DAWN_INVALID_IF(
+ !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
+ "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
+ dawnTexture->GetFormat().format);
+
+ dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
+ dawnTexture->GetAllSubresources());
+ return std::move(dawnTexture);
+ }
+
+ // static
+ ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
+ return std::move(dawnTexture);
+ }
+
+ MaybeError Texture::InitializeAsExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ bool isSwapChainTexture) {
+ mD3D11on12Resource = std::move(d3d11on12Resource);
+ mSwapChainTexture = isSwapChainTexture;
+
+ D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
+ mD3D12ResourceFlags = desc.Flags;
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+ SetLabelHelper("Dawn_ExternalTexture");
+
+ return {};
+ }
+
+ MaybeError Texture::InitializeAsInternalTexture() {
+ D3D12_RESOURCE_DESC resourceDescriptor;
+ resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
+ resourceDescriptor.Alignment = 0;
+
+ const Extent3D& size = GetSize();
+ resourceDescriptor.Width = size.width;
+ resourceDescriptor.Height = size.height;
+ resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
+
+ // This will need to be much more nuanced when WebGPU has
+ // texture view compatibility rules.
+ const bool needsTypelessFormat =
+ GetFormat().HasDepthOrStencil() &&
+ (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
+
+ DXGI_FORMAT dxgiFormat = needsTypelessFormat
+ ? D3D12TypelessTextureFormat(GetFormat().format)
+ : D3D12TextureFormat(GetFormat().format);
+
+ resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
+ resourceDescriptor.Format = dxgiFormat;
+ resourceDescriptor.SampleDesc.Count = GetSampleCount();
+ resourceDescriptor.SampleDesc.Quality = 0;
+ resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
+ resourceDescriptor.Flags =
+ D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
+ mD3D12ResourceFlags = resourceDescriptor.Flags;
+
+ DAWN_TRY_ASSIGN(mResourceAllocation,
+ ToBackend(GetDevice())
+ ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
+ D3D12_RESOURCE_STATE_COMMON));
+
+ SetLabelImpl();
+
+ Device* device = ToBackend(GetDevice());
+
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+ DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
+ TextureBase::ClearValue::NonZero));
+ }
+
+ return {};
+ }
+
+ MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+ SetLabelHelper("Dawn_SwapChainTexture");
+
+ return {};
+ }
+
+ Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+ : TextureBase(device, descriptor, state),
+ mSubresourceStateAndDecay(
+ GetFormat().aspects,
+ GetArrayLayers(),
+ GetNumMipLevels(),
+ {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {
+ }
+
+ Texture::~Texture() {
+ }
+
+ void Texture::DestroyImpl() {
+ TextureBase::DestroyImpl();
+
+ Device* device = ToBackend(GetDevice());
+
+ // In PIX's D3D12-only mode, there is no way to determine frame boundaries
+ // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
+ // PIX will wait forever for a present that never happens.
+ // If we know we're dealing with a swapbuffer texture, inform PIX we've
+ // "presented" the texture so it can determine frame boundaries and use its
+ // contents for the UI.
+ if (mSwapChainTexture) {
+ ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
+ if (d3dSharingContract != nullptr) {
+ d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
+ }
+ }
+
+ device->DeallocateMemory(mResourceAllocation);
+
+ // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
+ // We can set mSwapChainTexture to false to avoid passing a nullptr to
+ // ID3D12SharingContract::Present.
+ mSwapChainTexture = false;
+ }
+
+ DXGI_FORMAT Texture::GetD3D12Format() const {
+ return D3D12TextureFormat(GetFormat().format);
+ }
+
+ ID3D12Resource* Texture::GetD3D12Resource() const {
+ return mResourceAllocation.GetD3D12Resource();
+ }
+
+ DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
+ ASSERT(GetFormat().aspects & aspect);
+
+ switch (GetFormat().format) {
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ switch (aspect) {
+ case Aspect::Depth:
+ return DXGI_FORMAT_R32_FLOAT;
+ case Aspect::Stencil:
+ return DXGI_FORMAT_R8_UINT;
+ default:
+ UNREACHABLE();
+ }
+ default:
+ ASSERT(HasOneBit(GetFormat().aspects));
+ return GetD3D12Format();
+ }
+ }
+
+ MaybeError Texture::AcquireKeyedMutex() {
+ ASSERT(mD3D11on12Resource != nullptr);
+ return mD3D11on12Resource->AcquireKeyedMutex();
+ }
+
+ void Texture::ReleaseKeyedMutex() {
+ ASSERT(mD3D11on12Resource != nullptr);
+ mD3D11on12Resource->ReleaseKeyedMutex();
+ }
+
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
+ }
+
+ void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage) {
+ TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
+ GetAllSubresources());
+ }
+
+ void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState) {
+ TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
+ }
+
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState,
+ const SubresourceRange& range) {
+ if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+ }
+
+ std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+ // TODO(enga): Consider adding a Count helper.
+ uint32_t aspectCount = 0;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ aspectCount++;
+ DAWN_UNUSED(aspect);
+ }
+
+ barriers.reserve(range.levelCount * range.layerCount * aspectCount);
+
+ TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
+ if (barriers.size()) {
+ commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
+ }
+ }
+
+ void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+ const SubresourceRange& range,
+ StateAndDecay* state,
+ D3D12_RESOURCE_STATES newState,
+ ExecutionSerial pendingCommandSerial) const {
+ // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
+ // return false.
+ if (state->lastState == newState) {
+ return;
+ }
+
+ D3D12_RESOURCE_STATES lastState = state->lastState;
+
+ // The COMMON state represents a state where no write operations can be pending, and
+ // where all pixels are uncompressed. This makes it possible to transition to and
+ // from some states without synchronization (i.e. without an explicit
+ // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
+ // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
+ // state when all of the following are true: 1) the texture is accessed on a command
+ // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
+ // 3) the texture was promoted implicitly to a read-only state and is still in that
+ // state.
+ // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+ // To track implicit decays, we must record the pending serial on which that
+ // transition will occur. When that texture is used again, the previously recorded
+ // serial must be compared to the last completed serial to determine if the texture
+ // has implicity decayed to the common state.
+ if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
+ lastState = D3D12_RESOURCE_STATE_COMMON;
+ }
+
+ // Update the tracked state.
+ state->lastState = newState;
+
+ // Destination states that qualify for an implicit promotion for a
+ // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
+ // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
+ {
+ static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
+ D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
+
+ if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+ if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
+ // Implicit texture state decays can only occur when the texture was implicitly
+ // transitioned to a read-only state. isValidToDecay is needed to differentiate
+ // between resources that were implictly or explicitly transitioned to a
+ // read-only state.
+ state->isValidToDecay = true;
+ state->lastDecaySerial = pendingCommandSerial;
+ return;
+ } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
+ state->isValidToDecay = false;
+ return;
+ }
+ }
+ }
+
+ D3D12_RESOURCE_BARRIER barrier;
+ barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+ barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier.Transition.pResource = GetD3D12Resource();
+ barrier.Transition.StateBefore = lastState;
+ barrier.Transition.StateAfter = newState;
+
+ bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
+ range.layerCount == GetArrayLayers() &&
+ range.levelCount == GetNumMipLevels() &&
+ range.aspects == GetFormat().aspects;
+
+ // Use a single transition for all subresources if possible.
+ if (isFullRange) {
+ barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+ barriers->push_back(barrier);
+ } else {
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+ barrier.Transition.Subresource =
+ GetSubresourceIndex(range.baseMipLevel + mipLevel,
+ range.baseArrayLayer + arrayLayer, aspect);
+ barriers->push_back(barrier);
+ }
+ }
+ }
+ }
+
+ state->isValidToDecay = false;
+ }
+
+ void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
+ // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+ // must be acquired before command list submission to ensure work from the other queues
+ // has finished. See Device::ExecuteCommandContext.
+ if (mD3D11on12Resource != nullptr) {
+ commandContext->AddToSharedTextureList(this);
+ }
+ }
+
+ void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ TransitionUsageAndGetResourceBarrier(commandContext, barrier,
+ D3D12TextureUsage(usage, GetFormat()), range);
+ }
+
// Appends to `barriers` the transitions needed to move every subresource in `range`
// into `newState`, updating the tracked per-subresource state as it goes. The barriers
// are not submitted here; the caller records them.
void Texture::TransitionUsageAndGetResourceBarrier(
    CommandRecordingContext* commandContext,
    std::vector<D3D12_RESOURCE_BARRIER>* barriers,
    D3D12_RESOURCE_STATES newState,
    const SubresourceRange& range) {
    // Keyed-mutex (11on12) textures must be registered for acquisition at submit time.
    HandleTransitionSpecialCases(commandContext);

    const ExecutionSerial pendingCommandSerial =
        ToBackend(GetDevice())->GetPendingCommandSerial();

    // Update() visits `range` piecewise with the currently tracked state for each
    // piece; each piece gets its own transition (or none, if already compatible).
    mSubresourceStateAndDecay.Update(
        range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
            TransitionSubresourceRange(barriers, updateRange, state, newState,
                                       pendingCommandSerial);
        });
}
+
// Appends to `barriers` every transition needed so each subresource is in the D3D12
// state matching its usage in the upcoming pass, as described by `textureUsages`.
// Also tracks the backing heap for residency (for non-external allocations).
void Texture::TrackUsageAndGetResourceBarrierForPass(
    CommandRecordingContext* commandContext,
    std::vector<D3D12_RESOURCE_BARRIER>* barriers,
    const TextureSubresourceUsage& textureUsages) {
    if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
        // Track the underlying heap to ensure residency.
        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
        commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
    }

    // Keyed-mutex (11on12) textures must be registered for acquisition at submit time.
    HandleTransitionSpecialCases(commandContext);

    const ExecutionSerial pendingCommandSerial =
        ToBackend(GetDevice())->GetPendingCommandSerial();

    mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
                                                       StateAndDecay* state,
                                                       wgpu::TextureUsage usage) {
        // Skip if this subresource is not used during the current pass
        if (usage == wgpu::TextureUsage::None) {
            return;
        }

        D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
        TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
    });
}
+
// Builds an RTV descriptor covering `sliceCount` slices starting at `baseSlice` of the
// given mip level. For 2D textures a slice is an array layer; for 3D textures it is a
// depth (W) slice. Multisampled textures take the TEXTURE2DMS path (single mip/layer).
D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
                                                        uint32_t baseSlice,
                                                        uint32_t sliceCount) const {
    D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
    rtvDesc.Format = GetD3D12Format();
    if (IsMultisampledTexture()) {
        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
        ASSERT(GetNumMipLevels() == 1);
        ASSERT(sliceCount == 1);
        ASSERT(baseSlice == 0);
        ASSERT(mipLevel == 0);
        rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
        return rtvDesc;
    }
    switch (GetDimension()) {
        case wgpu::TextureDimension::e2D:
            // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
            // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
            // them as 1-layer 2D array textures. (Just like how we treat SRVs)
            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
            // _rtv
            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
            rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
            rtvDesc.Texture2DArray.ArraySize = sliceCount;
            rtvDesc.Texture2DArray.MipSlice = mipLevel;
            rtvDesc.Texture2DArray.PlaneSlice = 0;
            break;
        case wgpu::TextureDimension::e3D:
            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
            rtvDesc.Texture3D.MipSlice = mipLevel;
            rtvDesc.Texture3D.FirstWSlice = baseSlice;
            rtvDesc.Texture3D.WSize = sliceCount;
            break;
        case wgpu::TextureDimension::e1D:
            // 1D render targets are not expected to reach this path.
            UNREACHABLE();
            break;
    }
    return rtvDesc;
}
+
// Builds a DSV descriptor for `layerCount` layers starting at `baseArrayLayer` of the
// given mip level. Read-only DSV flags are applied only for aspects both requested
// read-only AND present in `aspects`.
D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
                                                        uint32_t baseArrayLayer,
                                                        uint32_t layerCount,
                                                        Aspect aspects,
                                                        bool depthReadOnly,
                                                        bool stencilReadOnly) const {
    D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
    dsvDesc.Format = GetD3D12Format();
    dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
    if (depthReadOnly && aspects & Aspect::Depth) {
        dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
    }
    if (stencilReadOnly && aspects & Aspect::Stencil) {
        dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
    }

    if (IsMultisampledTexture()) {
        ASSERT(GetNumMipLevels() == 1);
        ASSERT(layerCount == 1);
        ASSERT(baseArrayLayer == 0);
        ASSERT(mipLevel == 0);
        dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
    } else {
        // Always use the array dimension so a base layer and count can be specified.
        dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
        dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
        dsvDesc.Texture2DArray.ArraySize = layerCount;
        dsvDesc.Texture2DArray.MipSlice = mipLevel;
    }

    return dsvDesc;
}
+
// Clears every subresource in `range` to zero (lazy clear) or to one (non-zero test
// path), choosing the fastest mechanism the resource supports:
//  - ClearDepthStencilView for depth/stencil-capable resources,
//  - ClearRenderTargetView for render-target-capable resources,
//  - a zero/one-filled staging-buffer copy for everything else.
// When clearValue is Zero, subresources already marked initialized are skipped, and on
// success the whole range is marked initialized.
MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
                                 const SubresourceRange& range,
                                 TextureBase::ClearValue clearValue) {
    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();

    Device* device = ToBackend(GetDevice());

    uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
    float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;

    if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);

        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
             ++level) {
            for (uint32_t layer = range.baseArrayLayer;
                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                // Iterate the aspects individually to determine which clear flags to use.
                D3D12_CLEAR_FLAGS clearFlags = {};
                for (Aspect aspect : IterateEnumMask(range.aspects)) {
                    if (clearValue == TextureBase::ClearValue::Zero &&
                        IsSubresourceContentInitialized(
                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
                        // Skip lazy clears if already initialized.
                        continue;
                    }

                    switch (aspect) {
                        case Aspect::Depth:
                            clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
                            break;
                        case Aspect::Stencil:
                            clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
                            break;
                        default:
                            UNREACHABLE();
                    }
                }

                // Every requested aspect of this subresource was already initialized.
                if (clearFlags == 0) {
                    continue;
                }

                // Allocate a transient DSV; it is only needed while recording this clear.
                CPUDescriptorHeapAllocation dsvHandle;
                DAWN_TRY_ASSIGN(
                    dsvHandle,
                    device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
                    dsvHandle.GetBaseDescriptor();
                D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
                    GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
                device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
                                                                 baseDescriptor);

                commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
                                                   clearColor, 0, nullptr);
            }
        }
    } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);

        const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};

        ASSERT(range.aspects == Aspect::Color);
        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
             ++level) {
            for (uint32_t layer = range.baseArrayLayer;
                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                if (clearValue == TextureBase::ClearValue::Zero &&
                    IsSubresourceContentInitialized(
                        SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
                    // Skip lazy clears if already initialized.
                    continue;
                }

                CPUDescriptorHeapAllocation rtvHeap;
                DAWN_TRY_ASSIGN(
                    rtvHeap,
                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
                const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();

                // 3D textures are cleared one mip at a time through an RTV covering every
                // depth slice of that mip; 2D textures clear one array layer per RTV.
                uint32_t baseSlice = layer;
                uint32_t sliceCount = 1;
                if (GetDimension() == wgpu::TextureDimension::e3D) {
                    baseSlice = 0;
                    sliceCount = std::max(GetDepth() >> level, 1u);
                }
                D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
                    GetRTVDescriptor(level, baseSlice, sliceCount);
                device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
                                                                 rtvHandle);
                commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
            }
        }
    } else {
        // create temp buffer with clear color to copy to the texture image
        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);

        for (Aspect aspect : IterateEnumMask(range.aspects)) {
            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;

            // Size the staging buffer for the largest mip in the range; smaller mips
            // copy from a prefix of the same buffer.
            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);

            uint32_t bytesPerRow =
                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
                      kTextureBytesPerRowAlignment);
            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
                                  largestMipSize.depthOrArrayLayers;
            DynamicUploader* uploader = device->GetDynamicUploader();
            UploadHandle uploadHandle;
            DAWN_TRY_ASSIGN(uploadHandle,
                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
                                               blockInfo.byteSize));
            memset(uploadHandle.mappedBuffer, clearColor, bufferSize);

            for (uint32_t level = range.baseMipLevel;
                 level < range.baseMipLevel + range.levelCount; ++level) {
                // compute d3d12 texture copy locations for texture and buffer
                Extent3D copySize = GetMipLevelPhysicalSize(level);

                for (uint32_t layer = range.baseArrayLayer;
                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
                    if (clearValue == TextureBase::ClearValue::Zero &&
                        IsSubresourceContentInitialized(
                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
                        // Skip lazy clears if already initialized.
                        continue;
                    }

                    TextureCopy textureCopy;
                    textureCopy.texture = this;
                    textureCopy.origin = {0, 0, layer};
                    textureCopy.mipLevel = level;
                    textureCopy.aspect = aspect;
                    // NOTE(review): GetHeight() (texel height of mip 0) is passed as the
                    // rows-per-image argument — confirm this matches what
                    // RecordBufferTextureCopyWithBufferHandle expects for block-compressed
                    // formats and smaller mips.
                    RecordBufferTextureCopyWithBufferHandle(
                        BufferTextureCopyDirection::B2T, commandList,
                        ToBackend(uploadHandle.stagingBuffer)->GetResource(),
                        uploadHandle.startOffset, bytesPerRow, GetHeight(), textureCopy,
                        copySize);
                }
            }
        }
    }
    if (clearValue == TextureBase::ClearValue::Zero) {
        // Mark the whole range initialized so subsequent lazy clears become no-ops.
        SetIsSubresourceContentInitialized(true, range);
        GetDevice()->IncrementLazyClearCountForTesting();
    }
    return {};
}
+
+ void Texture::SetLabelHelper(const char* prefix) {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
+ GetLabel());
+ }
+
// Dawn API: tags the D3D12 resource with the internal-texture prefix and the label.
void Texture::SetLabelImpl() {
    SetLabelHelper("Dawn_InternalTexture");
}
+
+ void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+ const SubresourceRange& range) {
+ if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
+ }
+ if (!IsSubresourceContentInitialized(range)) {
+ // If subresource has not been initialized, clear it to black as it could contain
+ // dirty bits from recycled memory
+ GetDevice()->ConsumedError(
+ ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+ }
+ }
+
+ bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
+ return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
+ isValidToDecay == other.isValidToDecay;
+ }
+
+ // static
+ Ref<TextureView> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+ }
+
// Precomputes the SRV descriptor for this view. Depth/stencil formats are allocated
// TYPELESS and must be reinterpreted per-aspect here; multi-planar formats select the
// plane slice from the view aspect; the view dimension is mapped onto the closest
// D3D12 SRV dimension (2D views become 1-layer 2D arrays).
TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
    : TextureViewBase(texture, descriptor) {
    mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
    mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;

    // TODO(enga): This will need to be much more nuanced when WebGPU has
    // texture view compatibility rules.
    UINT planeSlice = 0;
    if (GetFormat().HasDepthOrStencil()) {
        // Configure the SRV descriptor to reinterpret the texture allocated as
        // TYPELESS as a single-plane shader-accessible view.
        switch (descriptor->format) {
            case wgpu::TextureFormat::Depth32Float:
            case wgpu::TextureFormat::Depth24Plus:
                // Depth24Plus is backed by a 32-bit float depth format in this backend.
                mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
                break;
            case wgpu::TextureFormat::Depth16Unorm:
                mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
                break;
            case wgpu::TextureFormat::Depth24UnormStencil8:
                switch (descriptor->aspect) {
                    case wgpu::TextureAspect::DepthOnly:
                        planeSlice = 0;
                        mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
                        break;
                    case wgpu::TextureAspect::StencilOnly:
                        planeSlice = 1;
                        mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
                        // Stencil is accessed using the .g component in the shader.
                        // Map it to the zeroth component to match other APIs.
                        mSrvDesc.Shader4ComponentMapping =
                            D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
                                D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
                        break;
                    case wgpu::TextureAspect::All:
                        // A single aspect is not selected. The texture view must not be
                        // sampled.
                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
                        break;

                    // Depth formats cannot use plane aspects.
                    case wgpu::TextureAspect::Plane0Only:
                    case wgpu::TextureAspect::Plane1Only:
                        UNREACHABLE();
                        break;
                }
                break;
            case wgpu::TextureFormat::Depth24PlusStencil8:
            case wgpu::TextureFormat::Depth32FloatStencil8:
                switch (descriptor->aspect) {
                    case wgpu::TextureAspect::DepthOnly:
                        planeSlice = 0;
                        mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
                        break;
                    case wgpu::TextureAspect::StencilOnly:
                        planeSlice = 1;
                        mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
                        // Stencil is accessed using the .g component in the shader.
                        // Map it to the zeroth component to match other APIs.
                        mSrvDesc.Shader4ComponentMapping =
                            D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
                                D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
                                D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
                        break;
                    case wgpu::TextureAspect::All:
                        // A single aspect is not selected. The texture view must not be
                        // sampled.
                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
                        break;

                    // Depth formats cannot use plane aspects.
                    case wgpu::TextureAspect::Plane0Only:
                    case wgpu::TextureAspect::Plane1Only:
                        UNREACHABLE();
                        break;
                }
                break;
            default:
                UNREACHABLE();
                break;
        }
    }

    // Per plane view formats must have the plane slice number be the index of the plane in the
    // array of textures.
    if (texture->GetFormat().IsMultiPlanar()) {
        const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
        planeSlice = GetAspectIndex(planeAspect);
        mSrvDesc.Format =
            D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
    }

    // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
    // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
    // array textures.
    // Multisampled textures may only be one array layer, so we use
    // D3D12_SRV_DIMENSION_TEXTURE2DMS.
    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
    if (GetTexture()->IsMultisampledTexture()) {
        switch (descriptor->dimension) {
            case wgpu::TextureViewDimension::e2DArray:
                ASSERT(texture->GetArrayLayers() == 1);
                [[fallthrough]];
            case wgpu::TextureViewDimension::e2D:
                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
                break;

            default:
                UNREACHABLE();
        }
    } else {
        switch (descriptor->dimension) {
            case wgpu::TextureViewDimension::e1D:
                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
                mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
                mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
                mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
                break;

            case wgpu::TextureViewDimension::e2D:
            case wgpu::TextureViewDimension::e2DArray:
                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
                mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
                mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
                mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
                mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
                mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
                mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
                break;
            case wgpu::TextureViewDimension::Cube:
            case wgpu::TextureViewDimension::CubeArray:
                // A cube view is a cube array with a single cube (6 layers).
                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
                ASSERT(descriptor->arrayLayerCount % 6 == 0);
                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
                mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
                mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
                mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
                mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
                mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
                break;
            case wgpu::TextureViewDimension::e3D:
                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
                mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
                mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
                mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
                break;

            case wgpu::TextureViewDimension::Undefined:
                UNREACHABLE();
        }
    }
}
+
+ DXGI_FORMAT TextureView::GetD3D12Format() const {
+ return D3D12TextureFormat(GetFormat().format);
+ }
+
// Returns the SRV descriptor precomputed in the constructor. DXGI_FORMAT_UNKNOWN is
// only set for depth-stencil views that select no single aspect; such views must never
// be sampled, hence the assert.
const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
    ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
    return mSrvDesc;
}
+
+ D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
+ return ToBackend(GetTexture())
+ ->GetRTVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
+ }
+
+ D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
+ bool stencilReadOnly) const {
+ ASSERT(GetLevelCount() == 1);
+ return ToBackend(GetTexture())
+ ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(),
+ GetAspects(), depthReadOnly, stencilReadOnly);
+ }
+
// Builds a UAV descriptor for storage-texture access through this view. Multisampled
// textures and cube/undefined view dimensions are invalid for storage use.
D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
    D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
    uavDesc.Format = GetD3D12Format();

    ASSERT(!GetTexture()->IsMultisampledTexture());
    switch (GetDimension()) {
        case wgpu::TextureViewDimension::e1D:
            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
            uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
            break;
        case wgpu::TextureViewDimension::e2D:
        case wgpu::TextureViewDimension::e2DArray:
            // 2D views are expressed as 1-layer 2D arrays, mirroring the SRV handling.
            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
            uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
            uavDesc.Texture2DArray.ArraySize = GetLayerCount();
            uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
            uavDesc.Texture2DArray.PlaneSlice = 0;
            break;
        case wgpu::TextureViewDimension::e3D:
            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
            uavDesc.Texture3D.FirstWSlice = 0;
            // NOTE(review): WSize is not clamped to at least 1 — confirm
            // GetDepth() >> mip cannot be 0 for valid storage views.
            uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
            uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
            break;
        // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
        // descriptor for them.
        case wgpu::TextureViewDimension::Cube:
        case wgpu::TextureViewDimension::CubeArray:
        case wgpu::TextureViewDimension::Undefined:
            UNREACHABLE();
    }
    return uavDesc;
}
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h
new file mode 100644
index 00000000000..76572bab59e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/TextureD3D12.h
@@ -0,0 +1,162 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_TEXTURED3D12_H_
+#define DAWNNATIVE_D3D12_TEXTURED3D12_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/d3d12/IntegerTypes.h"
+#include "dawn/native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+
+namespace dawn::native::d3d12 {
+
+ class CommandRecordingContext;
+ class Device;
+ class D3D11on12ResourceCacheEntry;
+
// Format conversion and external-texture validation helpers; implementations live in
// TextureD3D12.cpp.
DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
                                            const TextureDescriptor* descriptor);
MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
+
// D3D12 implementation of a WebGPU texture. Owns or wraps the ID3D12Resource and
// tracks per-subresource D3D12 resource state so transition barriers can be generated
// lazily from usage information.
class Texture final : public TextureBase {
  public:
    // Creates and initializes an internally allocated texture.
    static ResultOrError<Ref<Texture>> Create(Device* device,
                                              const TextureDescriptor* descriptor);
    // Wraps an externally created resource (shared via D3D11on12 / keyed mutex).
    static ResultOrError<Ref<Texture>> CreateExternalImage(
        Device* device,
        const TextureDescriptor* descriptor,
        ComPtr<ID3D12Resource> d3d12Texture,
        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
        bool isSwapChainTexture,
        bool isInitialized);
    // Wraps an existing D3D12 resource (swap-chain buffer path).
    static ResultOrError<Ref<Texture>> Create(Device* device,
                                              const TextureDescriptor* descriptor,
                                              ComPtr<ID3D12Resource> d3d12Texture);

    DXGI_FORMAT GetD3D12Format() const;
    ID3D12Resource* GetD3D12Resource() const;
    // Format used for buffer<->texture copies of a single aspect of this texture.
    DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;

    D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t mipLevel,
                                                   uint32_t baseSlice,
                                                   uint32_t sliceCount) const;
    D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
                                                   uint32_t baseArrayLayer,
                                                   uint32_t layerCount,
                                                   Aspect aspects,
                                                   bool depthReadOnly,
                                                   bool stencilReadOnly) const;

    // Lazily clears `range` if it has not been initialized yet (see
    // Toggle::LazyClearResourceOnFirstUse).
    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
                                             const SubresourceRange& range);

    MaybeError AcquireKeyedMutex();
    void ReleaseKeyedMutex();

    // Barrier helpers: the "Get" variants append barriers for the caller to record,
    // the "Now" variants record them immediately.
    // NOTE(review): several declarations name the vector parameter `barrier` while the
    // definitions use `barriers` — consider aligning the names.
    void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
                                                std::vector<D3D12_RESOURCE_BARRIER>* barrier,
                                                const TextureSubresourceUsage& textureUsages);
    void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
                                              std::vector<D3D12_RESOURCE_BARRIER>* barrier,
                                              wgpu::TextureUsage usage,
                                              const SubresourceRange& range);
    void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
                                    wgpu::TextureUsage usage,
                                    const SubresourceRange& range);
    void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
                                    D3D12_RESOURCE_STATES newState,
                                    const SubresourceRange& range);
    void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
                                       wgpu::TextureUsage usage);
    void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
                                       D3D12_RESOURCE_STATES newState);

  private:
    Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
    ~Texture() override;
    using TextureBase::TextureBase;

    MaybeError InitializeAsInternalTexture();
    MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
                                           ComPtr<ID3D12Resource> d3d12Texture,
                                           Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
                                           bool isSwapChainTexture);
    MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);

    void SetLabelHelper(const char* prefix);

    // Dawn API
    void SetLabelImpl() override;
    void DestroyImpl() override;

    MaybeError ClearTexture(CommandRecordingContext* commandContext,
                            const SubresourceRange& range,
                            TextureBase::ClearValue clearValue);

    // Barriers implementation details.
    // Per-subresource tracking of the last known D3D12 state, plus the bookkeeping
    // needed for D3D12's implicit state decay on common-state promotion.
    struct StateAndDecay {
        D3D12_RESOURCE_STATES lastState;
        ExecutionSerial lastDecaySerial;
        bool isValidToDecay;

        bool operator==(const StateAndDecay& other) const;
    };
    void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
                                              std::vector<D3D12_RESOURCE_BARRIER>* barrier,
                                              D3D12_RESOURCE_STATES newState,
                                              const SubresourceRange& range);
    void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
                                    const SubresourceRange& range,
                                    StateAndDecay* state,
                                    D3D12_RESOURCE_STATES subresourceNewState,
                                    ExecutionSerial pendingCommandSerial) const;
    void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);

    SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;

    // Backing allocation; external textures use AllocationMethod::kExternal.
    ResourceHeapAllocation mResourceAllocation;
    bool mSwapChainTexture = false;
    D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;

    // Non-null only for textures wrapped from D3D11on12 (keyed-mutex) resources.
    Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
};
+
// D3D12 implementation of a WebGPU texture view. The SRV descriptor is precomputed at
// construction; RTV/DSV/UAV descriptors are derived on demand.
class TextureView final : public TextureViewBase {
  public:
    static Ref<TextureView> Create(TextureBase* texture,
                                   const TextureViewDescriptor* descriptor);

    DXGI_FORMAT GetD3D12Format() const;

    const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
    D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
    D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly,
                                                   bool stencilReadOnly) const;
    D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;

  private:
    TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);

    // Precomputed SRV description; Format is DXGI_FORMAT_UNKNOWN for depth-stencil
    // views that select no single aspect (such views must not be sampled).
    D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
};
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_TEXTURED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp
new file mode 100644
index 00000000000..8d4749fd95a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.cpp
@@ -0,0 +1,308 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/d3d12/UtilsD3D12.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/CommandRecordingContext.h"
+#include "dawn/native/d3d12/D3D12Error.h"
+#include "dawn/native/d3d12/DeviceD3D12.h"
+
+#include <stringapiset.h>
+
+namespace dawn::native::d3d12 {
+
// Converts a NUL-terminated UTF-8 string to UTF-16 (std::wstring) using the classic
// two-pass MultiByteToWideChar pattern: first measure, then convert. Returns an
// internal error when the input contains invalid UTF-8 (MB_ERR_INVALID_CHARS) or the
// conversion otherwise fails.
ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
    size_t len = strlen(str);
    if (len == 0) {
        return std::wstring();
    }
    // NOTE(review): `len` (size_t) narrows to the API's int parameter — fine for debug
    // labels, but would misbehave for inputs larger than INT_MAX.
    int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
    if (numChars == 0) {
        return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
    }
    std::wstring result;
    result.resize(numChars);
    int numConvertedChars =
        MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
    if (numConvertedChars != numChars) {
        return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
    }
    return std::move(result);
}
+
// Maps a WebGPU comparison function to the corresponding D3D12 comparison func.
// `Undefined` is asserted unreachable; callers are expected to pass a defined value.
D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
    switch (func) {
        case wgpu::CompareFunction::Never:
            return D3D12_COMPARISON_FUNC_NEVER;
        case wgpu::CompareFunction::Less:
            return D3D12_COMPARISON_FUNC_LESS;
        case wgpu::CompareFunction::LessEqual:
            return D3D12_COMPARISON_FUNC_LESS_EQUAL;
        case wgpu::CompareFunction::Greater:
            return D3D12_COMPARISON_FUNC_GREATER;
        case wgpu::CompareFunction::GreaterEqual:
            return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
        case wgpu::CompareFunction::Equal:
            return D3D12_COMPARISON_FUNC_EQUAL;
        case wgpu::CompareFunction::NotEqual:
            return D3D12_COMPARISON_FUNC_NOT_EQUAL;
        case wgpu::CompareFunction::Always:
            return D3D12_COMPARISON_FUNC_ALWAYS;

        case wgpu::CompareFunction::Undefined:
            UNREACHABLE();
    }
}
+
+ D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+ uint32_t level,
+ uint32_t layer,
+ Aspect aspect) {
+ D3D12_TEXTURE_COPY_LOCATION copyLocation;
+ copyLocation.pResource = texture->GetD3D12Resource();
+ copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
+ copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+
+ return copyLocation;
+ }
+
+ D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+ const Texture* texture,
+ ID3D12Resource* bufferResource,
+ const Extent3D& bufferSize,
+ const uint64_t offset,
+ const uint32_t rowPitch,
+ Aspect aspect) {
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation;
+ bufferLocation.pResource = bufferResource;
+ bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
+ bufferLocation.PlacedFootprint.Offset = offset;
+ bufferLocation.PlacedFootprint.Footprint.Format =
+ texture->GetD3D12CopyableSubresourceFormat(aspect);
+ bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
+ bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
+ bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
+ bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
+ return bufferLocation;
+ }
+
+ D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
+ D3D12_BOX sourceRegion;
+ sourceRegion.left = offset.x;
+ sourceRegion.top = offset.y;
+ sourceRegion.front = offset.z;
+ sourceRegion.right = offset.x + copySize.width;
+ sourceRegion.bottom = offset.y + copySize.height;
+ sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
+ return sourceRegion;
+ }
+
// Returns true iff `format` is one of the DXGI *_TYPELESS formats, i.e. it fixes a bit
// layout but no interpretation, and must be viewed through a fully qualified format.
bool IsTypeless(DXGI_FORMAT format) {
    // List generated from <dxgiformat.h>
    switch (format) {
        case DXGI_FORMAT_R32G32B32A32_TYPELESS:
        case DXGI_FORMAT_R32G32B32_TYPELESS:
        case DXGI_FORMAT_R16G16B16A16_TYPELESS:
        case DXGI_FORMAT_R32G32_TYPELESS:
        case DXGI_FORMAT_R32G8X24_TYPELESS:
        case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
        case DXGI_FORMAT_R10G10B10A2_TYPELESS:
        case DXGI_FORMAT_R8G8B8A8_TYPELESS:
        case DXGI_FORMAT_R16G16_TYPELESS:
        case DXGI_FORMAT_R32_TYPELESS:
        case DXGI_FORMAT_R24G8_TYPELESS:
        case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
        case DXGI_FORMAT_R8G8_TYPELESS:
        case DXGI_FORMAT_R16_TYPELESS:
        case DXGI_FORMAT_R8_TYPELESS:
        case DXGI_FORMAT_BC1_TYPELESS:
        case DXGI_FORMAT_BC2_TYPELESS:
        case DXGI_FORMAT_BC3_TYPELESS:
        case DXGI_FORMAT_BC4_TYPELESS:
        case DXGI_FORMAT_BC5_TYPELESS:
        case DXGI_FORMAT_B8G8R8A8_TYPELESS:
        case DXGI_FORMAT_B8G8R8X8_TYPELESS:
        case DXGI_FORMAT_BC6H_TYPELESS:
        case DXGI_FORMAT_BC7_TYPELESS:
            return true;
        default:
            return false;
    }
}
+
// Replays the precomputed copy regions in `baseCopySplit` as CopyTextureRegion calls
// between `bufferResource` and a single subresource (mip `textureMiplevel`, layer
// `textureLayer`, `aspect`) of `textureBase`, in the direction given by `direction`.
// `baseOffset` is added to each region's aligned buffer offset.
void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
                                       ID3D12GraphicsCommandList* commandList,
                                       const TextureCopySubresource& baseCopySplit,
                                       ID3D12Resource* bufferResource,
                                       uint64_t baseOffset,
                                       uint64_t bufferBytesPerRow,
                                       TextureBase* textureBase,
                                       uint32_t textureMiplevel,
                                       uint32_t textureLayer,
                                       Aspect aspect) {
    Texture* texture = ToBackend(textureBase);
    // The texture-side location is the same for every region of this subresource.
    const D3D12_TEXTURE_COPY_LOCATION textureLocation =
        ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);

    for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
        const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];

        // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
        // members in TextureCopySubresource::CopyInfo.
        const uint64_t offsetBytes = info.alignedOffset + baseOffset;
        const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
            ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
                                                      offsetBytes, bufferBytesPerRow, aspect);
        if (direction == BufferTextureCopyDirection::B2T) {
            // Buffer-to-texture: the source box is in buffer-footprint coordinates.
            const D3D12_BOX sourceRegion =
                ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);

            commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
                                           info.textureOffset.y, info.textureOffset.z,
                                           &bufferLocation, &sourceRegion);
        } else {
            ASSERT(direction == BufferTextureCopyDirection::T2B);
            // Texture-to-buffer: the source box is in texture coordinates.
            const D3D12_BOX sourceRegion =
                ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);

            commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
                                           info.bufferOffset.y, info.bufferOffset.z,
                                           &textureLocation, &sourceRegion);
        }
    }
}
+
+    // Records the buffer<->texture copies for a 2D texture: computes the (at most two)
+    // per-layer copy splits once, then replays them for each of the
+    // `copySize.depthOrArrayLayers` array layers with a growing buffer offset.
+    void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
+                                            ID3D12GraphicsCommandList* commandList,
+                                            ID3D12Resource* bufferResource,
+                                            const uint64_t offset,
+                                            const uint32_t bytesPerRow,
+                                            const uint32_t rowsPerImage,
+                                            const TextureCopy& textureCopy,
+                                            const TexelBlockInfo& blockInfo,
+                                            const Extent3D& copySize) {
+        // See comments in Compute2DTextureCopySplits() for more details.
+        const TextureCopySplits copySplits = Compute2DTextureCopySplits(
+            textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+        // Widen before multiplying: bytesPerRow and rowsPerImage are both uint32_t, so the
+        // original expression multiplied in 32 bits and could silently wrap for layers
+        // larger than 4GiB before being widened into the uint64_t result.
+        const uint64_t bytesPerLayer = static_cast<uint64_t>(bytesPerRow) * rowsPerImage;
+
+        // copySplits.copySubresources[1] is always calculated for the second copy layer with
+        // extra "bytesPerLayer" copy offset compared with the first copy layer. So
+        // here we use an array bufferOffsetsForNextLayer to record the extra offsets
+        // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
+        // the next copy layer that uses copySplits.copySubresources[0], and
+        // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
+        // that uses copySplits.copySubresources[1].
+        std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
+            bufferOffsetsForNextLayer = {{0u, 0u}};
+
+        for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+            const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
+
+            const TextureCopySubresource& copySplitPerLayerBase =
+                copySplits.copySubresources[splitIndex];
+            const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
+            const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
+
+            RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
+                                              bufferResource, bufferOffsetForNextLayer, bytesPerRow,
+                                              textureCopy.texture.Get(), textureCopy.mipLevel,
+                                              copyTextureLayer, textureCopy.aspect);
+
+            bufferOffsetsForNextLayer[splitIndex] +=
+                bytesPerLayer * copySplits.copySubresources.size();
+        }
+    }
+
+    // Records a buffer<->texture copy on `commandList` for a single aspect, dispatching
+    // on the texture dimension to the appropriate copy-split computation. `offset`,
+    // `bytesPerRow` and `rowsPerImage` describe the buffer-side data layout.
+    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                                 ID3D12GraphicsCommandList* commandList,
+                                                 ID3D12Resource* bufferResource,
+                                                 const uint64_t offset,
+                                                 const uint32_t bytesPerRow,
+                                                 const uint32_t rowsPerImage,
+                                                 const TextureCopy& textureCopy,
+                                                 const Extent3D& copySize) {
+        ASSERT(HasOneBit(textureCopy.aspect));
+
+        TextureBase* texture = textureCopy.texture.Get();
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+
+        switch (texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D: {
+                // 1D textures copy splits are a subset of the single-layer 2D texture copy splits,
+                // at least while 1D textures can only have a single array layer.
+                ASSERT(texture->GetArrayLayers() == 1);
+
+                TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
+                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
+                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
+                                                  bufferResource, 0, bytesPerRow, texture,
+                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
+                break;
+            }
+
+            // Record the CopyTextureRegion commands for 2D textures, with special handling of array
+            // layers since each require their own set of copies.
+            case wgpu::TextureDimension::e2D:
+                Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
+                                                   bytesPerRow, rowsPerImage, textureCopy,
+                                                   blockInfo, copySize);
+                break;
+
+            case wgpu::TextureDimension::e3D: {
+                // See comments in Compute3DTextureCopySplits() for more details.
+                TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
+                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
+                                                  bufferResource, 0, bytesPerRow, texture,
+                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
+                break;
+            }
+        }
+    }
+
+    // Convenience overload: unwraps the Dawn buffer in `bufferCopy` into its backing
+    // ID3D12Resource plus layout parameters and forwards to
+    // RecordBufferTextureCopyWithBufferHandle().
+    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                                 ID3D12GraphicsCommandList* commandList,
+                                 const BufferCopy& bufferCopy,
+                                 const TextureCopy& textureCopy,
+                                 const Extent3D& copySize) {
+        RecordBufferTextureCopyWithBufferHandle(direction, commandList,
+                                                ToBackend(bufferCopy.buffer)->GetD3D12Resource(),
+                                                bufferCopy.offset, bufferCopy.bytesPerRow,
+                                                bufferCopy.rowsPerImage, textureCopy, copySize);
+    }
+
+    // Attaches a debug name ("<prefix>" or "<prefix>_<label>") to `object` via
+    // WKPDID_D3DDebugObjectName so it shows up in D3D debug layers and captures.
+    // The user label is only appended when non-empty and the
+    // UseUserDefinedLabelsInBackend toggle is on. No-op when `object` is null.
+    void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
+        if (!object) {
+            return;
+        }
+
+        if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+            object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
+            return;
+        }
+
+        std::string objectName = prefix;
+        objectName += "_";
+        objectName += label;
+        object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+    }
+
+} // namespace dawn::native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h
new file mode 100644
index 00000000000..00c850f385c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/UtilsD3D12.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_UTILSD3D12_H_
+#define DAWNNATIVE_D3D12_UTILSD3D12_H_
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/d3d12/BufferD3D12.h"
+#include "dawn/native/d3d12/TextureCopySplitter.h"
+#include "dawn/native/d3d12/TextureD3D12.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::d3d12 {
+
+    // Converts a narrow string to a wide string; returns an error when the conversion fails.
+    ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
+
+    D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
+
+    // Builds the texture-side D3D12_TEXTURE_COPY_LOCATION for one subresource.
+    D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+                                                                     uint32_t level,
+                                                                     uint32_t layer,
+                                                                     Aspect aspect);
+
+    // Builds the buffer-side (placed-footprint) D3D12_TEXTURE_COPY_LOCATION for a copy
+    // of `bufferSize` texels at byte `offset` with row pitch `rowPitch`.
+    D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+        const Texture* texture,
+        ID3D12Resource* bufferResource,
+        const Extent3D& bufferSize,
+        const uint64_t offset,
+        const uint32_t rowPitch,
+        Aspect aspect);
+    D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
+
+    // True iff `format` is one of the DXGI *_TYPELESS formats.
+    bool IsTypeless(DXGI_FORMAT format);
+
+    // B2T = buffer-to-texture, T2B = texture-to-buffer.
+    enum class BufferTextureCopyDirection {
+        B2T,
+        T2B,
+    };
+
+    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                                 ID3D12GraphicsCommandList* commandList,
+                                                 ID3D12Resource* bufferResource,
+                                                 const uint64_t offset,
+                                                 const uint32_t bytesPerRow,
+                                                 const uint32_t rowsPerImage,
+                                                 const TextureCopy& textureCopy,
+                                                 const Extent3D& copySize);
+
+    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                                 ID3D12GraphicsCommandList* commandList,
+                                 const BufferCopy& bufferCopy,
+                                 const TextureCopy& textureCopy,
+                                 const Extent3D& copySize);
+
+    // Attaches "<prefix>" or "<prefix>_<label>" as the D3D debug name of `object`.
+    void SetDebugName(Device* device,
+                      ID3D12Object* object,
+                      const char* prefix,
+                      std::string label = "");
+
+} // namespace dawn::native::d3d12
+
+#endif // DAWNNATIVE_D3D12_UTILSD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h b/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h
new file mode 100644
index 00000000000..f020fec14d7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/d3d12/d3d12_platform.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+#define DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+
+// Pre-emptively include windows.h but remove its macros so that they aren't set when declaring the
+// COM interfaces. Otherwise ID3D12InfoQueue::GetMessage would be either GetMessageA or GetMessageW
+// which causes compilation errors.
+#include "dawn/common/windows_with_undefs.h"
+
+#include <d3d11_2.h>
+#include <d3d11on12.h>
+#include <d3d12.h>
+#include <dxcapi.h>
+#include <dxgi1_4.h>
+#include <wrl.h>
+
+// DXProgrammableCapture.h takes a dependency on other platform header
+// files, so it must be defined after them.
+#include <DXProgrammableCapture.h>
+#include <dxgidebug.h>
+
+using Microsoft::WRL::ComPtr;
+
+#endif // DAWNNATIVE_D3D12_D3D12PLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/dawn_platform.h b/chromium/third_party/dawn/src/dawn/native/dawn_platform.h
new file mode 100644
index 00000000000..c8863afba09
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/dawn_platform.h
@@ -0,0 +1,62 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_DAWNPLATFORM_H_
+#define DAWNNATIVE_DAWNPLATFORM_H_
+
+// Use webgpu_cpp to have the enum and bitfield definitions
+#include <dawn/webgpu_cpp.h>
+
+#include <dawn/native/dawn_platform_autogen.h>
+
+namespace dawn::native {
+
+    // kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
+    // if the enums are contiguous, making it suitable for iteration.
+    // It is defined in dawn_platform_autogen.h
+    template <typename T>
+    constexpr uint32_t kEnumCount = EnumCount<T>::value;
+
+    // Extra buffer usages
+    // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
+    // usage as storage buffer in the internal pipeline.
+    // NOTE: these values use high bits (0x40000000 / 0x80000000) that must not collide
+    // with bits of the public wgpu::BufferUsage enum.
+    static constexpr wgpu::BufferUsage kInternalStorageBuffer =
+        static_cast<wgpu::BufferUsage>(0x40000000);
+
+    // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
+    static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
+        static_cast<wgpu::BufferUsage>(0x80000000);
+
+    // Extra texture usages
+    // Add an extra texture usage (readonly render attachment usage) for render pass resource
+    // tracking
+    static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
+        static_cast<wgpu::TextureUsage>(0x40000000);
+
+    // Internal usage to help tracking when a subresource is used as render attachment usage
+    // more than once in a render pass.
+    static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
+        static_cast<wgpu::TextureUsage>(0x80000001);
+
+    // Add an extra texture usage for textures that will be presented, for use in backends
+    // that needs to transition to present usage.
+    // This currently aliases wgpu::TextureUsage::Present, we would assign it
+    // some bit when wgpu::TextureUsage::Present is removed.
+    static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
+
+    // Binding-type counterpart of kInternalStorageBuffer (see the QueryResolve comment above).
+    static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
+        static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
+}  // namespace dawn::native
+
+#endif // DAWNNATIVE_DAWNPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h
new file mode 100644
index 00000000000..0dd72045fac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.h
@@ -0,0 +1,33 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BACKENDMTL_H_
+#define DAWNNATIVE_METAL_BACKENDMTL_H_
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native::metal {
+
+    // BackendConnection implementation for Metal: enumerates MTLDevices and wraps
+    // each one in a metal::Adapter.
+    class Backend : public BackendConnection {
+      public:
+        Backend(InstanceBase* instance);
+
+        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+            const AdapterDiscoveryOptionsBase* optionsBase) override;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_BACKENDMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm
new file mode 100644
index 00000000000..920bb1da44a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BackendMTL.mm
@@ -0,0 +1,646 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BackendMTL.h"
+
+#include "dawn/common/CoreFoundationRef.h"
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/NSRef.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/MetalBackend.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+#if defined(DAWN_PLATFORM_MACOS)
+# import <IOKit/IOKitLib.h>
+# include "dawn/common/IOKitRef.h"
+#endif
+
+#include <vector>
+
+namespace dawn::native::metal {
+
+ namespace {
+
+        // PCI vendor/device identifier pair reported for an adapter.
+        struct PCIIDs {
+            uint32_t vendorId;
+            uint32_t deviceId;
+        };
+
+        // Maps a marketing-name substring (e.g. "Radeon") to its PCI vendor id.
+        struct Vendor {
+            const char* trademark;
+            uint32_t vendorId;
+        };
+
+#if defined(DAWN_PLATFORM_MACOS)
+        // Known GPU-name substrings used as a fallback to guess the PCI vendor id
+        // when the IORegistry path is unavailable (pre-macOS 10.13).
+        const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
+                                   {"Radeon", gpu_info::kVendorID_AMD},
+                                   {"Intel", gpu_info::kVendorID_Intel},
+                                   {"Geforce", gpu_info::kVendorID_Nvidia},
+                                   {"Quadro", gpu_info::kVendorID_Nvidia}};
+
+        // Find vendor ID from MTLDevice name.
+        // Scans the device's marketing name for a known vendor substring (kVendors);
+        // errors out when no substring matches.
+        MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
+            uint32_t vendorId = 0;
+            const char* deviceName = [device.name UTF8String];
+            for (const auto& it : kVendors) {
+                if (strstr(deviceName, it.trademark) != nullptr) {
+                    vendorId = it.vendorId;
+                    break;
+                }
+            }
+
+            if (vendorId == 0) {
+                return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
+            }
+
+            // The device id cannot be derived from the name alone, so report it as 0.
+            *ids = PCIIDs{vendorId, 0};
+            return {};
+        }
+
+        // Extracts an integer property from a registry entry.
+        // Returns 0 when the property is absent (indistinguishable from a stored 0).
+        uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
+            uint32_t value = 0;
+
+            // Recursively search registry entry and its parents for property name.
+            // The returned data is owned here and released by the CFRef wrapper.
+            CFRef<CFDataRef> data =
+                AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
+                    entry, kIOServicePlane, name, kCFAllocatorDefault,
+                    kIORegistryIterateRecursively | kIORegistryIterateParents)));
+
+            if (data == nullptr) {
+                return value;
+            }
+
+            // CFDataGetBytePtr() is guaranteed to return a read-only pointer
+            value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
+            return value;
+        }
+
+        // Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
+        // The registry entry corresponding to [device registryID] doesn't contain the exact PCI ids
+        // because it corresponds to a driver. However its parent entry corresponds to the device
+        // itself and has uint32_t "device-id" and "vendor-id" keys. For example on a dual-GPU
+        // MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
+        //
+        // - PCI0@0
+        // | - AppleACPIPCI
+        // | | - IGPU@2 (type IOPCIDevice)
+        // | | | - IntelAccelerator (type IOGraphicsAccelerator2)
+        // | | - PEG0@1
+        // | | | - IOPP
+        // | | | | - GFX0@0 (type IOPCIDevice)
+        // | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
+        //
+        // [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
+        // their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
+        MaybeError API_AVAILABLE(macos(10.13))
+        GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            // Get a matching dictionary for the IOGraphicsAccelerator2
+            CFRef<CFMutableDictionaryRef> matchingDict =
+                AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
+            if (matchingDict == nullptr) {
+                return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
+            }
+
+            // IOServiceGetMatchingService will consume the reference on the matching dictionary,
+            // so we don't need to release the dictionary.
+            IORef<io_registry_entry_t> acceleratorEntry = AcquireIORef(
+                IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
+            if (acceleratorEntry == IO_OBJECT_NULL) {
+                return DAWN_INTERNAL_ERROR(
+                    "Failed to get the IO registry entry for the accelerator");
+            }
+
+            // Get the parent entry that will be the IOPCIDevice
+            IORef<io_registry_entry_t> deviceEntry;
+            if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
+                                              deviceEntry.InitializeInto()) != kIOReturnSuccess) {
+                return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
+            }
+
+            ASSERT(deviceEntry != IO_OBJECT_NULL);
+
+            // Missing properties yield 0 (see GetEntryProperty).
+            uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
+            uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
+
+            *ids = PCIIDs{vendorId, deviceId};
+
+            return {};
+        }
+
+        // Resolves the PCI vendor/device ids for `device`, preferring the exact
+        // IORegistry lookup when available.
+        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
+            // id by vendor name on old macOS
+            if (@available(macos 10.13, *)) {
+                return GetDeviceIORegistryPCIInfo(device, ids);
+            } else {
+                return GetVendorIdFromVendors(device, ids);
+            }
+        }
+
+        bool IsMetalSupported() {
+            // Metal was first introduced in macOS 10.11
+            // WebGPU is targeted at macOS 10.12+
+            // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
+            return IsMacOSVersionAtLeast(10, 12);
+        }
+#elif defined(DAWN_PLATFORM_IOS)
+        // iOS devices expose no PCI identifiers; report 0/0 and never fail.
+        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+            DAWN_UNUSED(device);
+            *ids = PCIIDs{0, 0};
+            return {};
+        }
+
+        // Metal is assumed present on every iOS configuration Dawn builds for.
+        bool IsMetalSupported() {
+            return true;
+        }
+#else
+# error "Unsupported Apple platform."
+#endif
+
+        // Returns true only if the device can sample GPU counters at all three command
+        // boundaries (blit, dispatch and draw) that Dawn's query implementation relies on.
+        DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
+            API_AVAILABLE(macos(11.0), ios(14.0)) {
+            bool isBlitBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
+            bool isDispatchBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
+            bool isDrawBoundarySupported =
+                [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
+
+            return isBlitBoundarySupported && isDispatchBoundarySupported &&
+                   isDrawBoundarySupported;
+        }
+
+        // This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
+        // For now, it is written defensively, with many potentially unnecessary guards until
+        // we narrow down the cause of the problem.
+        // Returns true only if `device` advertises the counter set `counterSetName` AND
+        // every counter listed in `counterNames` within that set.
+        DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
+                                                 MTLCommonCounterSet counterSetName,
+                                                 std::vector<MTLCommonCounter> counterNames)
+            API_AVAILABLE(macos(10.15), ios(14.0)) {
+            NSPRef<id<MTLCounterSet>> counterSet = nil;
+            if (![device respondsToSelector:@selector(counterSets)]) {
+                dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
+                return false;
+            }
+            NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
+            if (counterSets == nil) {
+                // On some systems, [device counterSets] may be null and not an empty array.
+                return false;
+            }
+            // MTLDevice’s counterSets property declares which counter sets it supports. Check
+            // whether it's available on the device before requesting a counter set.
+            // Note: Don't do for..in loop to avoid potentially crashy interaction with
+            // NSFastEnumeration.
+            for (NSUInteger i = 0; i < counterSets.count; ++i) {
+                id<MTLCounterSet> set = [counterSets objectAtIndex:i];
+                if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
+                    counterSet = set;
+                    break;
+                }
+            }
+
+            // The counter set is not supported.
+            if (counterSet == nil) {
+                return false;
+            }
+
+            if (![*counterSet respondsToSelector:@selector(counters)]) {
+                dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
+                return false;
+            }
+            NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
+            if (countersInSet == nil) {
+                // On some systems, [MTLCounterSet counters] may be null and not an empty array.
+                return false;
+            }
+
+            // A GPU might support a counter set, but only support a subset of the counters in that
+            // set, check if the counter set supports all specific counters we need. Return false
+            // if there is a counter unsupported.
+            for (MTLCommonCounter counterName : counterNames) {
+                bool found = false;
+                // Note: Don't do for..in loop to avoid potentially crashy interaction with
+                // NSFastEnumeration.
+                for (NSUInteger i = 0; i < countersInSet.count; ++i) {
+                    id<MTLCounter> counter = [countersInSet objectAtIndex:i];
+                    if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
+                        found = true;
+                        break;
+                    }
+                }
+                if (!found) {
+                    return false;
+                }
+            }
+
+            if (@available(macOS 11.0, iOS 14.0, *)) {
+                // Check whether it can read GPU counters at the specified command boundary. Apple
+                // family GPUs do not support sampling between different Metal commands, because
+                // they defer fragment processing until after the GPU processes all the primitives
+                // in the render pass.
+                if (!IsCounterSamplingBoundarySupport(device)) {
+                    return false;
+                }
+            }
+
+            return true;
+        }
+
+ } // anonymous namespace
+
+ // The Metal backend's Adapter.
+
+ class Adapter : public AdapterBase {
+ public:
+        // Builds the adapter description (name, PCI ids, adapter type, driver string)
+        // from the wrapped MTLDevice.
+        Adapter(InstanceBase* instance, id<MTLDevice> device)
+            : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
+            mName = std::string([[*mDevice name] UTF8String]);
+
+            PCIIDs ids;
+            // A PCI-info failure is consumed (non-fatal); the id members then keep
+            // their AdapterBase defaults.
+            if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
+                mVendorId = ids.vendorId;
+                mDeviceId = ids.deviceId;
+            }
+
+#if defined(DAWN_PLATFORM_IOS)
+            mAdapterType = wgpu::AdapterType::IntegratedGPU;
+            const char* systemName = "iOS ";
+#elif defined(DAWN_PLATFORM_MACOS)
+            if ([device isLowPower]) {
+                mAdapterType = wgpu::AdapterType::IntegratedGPU;
+            } else {
+                mAdapterType = wgpu::AdapterType::DiscreteGPU;
+            }
+            const char* systemName = "macOS ";
+#else
+#    error "Unsupported Apple platform."
+#endif
+
+            NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
+            mDriverDescription =
+                "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
+        }
+
+        // AdapterBase Implementation
+        bool SupportsExternalImages() const override {
+            // Via dawn::native::metal::WrapIOSurface
+            return true;
+        }
+
+ private:
+        // Creates the metal::Device for this adapter, sharing the wrapped MTLDevice.
+        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+            const DeviceDescriptor* descriptor) override {
+            return Device::Create(this, mDevice, descriptor);
+        }
+
+        // No extra adapter initialization is needed beyond the constructor.
+        MaybeError InitializeImpl() override {
+            return {};
+        }
+
+        // Probes the MTLDevice (feature sets, counter sets, OS version) and enables the
+        // corresponding optional WebGPU features on this adapter.
+        MaybeError InitializeSupportedFeaturesImpl() override {
+#if defined(DAWN_PLATFORM_MACOS)
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+                mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+            }
+#endif
+
+            // Counter-based queries require the MTLCounter API (macOS 10.15+ / iOS 14+).
+            if (@available(macOS 10.15, iOS 14.0, *)) {
+                if (IsGPUCounterSupported(
+                        *mDevice, MTLCommonCounterSetStatistic,
+                        {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
+                         MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
+                         MTLCommonCounterComputeKernelInvocations})) {
+                    mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+                }
+
+                if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
+                                          {MTLCommonCounterTimestamp})) {
+                    bool enableTimestampQuery = true;
+
+#if defined(DAWN_PLATFORM_MACOS)
+                    // Disable timestamp query on < macOS 11.0 on AMD GPU because WriteTimestamp
+                    // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
+                    // has been fixed on macOS 11.0. See crbug.com/dawn/545.
+                    if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
+                        enableTimestampQuery = false;
+                    }
+#endif
+
+                    if (enableTimestampQuery) {
+                        mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+                    }
+                }
+            }
+
+            if (@available(macOS 10.11, iOS 11.0, *)) {
+                mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+            }
+
+            if (@available(macOS 10.11, iOS 9.0, *)) {
+                mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+            }
+
+#if defined(DAWN_PLATFORM_MACOS)
+            // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
+            if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
+                mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+            }
+#endif
+
+            return {};
+        }
+
+        // Normalized GPU family used to index the per-family limit tables below;
+        // maps both the modern MTLGPUFamily API and legacy MTLFeatureSet probing.
+        enum class MTLGPUFamily {
+            Apple1,
+            Apple2,
+            Apple3,
+            Apple4,
+            Apple5,
+            Apple6,
+            Apple7,
+            Mac1,
+            Mac2,
+        };
+
+        // Determines the device's GPU family, preferring the modern supportsFamily:
+        // API and falling back to legacy MTLFeatureSet probing on older OSes.
+        ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
+            // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+
+            // -[MTLDevice supportsFamily:] and the MTLGPUFamily constants only exist on
+            // macOS 10.15+ / iOS 13.0+. The gate must be iOS 13.0 — the previous
+            // "iOS 10.13" is not a real iOS version and let iOS 11/12 devices fall
+            // through to an unavailable selector.
+            if (@available(macOS 10.15, iOS 13.0, *)) {
+                if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
+                    return MTLGPUFamily::Mac2;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
+                    return MTLGPUFamily::Mac1;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
+                    return MTLGPUFamily::Apple7;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
+                    return MTLGPUFamily::Apple6;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
+                    return MTLGPUFamily::Apple5;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
+                    return MTLGPUFamily::Apple4;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
+                    return MTLGPUFamily::Apple3;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
+                    return MTLGPUFamily::Apple2;
+                }
+                if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
+                    return MTLGPUFamily::Apple1;
+                }
+            }
+
+#if TARGET_OS_OSX
+            if (@available(macOS 10.14, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
+                    return MTLGPUFamily::Mac2;
+                }
+            }
+            if (@available(macOS 10.11, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+                    return MTLGPUFamily::Mac1;
+                }
+            }
+#elif TARGET_OS_IOS
+            // MTLFeatureSet_iOS_GPUFamily4_v1 was introduced in iOS 11.0; the previous
+            // "iOS 10.11" gate referenced a nonexistent iOS version.
+            if (@available(iOS 11.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
+                    return MTLGPUFamily::Apple4;
+                }
+            }
+            if (@available(iOS 9.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
+                    return MTLGPUFamily::Apple3;
+                }
+            }
+            if (@available(iOS 8.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+                    return MTLGPUFamily::Apple2;
+                }
+            }
+            if (@available(iOS 8.0, *)) {
+                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+                    return MTLGPUFamily::Apple1;
+                }
+            }
+#endif
+            return DAWN_INTERNAL_ERROR("Unsupported Metal device");
+        }
+
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+ struct MTLDeviceLimits {
+ uint32_t maxVertexAttribsPerDescriptor;
+ uint32_t maxBufferArgumentEntriesPerFunc;
+ uint32_t maxTextureArgumentEntriesPerFunc;
+ uint32_t maxSamplerStateArgumentEntriesPerFunc;
+ uint32_t maxThreadsPerThreadgroup;
+ uint32_t maxTotalThreadgroupMemory;
+ uint32_t maxFragmentInputComponents;
+ uint32_t max1DTextureSize;
+ uint32_t max2DTextureSize;
+ uint32_t max3DTextureSize;
+ uint32_t maxTextureArrayLayers;
+ uint32_t minBufferOffsetAlignment;
+ };
+
+ struct LimitsForFamily {
+ uint32_t MTLDeviceLimits::*limit;
+ ityp::array<MTLGPUFamily, uint32_t, 9> values;
+ };
+
+ // clang-format off
+ // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ // Apple Mac
+ // 1, 2, 3, 4, 5, 6, 7, 1, 2
+ constexpr LimitsForFamily kMTLLimits[12] = {
+ {&MTLDeviceLimits::maxVertexAttribsPerDescriptor, { 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u }},
+ {&MTLDeviceLimits::maxBufferArgumentEntriesPerFunc, { 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u }},
+ {&MTLDeviceLimits::maxTextureArgumentEntriesPerFunc, { 31u, 31u, 31u, 96u, 96u, 128u, 128u, 128u, 128u }},
+ {&MTLDeviceLimits::maxSamplerStateArgumentEntriesPerFunc, { 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u }},
+ {&MTLDeviceLimits::maxThreadsPerThreadgroup, { 512u, 512u, 512u, 1024u, 1024u, 1024u, 1024u, 1024u, 1024u }},
+ {&MTLDeviceLimits::maxTotalThreadgroupMemory, { 16352u, 16352u, 16384u, 32768u, 32768u, 32768u, 32768u, 32768u, 32768u }},
+ {&MTLDeviceLimits::maxFragmentInputComponents, { 60u, 60u, 60u, 124u, 124u, 124u, 124u, 124u, 124u }},
+ {&MTLDeviceLimits::max1DTextureSize, { 8192u, 8192u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u }},
+ {&MTLDeviceLimits::max2DTextureSize, { 8192u, 8192u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u }},
+ {&MTLDeviceLimits::max3DTextureSize, { 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u }},
+ {&MTLDeviceLimits::maxTextureArrayLayers, { 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u }},
+ {&MTLDeviceLimits::minBufferOffsetAlignment, { 4u, 4u, 4u, 4u, 4u, 4u, 4u, 256u, 256u }},
+ };
+ // clang-format on
+
+ MTLGPUFamily mtlGPUFamily;
+ DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
+
+ MTLDeviceLimits mtlLimits;
+ for (const auto& limitsForFamily : kMTLLimits) {
+ mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
+ }
+
+ GetDefaultLimits(&limits->v1);
+
+ limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
+ limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
+ limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
+ limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
+
+ uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
+ maxBuffersPerStage -= 1; // One slot is reserved to store buffer lengths.
+
+ uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
+ limits->v1.maxUniformBuffersPerShaderStage +
+ limits->v1.maxVertexBuffers;
+
+ ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
+ {
+ uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
+ limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
+ limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
+ limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
+ }
+
+ uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
+ limits->v1.maxStorageTexturesPerShaderStage;
+
+ ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
+ {
+ uint32_t additional =
+ mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
+ limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
+ limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
+ }
+
+ limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
+
+ // Metal limits are per-function, so the layout limits are the same as the stage
+ // limits. Note: this should likely change if the implementation uses Metal argument
+ // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
+ // buffers may be set directly.
+ // Mac GPU families with tier 1 argument buffers support 64
+ // buffers, 128 textures, and 16 samplers. Mac GPU families
+ // with tier 2 argument buffers support 500000 buffers and
+ // textures, and 1024 unique samplers
+ limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
+ limits->v1.maxUniformBuffersPerShaderStage;
+ limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
+ limits->v1.maxStorageBuffersPerShaderStage;
+
+ // The WebGPU limit is the limit across all vertex buffers, combined.
+ limits->v1.maxVertexAttributes =
+ limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
+
+ limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
+
+ limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
+ limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
+ limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
+
+ limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+ limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+
+ uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
+
+ // Metal has no documented limit on the size of a binding. Use the maximum
+ // buffer size.
+ limits->v1.maxUniformBufferBindingSize = maxBufferSize;
+ limits->v1.maxStorageBufferBindingSize = maxBufferSize;
+
+ // TODO(crbug.com/dawn/685):
+ // LIMITS NOT SET:
+ // - maxBindGroups
+ // - maxVertexBufferArrayStride
+
+ return {};
+ }
+
+ NSPRef<id<MTLDevice>> mDevice;
+ };
+
+ // Implementation of the Metal backend's BackendConnection
+
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::Metal) {
+ if (GetInstance()->IsBackendValidationEnabled()) {
+ setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
+ }
+ }
+
+ std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
+ }
+ return result.AcquireSuccess();
+ }
+
+ ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
+
+ std::vector<Ref<AdapterBase>> adapters;
+ BOOL supportedVersion = NO;
+#if defined(DAWN_PLATFORM_MACOS)
+ if (@available(macOS 10.11, *)) {
+ supportedVersion = YES;
+
+ NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
+
+ for (id<MTLDevice> device in devices.Get()) {
+ Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
+ if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+ adapters.push_back(std::move(adapter));
+ }
+ }
+ }
+#endif
+
+#if defined(DAWN_PLATFORM_IOS)
+ if (@available(iOS 8.0, *)) {
+ supportedVersion = YES;
+ // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
+ Ref<Adapter> adapter =
+ AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+ if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+ adapters.push_back(std::move(adapter));
+ }
+ }
+#endif
+ if (!supportedVersion) {
+ UNREACHABLE();
+ }
+ return adapters;
+ }
+
+ BackendConnection* Connect(InstanceBase* instance) {
+ if (!IsMetalSupported()) {
+ return nullptr;
+ }
+ return new Backend(instance);
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h
new file mode 100644
index 00000000000..bf4c3e92ccb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/native/BindGroupLayout.h"
+
+namespace dawn::native::metal {
+
+ class BindGroup;
+ class Device;
+
+ class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static Ref<BindGroupLayout> Create(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ private:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+ ~BindGroupLayout() override = default;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm
new file mode 100644
index 00000000000..e413bdd87de
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupLayoutMTL.mm
@@ -0,0 +1,45 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+
+#include "dawn/native/metal/BindGroupMTL.h"
+
+namespace dawn::native::metal {
+
+ // static
+ Ref<BindGroupLayout> BindGroupLayout::Create(
+ DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+ }
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
+ Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h
new file mode 100644
index 00000000000..238635c0e86
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPMTL_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/native/BindGroup.h"
+
+namespace dawn::native::metal {
+
+ class Device;
+
+ class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+
+ private:
+ ~BindGroup() override;
+
+ void DestroyImpl() override;
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_BINDGROUPMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm
new file mode 100644
index 00000000000..a8e02a805f5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BindGroupMTL.mm
@@ -0,0 +1,37 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BindGroupMTL.h"
+
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+namespace dawn::native::metal {
+
+ BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {
+ }
+
+ BindGroup::~BindGroup() = default;
+
+ void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+ }
+
+ // static
+ Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h
new file mode 100644
index 00000000000..8eb9a36d0f9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BUFFERMTL_H_
+#define DAWNNATIVE_METAL_BUFFERMTL_H_
+
+#include "dawn/common/NSRef.h"
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Buffer.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class CommandRecordingContext;
+ class Device;
+
+ class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
+ id<MTLBuffer> GetMTLBuffer() const;
+
+ bool EnsureDataInitialized(CommandRecordingContext* commandContext);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
+ static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
+
+ private:
+ using BufferBase::BufferBase;
+ MaybeError Initialize(bool mappedAtCreation);
+
+ ~Buffer() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ void* GetMappedPointerImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+
+ void InitializeToZero(CommandRecordingContext* commandContext);
+ void ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ NSPRef<id<MTLBuffer>> mMtlBuffer;
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_BUFFERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm
new file mode 100644
index 00000000000..695872a3e83
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/BufferMTL.mm
@@ -0,0 +1,240 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/BufferMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/metal/CommandRecordingContext.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+#include <limits>
+
+namespace dawn::native::metal {
+ // The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
+ // largest alignment of supported data types
+ static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
+
+ // static
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return std::move(buffer);
+ }
+
+ // static
+ uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
+ if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
+ return [mtlDevice maxBufferLength];
+ }
+
+ // Earlier versions of Metal had maximums defined in the Metal feature set tables
+ // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
+#if defined(DAWN_PLATFORM_MACOS)
+ // 10.12 and 10.13 have a 1Gb limit.
+ if (@available(macOS 10.12, *)) {
+ // |maxBufferLength| isn't always available on older systems. If available, use
+ // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
+ // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
+ return 1024 * 1024 * 1024;
+ }
+ // 10.11 has a 256Mb limit
+        // 10.11 has a 256Mb limit
+        if (@available(macOS 10.11, *)) {
+ return 256 * 1024 * 1024;
+ }
+#else
+        // iOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+ return 256 * 1024 * 1024;
+#endif
+ }
+
+ MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ MTLResourceOptions storageMode;
+ if (GetUsage() & kMappableBufferUsages) {
+ storageMode = MTLResourceStorageModeShared;
+ } else {
+ storageMode = MTLResourceStorageModePrivate;
+ }
+
+ uint32_t alignment = 1;
+#ifdef DAWN_PLATFORM_MACOS
+ // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
+ alignment = 4;
+#endif
+
+ // Metal validation layer requires the size of uniform buffer and storage buffer to be no
+ // less than the size of the buffer block defined in shader, and the overall size of the
+ // buffer must be aligned to the largest alignment of its members.
+ if (GetUsage() &
+ (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
+ alignment = kMinUniformOrStorageBufferAlignment;
+ }
+
+ // The vertex pulling transform requires at least 4 bytes in the buffer.
+ // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
+ // after the end.
+ NSUInteger extraBytes = 0u;
+ if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
+ extraBytes = 4u;
+ }
+
+ if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ NSUInteger currentSize =
+ std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
+
+ if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
+            // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ currentSize = Align(currentSize, alignment);
+
+ uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
+ if (currentSize > maxBufferSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+
+ mAllocatedSize = currentSize;
+ mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice()
+ newBufferWithLength:currentSize
+ options:storageMode]);
+ if (mMtlBuffer == nullptr) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
+ }
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ ClearBuffer(commandContext, uint8_t(1u));
+ }
+
+ // Initialize the padding bytes to zero.
+ if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
+ !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
+ uint32_t clearSize = Align(paddingBytes, 4);
+ uint64_t clearOffset = GetAllocatedSize() - clearSize;
+
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ ClearBuffer(commandContext, 0, clearOffset, clearSize);
+ }
+ }
+ return {};
+ }
+
+ Buffer::~Buffer() = default;
+
+ id<MTLBuffer> Buffer::GetMTLBuffer() const {
+ return mMtlBuffer.Get();
+ }
+
+ bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return GetUsage() & kMappableBufferUsages;
+ }
+
+ MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+ }
+
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ EnsureDataInitialized(commandContext);
+
+ return {};
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ return [*mMtlBuffer contents];
+ }
+
+ void Buffer::UnmapImpl() {
+ // Nothing to do, Metal StorageModeShared buffers are always mapped.
+ }
+
+ void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ mMtlBuffer = nullptr;
+ }
+
+ bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ InitializeToZero(commandContext);
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero(commandContext);
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero(commandContext);
+ return true;
+ }
+
+ void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(NeedsInitialization());
+
+ ClearBuffer(commandContext, uint8_t(0u));
+
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+ }
+
+ void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+ uint8_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ ASSERT(commandContext != nullptr);
+ size = size > 0 ? size : GetAllocatedSize();
+ ASSERT(size > 0);
+ [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
+ range:NSMakeRange(offset, size)
+ value:clearValue];
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h
new file mode 100644
index 00000000000..29db87057fa
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
+#define DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native {
+ class CommandEncoder;
+}
+
+namespace dawn::native::metal {
+
+ class CommandRecordingContext;
+ class Device;
+ class Texture;
+
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize);
+
+ class CommandBuffer final : public CommandBufferBase {
+ public:
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
+
+ MaybeError FillCommands(CommandRecordingContext* commandContext);
+
+ private:
+ using CommandBufferBase::CommandBufferBase;
+
+ MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
+ MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height);
+
+ MaybeError EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height);
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm
new file mode 100644
index 00000000000..86b88a8e83d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandBufferMTL.mm
@@ -0,0 +1,1594 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/CommandBufferMTL.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/metal/BindGroupMTL.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/ComputePipelineMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/QuerySetMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/native/metal/SamplerMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+#include <tint/tint.h>
+
+namespace dawn::native::metal {
+
+ namespace {
+
+        // Allows this file to use MTLStoreActionStoreAndMultisampleResolve because the logic is
+ // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
+ // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+ constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
+ MTLStoreActionStoreAndMultisampleResolve;
+#pragma clang diagnostic pop
+
+ MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return MTLIndexTypeUInt16;
+ case wgpu::IndexFormat::Uint32:
+ return MTLIndexTypeUInt32;
+ case wgpu::IndexFormat::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(
+ BeginRenderPassCmd* renderPass) {
+ // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+ NSRef<MTLRenderPassDescriptor> descriptorRef =
+ [MTLRenderPassDescriptor renderPassDescriptor];
+ MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+ for (ColorAttachmentIndex attachment :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ uint8_t i = static_cast<uint8_t>(attachment);
+ auto& attachmentInfo = renderPass->colorAttachments[attachment];
+
+ switch (attachmentInfo.loadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
+ descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
+ attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
+ attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ break;
+
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+
+ descriptor.colorAttachments[i].texture =
+ ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
+ descriptor.colorAttachments[i].level = attachmentInfo.view->GetBaseMipLevel();
+ descriptor.colorAttachments[i].slice = attachmentInfo.view->GetBaseArrayLayer();
+
+ bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+ if (hasResolveTarget) {
+ descriptor.colorAttachments[i].resolveTexture =
+ ToBackend(attachmentInfo.resolveTarget->GetTexture())->GetMTLTexture();
+ descriptor.colorAttachments[i].resolveLevel =
+ attachmentInfo.resolveTarget->GetBaseMipLevel();
+ descriptor.colorAttachments[i].resolveSlice =
+ attachmentInfo.resolveTarget->GetBaseArrayLayer();
+
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.colorAttachments[i].storeAction =
+ kMTLStoreActionStoreAndMultisampleResolve;
+ break;
+ case wgpu::StoreOp::Discard:
+ descriptor.colorAttachments[i].storeAction =
+ MTLStoreActionMultisampleResolve;
+ break;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
+ break;
+ case wgpu::StoreOp::Discard:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
+ break;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+ id<MTLTexture> texture =
+ ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
+ const Format& format = attachmentInfo.view->GetTexture()->GetFormat();
+
+ if (format.HasDepth()) {
+ descriptor.depthAttachment.texture = texture;
+ descriptor.depthAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+ descriptor.depthAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
+
+ switch (attachmentInfo.depthStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ break;
+
+ case wgpu::StoreOp::Discard:
+ descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
+ break;
+
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+
+ switch (attachmentInfo.depthLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+ break;
+
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ if (format.HasStencil()) {
+ descriptor.stencilAttachment.texture = texture;
+ descriptor.stencilAttachment.level = attachmentInfo.view->GetBaseMipLevel();
+ descriptor.stencilAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
+
+ switch (attachmentInfo.stencilStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ break;
+
+ case wgpu::StoreOp::Discard:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
+ break;
+
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+
+ switch (attachmentInfo.stencilLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+ break;
+
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ if (renderPass->occlusionQuerySet.Get() != nullptr) {
+ descriptor.visibilityResultBuffer =
+ ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
+ }
+
+ return descriptorRef;
+ }
+
+ // Helper function for Toggle EmulateStoreAndMSAAResolve
+ // Encodes a second, draw-free render pass that re-loads what the previous pass
+ // stored and resolves it into the matching entry of |resolveTextures|. Entries
+ // of |resolveTextures| that are nullptr are attachments that need no resolve.
+ void ResolveInAnotherRenderPass(
+ CommandRecordingContext* commandContext,
+ const MTLRenderPassDescriptor* mtlRenderPass,
+ const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
+ // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+ NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
+ [MTLRenderPassDescriptor renderPassDescriptor];
+ MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ if (resolveTextures[i] == nullptr) {
+ continue;
+ }
+
+ // Load the stored contents and let the MultisampleResolve store action do
+ // the resolve; resolve level/slice are carried over from the original pass.
+ mtlRenderPassForResolve.colorAttachments[i].texture =
+ mtlRenderPass.colorAttachments[i].texture;
+ mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ mtlRenderPassForResolve.colorAttachments[i].storeAction =
+ MTLStoreActionMultisampleResolve;
+ mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
+ mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
+ mtlRenderPass.colorAttachments[i].resolveLevel;
+ mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
+ mtlRenderPass.colorAttachments[i].resolveSlice;
+ }
+
+ // Beginning and immediately ending the pass is enough: the store action
+ // performs the resolve without any draw calls.
+ commandContext->BeginRender(mtlRenderPassForResolve);
+ commandContext->EndRender();
+ }
+
+ // Helper functions for Toggle AlwaysResolveIntoZeroLevelAndLayer
+ // Creates a temporary single-sample 2D render-target texture of |width| x
+ // |height| in |mtlFormat| that is used as the resolve target at level 0 /
+ // slice 0; the result is later copied to the true resolve target. Returns an
+ // out-of-memory error if Metal fails to allocate the texture.
+ ResultOrError<NSPRef<id<MTLTexture>>> CreateResolveTextureForWorkaround(
+ Device* device,
+ MTLPixelFormat mtlFormat,
+ uint32_t width,
+ uint32_t height) {
+ NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+ MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+ mtlDesc.textureType = MTLTextureType2D;
+ mtlDesc.usage = MTLTextureUsageRenderTarget;
+ mtlDesc.pixelFormat = mtlFormat;
+ mtlDesc.width = width;
+ mtlDesc.height = height;
+ mtlDesc.depth = 1;
+ mtlDesc.mipmapLevelCount = 1;
+ mtlDesc.arrayLength = 1;
+ // Private storage: the texture is only ever accessed by the GPU.
+ mtlDesc.storageMode = MTLStorageModePrivate;
+ mtlDesc.sampleCount = 1;
+
+ id<MTLTexture> texture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc];
+ if (texture == nil) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
+ }
+
+ return AcquireNSPRef(texture);
+ }
+
+ // Blit-copies the contents of the temporary resolve texture (level 0, slice 0)
+ // into the true resolve target at |trueResolveLevel| / |trueResolveSlice|.
+ // Used to finish the AlwaysResolveIntoZeroLevelAndLayer workaround.
+ void CopyIntoTrueResolveTarget(CommandRecordingContext* commandContext,
+ id<MTLTexture> mtlTrueResolveTexture,
+ uint32_t trueResolveLevel,
+ uint32_t trueResolveSlice,
+ id<MTLTexture> temporaryResolveTexture,
+ uint32_t width,
+ uint32_t height) {
+ [commandContext->EnsureBlit() copyFromTexture:temporaryResolveTexture
+ sourceSlice:0
+ sourceLevel:0
+ sourceOrigin:MTLOriginMake(0, 0, 0)
+ sourceSize:MTLSizeMake(width, height, 1)
+ toTexture:mtlTrueResolveTexture
+ destinationSlice:trueResolveSlice
+ destinationLevel:trueResolveLevel
+ destinationOrigin:MTLOriginMake(0, 0, 0)];
+ }
+
+ // Metal uses a physical addressing mode which means buffers in the shading language are
+ // just pointers to the virtual address of their start. This means there is no way to know
+ // the length of a buffer to compute the length() of unsized arrays at the end of storage
+ // buffers. Tint implements the length() of unsized arrays by requiring an extra
+ // buffer that contains the length of other buffers. This structure that keeps track of the
+ // length of storage buffers and can apply them to the reserved "buffer length buffer" when
+ // needed for a draw or a dispatch.
+ struct StorageBufferLengthTracker {
+ // Stages whose length data changed since the last Apply().
+ wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
+
+ // The lengths of buffers are stored as 32bit integers because that is the width the
+ // MSL code generated by Tint expects.
+ // UBOs require we align the max buffer count to 4 elements (16 bytes).
+ static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
+ PerStage<std::array<uint32_t, MaxBufferCount>> data;
+
+ // Uploads the vertex/fragment length data to the reserved buffer-length slot
+ // via setBytes, but only for stages that are both dirty and actually needed
+ // by |pipeline|. |enableVertexPulling| adds the vertex buffers to the count.
+ void Apply(id<MTLRenderCommandEncoder> render,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
+ wgpu::ShaderStage stagesToApply =
+ dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
+
+ if (stagesToApply == wgpu::ShaderStage::None) {
+ return;
+ }
+
+ if (stagesToApply & wgpu::ShaderStage::Vertex) {
+ uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+ ->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+ if (enableVertexPulling) {
+ bufferCount += pipeline->GetVertexBufferCount();
+ }
+
+ // Round up to 4 elements to satisfy the 16-byte UBO alignment above.
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
+
+ [render setVertexBytes:data[SingleShaderStage::Vertex].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
+ }
+
+ if (stagesToApply & wgpu::ShaderStage::Fragment) {
+ uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+ ->GetBufferBindingCount(SingleShaderStage::Fragment);
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
+
+ [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
+ }
+
+ // Only mark clean stages that were actually applied.
+ dirtyStages ^= stagesToApply;
+ }
+
+ // Compute-encoder variant: uploads the compute-stage lengths when the stage
+ // is dirty and |pipeline| actually reads storage buffer lengths.
+ void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
+ if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
+ return;
+ }
+
+ if (!pipeline->RequiresStorageBufferLength()) {
+ return;
+ }
+
+ uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+ ->GetBufferBindingCount(SingleShaderStage::Compute);
+ bufferCount = Align(bufferCount, 4);
+ ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
+
+ [compute setBytes:data[SingleShaderStage::Compute].data()
+ length:sizeof(uint32_t) * bufferCount
+ atIndex:kBufferLengthBufferSlot];
+
+ dirtyStages ^= wgpu::ShaderStage::Compute;
+ }
+ };
+
+ // Keeps track of the dirty bind groups so they can be lazily applied when we know the
+ // pipeline state.
+ // Bind groups may be inherited because bind groups are packed in the buffer /
+ // texture tables in contiguous order.
+ class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
+ public:
+ // |lengthTracker| receives buffer binding sizes so the buffer-length buffer
+ // can be kept in sync; it must outlive this tracker.
+ explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
+ : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
+ }
+
+ // Applies every dirty (or dynamic) bind group to |encoder|, which may be
+ // either a render or a compute encoder (see the ApplyBindGroup overloads).
+ template <typename Encoder>
+ void Apply(Encoder encoder) {
+ BeforeApply();
+ for (BindGroupIndex index :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
+ ToBackend(mPipelineLayout));
+ }
+ AfterApply();
+ }
+
+ private:
+ // Handles a call to SetBindGroup, directing the commands to the correct encoder.
+ // There is a single function that takes both encoders to factor code. Other approaches
+ // like templates wouldn't work because the name of methods are different between the
+ // two encoder types.
+ // Exactly one of |render| / |compute| is non-null (see ApplyBindGroup below).
+ void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
+ id<MTLComputeCommandEncoder> compute,
+ BindGroupIndex index,
+ BindGroup* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets,
+ PipelineLayout* pipelineLayout) {
+ // Consumes |dynamicOffsets| in binding order, one per dynamic buffer.
+ uint32_t currentDynamicBufferIndex = 0;
+
+ // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
+ // so that we only have to do one setVertexBuffers and one setFragmentBuffers
+ // call here.
+ for (BindingIndex bindingIndex{0};
+ bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ // A binding is applied to a stage only if it is visible there AND the
+ // matching encoder was provided.
+ bool hasVertStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
+ bool hasFragStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
+ bool hasComputeStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
+
+ // Per-stage Metal table indices, resolved from the pipeline layout.
+ uint32_t vertIndex = 0;
+ uint32_t fragIndex = 0;
+ uint32_t computeIndex = 0;
+
+ if (hasVertStage) {
+ vertIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Vertex)[index][bindingIndex];
+ }
+ if (hasFragStage) {
+ fragIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Fragment)[index][bindingIndex];
+ }
+ if (hasComputeStage) {
+ computeIndex = pipelineLayout->GetBindingIndexInfo(
+ SingleShaderStage::Compute)[index][bindingIndex];
+ }
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ const BufferBinding& binding =
+ group->GetBindingAsBufferBinding(bindingIndex);
+ const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
+ NSUInteger offset = binding.offset;
+
+ // TODO(crbug.com/dawn/854): Record bound buffer status to use
+ // setBufferOffset to achieve better performance.
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicBufferIndex];
+ currentDynamicBufferIndex++;
+ }
+
+ // Record the binding size so the buffer-length buffer can be
+ // refreshed before the next draw/dispatch.
+ if (hasVertStage) {
+ mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+ [render setVertexBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(vertIndex, 1)];
+ }
+ if (hasFragStage) {
+ mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
+ [render setFragmentBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(fragIndex, 1)];
+ }
+ if (hasComputeStage) {
+ mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
+ binding.size;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
+ [compute setBuffers:&buffer
+ offsets:&offset
+ withRange:NSMakeRange(computeIndex, 1)];
+ }
+
+ break;
+ }
+
+ case BindingInfoType::Sampler: {
+ auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexSamplerState:sampler->GetMTLSamplerState()
+ atIndex:vertIndex];
+ }
+ if (hasFragStage) {
+ [render setFragmentSamplerState:sampler->GetMTLSamplerState()
+ atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setSamplerState:sampler->GetMTLSamplerState()
+ atIndex:computeIndex];
+ }
+ break;
+ }
+
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture: {
+ auto textureView =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ if (hasVertStage) {
+ [render setVertexTexture:textureView->GetMTLTexture()
+ atIndex:vertIndex];
+ }
+ if (hasFragStage) {
+ [render setFragmentTexture:textureView->GetMTLTexture()
+ atIndex:fragIndex];
+ }
+ if (hasComputeStage) {
+ [compute setTexture:textureView->GetMTLTexture()
+ atIndex:computeIndex];
+ }
+ break;
+ }
+
+ // External textures are expanded earlier in the frontend, so they
+ // should never reach the backend binding loop.
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ }
+ }
+ }
+
+ // Render-encoder entry point: forwards with a null compute encoder.
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
+ }
+
+ // Compute-encoder entry point: forwards with a null render encoder.
+ template <typename... Args>
+ void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
+ ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
+ }
+
+ // Not owned; receives buffer binding sizes for the buffer-length buffer.
+ StorageBufferLengthTracker* mLengthTracker;
+ };
+
+ // Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
+ // all the relevant state.
+ class VertexBufferTracker {
+ public:
+ // |lengthTracker| receives vertex-buffer binding sizes when vertex pulling
+ // is enabled; it must outlive this tracker.
+ explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
+ : mLengthTracker(lengthTracker) {
+ }
+
+ // Records a SetVertexBuffer call; the actual Metal binding is deferred to
+ // Apply() once the pipeline is known.
+ void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = buffer->GetMTLBuffer();
+ mVertexBufferOffsets[slot] = offset;
+
+ // NOTE(review): the assert checks GetSize() but the binding size below is
+ // computed from GetAllocatedSize() - offset — confirm this asymmetry is
+ // intentional (allocated size may exceed the logical size).
+ ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
+ mVertexBufferBindingSizes[slot] =
+ static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
+ mDirtyVertexBuffers.set(slot);
+ }
+
+ void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
+ // When a new pipeline is bound we must set all the vertex buffers again because
+ // they might have been offset by the pipeline layout, and they might be packed
+ // differently from the previous pipeline.
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+ }
+
+ // Binds every dirty vertex buffer that |pipeline| actually uses, translating
+ // Dawn slots to Metal buffer indices. With vertex pulling enabled, binding
+ // sizes are also forwarded to the storage-buffer-length tracker.
+ void Apply(id<MTLRenderCommandEncoder> encoder,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
+ const auto& vertexBuffersToApply =
+ mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
+
+ for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
+ uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
+
+ if (enableVertexPulling) {
+ // Insert lengths for vertex buffers bound as storage buffers
+ mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
+ mVertexBufferBindingSizes[slot];
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+ }
+
+ [encoder setVertexBuffers:&mVertexBuffers[slot]
+ offsets:&mVertexBufferOffsets[slot]
+ withRange:NSMakeRange(metalIndex, 1)];
+ }
+
+ mDirtyVertexBuffers.reset();
+ }
+
+ private:
+ // All the indices in these arrays are Dawn vertex buffer indices
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+ ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
+ ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
+ ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
+
+ // Not owned.
+ StorageBufferLengthTracker* mLengthTracker;
+ };
+
+ } // anonymous namespace
+
+ // Records blit commands that copy |mtlBuffer| into |texture| at |mipLevel| /
+ // |origin| for the given |aspect|. The copy is first split into Metal-legal
+ // sub-copies by ComputeTextureBufferCopySplit, then encoded per texture
+ // dimension: 1D and 3D copies are single blits, 2D copies are encoded one
+ // array layer at a time while advancing the buffer offset by bytesPerImage.
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ id<MTLBuffer> mtlBuffer,
+ uint64_t bufferSize,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Texture* texture,
+ uint32_t mipLevel,
+ const Origin3D& origin,
+ Aspect aspect,
+ const Extent3D& copySize) {
+ TextureBufferCopySplit splitCopies =
+ ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
+ bytesPerRow, rowsPerImage, aspect);
+
+ // E.g. depth/stencil aspects need an explicit MTLBlitOption.
+ MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
+
+ for (const auto& copyInfo : splitCopies) {
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D: {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:0
+ destinationLevel:mipLevel
+ destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
+ options:blitOption];
+ break;
+ }
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin =
+ MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent =
+ MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ // One blit per array layer; z indexes the destination slice.
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
+ ++z) {
+ [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:copyExtent
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:z
+ destinationLevel:mipLevel
+ destinationOrigin:textureOrigin
+ options:blitOption];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:mtlBuffer
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent.depthOrArrayLayers)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:0
+ destinationLevel:mipLevel
+ destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ options:blitOption];
+ break;
+ }
+ }
+ }
+ }
+
+ // static
+ // Factory: wraps the recorded frontend command stream in a backend
+ // CommandBuffer and returns an owning Ref.
+ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+ }
+
+ // Replays the recorded Dawn commands into Metal commands on |commandContext|.
+ // Top-level (non-pass) commands are translated to blit-encoder work here;
+ // compute and render passes are delegated to EncodeComputePass /
+ // EncodeRenderPass after lazily clearing the resources each pass uses.
+ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+ // Indices into GetResourceUsages() that advance as passes are encountered.
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ // Initializes (lazy-clears) every texture subresource and buffer used by a
+ // sync scope before the pass that uses them is encoded.
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
+ CommandRecordingContext* commandContext) {
+ for (size_t i = 0; i < scope.textures.size(); ++i) {
+ Texture* texture = ToBackend(scope.textures[i]);
+
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+ });
+ }
+ for (BufferBase* bufferBase : scope.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+ }
+ };
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
+
+ // Each dispatch in the pass has its own sync scope to clear.
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope, commandContext);
+ }
+ // Close the blit encoder before switching to a compute encoder.
+ commandContext->EndBlit();
+
+ DAWN_TRY(EncodeComputePass(commandContext));
+
+ nextComputePassNumber++;
+ break;
+ }
+
+ case Command::BeginRenderPass: {
+ BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+ commandContext);
+ commandContext->EndBlit();
+
+ LazyClearRenderPassAttachments(cmd);
+ NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
+ DAWN_TRY(EncodeRenderPass(commandContext, descriptor.Get(), cmd->width,
+ cmd->height));
+
+ nextRenderPassNumber++;
+ break;
+ }
+
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
+ break;
+ }
+
+ ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(commandContext,
+ copy->destinationOffset, copy->size);
+
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
+ sourceOffset:copy->sourceOffset
+ toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
+ destinationOffset:copy->destinationOffset
+ size:copy->size];
+ break;
+ }
+
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Buffer* buffer = ToBackend(src.buffer.Get());
+ Texture* texture = ToBackend(dst.texture.Get());
+
+ buffer->EnsureDataInitialized(commandContext);
+ EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+
+ RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
+ buffer->GetSize(), src.offset, src.bytesPerRow,
+ src.rowsPerImage, texture, dst.mipLevel, dst.origin,
+ dst.aspect, copySize);
+ break;
+ }
+
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Texture* texture = ToBackend(src.texture.Get());
+ Buffer* buffer = ToBackend(dst.buffer.Get());
+
+ buffer->EnsureDataInitializedAsDestination(commandContext, copy);
+
+ texture->EnsureSubresourceContentInitialized(
+ commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+
+ // Mirror of RecordCopyBufferToTexture: split into Metal-legal
+ // sub-copies, then encode per texture dimension.
+ TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+ texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
+ dst.bytesPerRow, dst.rowsPerImage, src.aspect);
+
+ for (const auto& copyInfo : splitCopies) {
+ MTLBlitOption blitOption =
+ ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
+ uint64_t bufferOffset = copyInfo.bufferOffset;
+
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D: {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:0
+ sourceLevel:src.mipLevel
+ sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ 0, 0)
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ 1, 1)
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ break;
+ }
+
+ case wgpu::TextureDimension::e2D: {
+ const MTLOrigin textureOrigin = MTLOriginMake(
+ copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+ const MTLSize copyExtent = MTLSizeMake(
+ copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+
+ // One blit per array layer, advancing the buffer offset
+ // by bytesPerImage for each layer.
+ for (uint32_t z = copyInfo.textureOrigin.z;
+ z < copyInfo.textureOrigin.z +
+ copyInfo.copyExtent.depthOrArrayLayers;
+ ++z) {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:z
+ sourceLevel:src.mipLevel
+ sourceOrigin:textureOrigin
+ sourceSize:copyExtent
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ bufferOffset += copyInfo.bytesPerImage;
+ }
+ break;
+ }
+ case wgpu::TextureDimension::e3D: {
+ [commandContext->EnsureBlit()
+ copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:0
+ sourceLevel:src.mipLevel
+ sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+ copyInfo.textureOrigin.y,
+ copyInfo.textureOrigin.z)
+ sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+ copyInfo.copyExtent.height,
+ copyInfo.copyExtent
+ .depthOrArrayLayers)
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
+ break;
+ }
+ }
+ }
+ break;
+ }
+
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy =
+ mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ Texture* srcTexture = ToBackend(copy->source.texture.Get());
+ Texture* dstTexture = ToBackend(copy->destination.texture.Get());
+
+ srcTexture->EnsureSubresourceContentInitialized(
+ commandContext,
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
+ EnsureDestinationTextureInitialized(commandContext, dstTexture,
+ copy->destination, copy->copySize);
+
+ const MTLSize sizeOneSlice =
+ MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
+
+ uint32_t sourceLayer = 0;
+ uint32_t sourceOriginZ = 0;
+
+ uint32_t destinationLayer = 0;
+ uint32_t destinationOriginZ = 0;
+
+ // For 2D textures z selects the array slice; for 1D/3D it is the
+ // z component of the origin. The pointers below route the loop's
+ // z value to the right variable for each side of the copy.
+ uint32_t* sourceZPtr;
+ if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ sourceZPtr = &sourceLayer;
+ } else {
+ sourceZPtr = &sourceOriginZ;
+ }
+
+ uint32_t* destinationZPtr;
+ if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ destinationZPtr = &destinationLayer;
+ } else {
+ destinationZPtr = &destinationOriginZ;
+ }
+
+ // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
+ for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+ *sourceZPtr = copy->source.origin.z + z;
+ *destinationZPtr = copy->destination.origin.z + z;
+
+ // Hold the ref until out of scope
+ NSPRef<id<MTLTexture>> dstTextureView =
+ dstTexture->CreateFormatView(srcTexture->GetFormat().format);
+
+ [commandContext->EnsureBlit()
+ copyFromTexture:srcTexture->GetMTLTexture()
+ sourceSlice:sourceLayer
+ sourceLevel:copy->source.mipLevel
+ sourceOrigin:MTLOriginMake(copy->source.origin.x,
+ copy->source.origin.y, sourceOriginZ)
+ sourceSize:sizeOneSlice
+ toTexture:dstTextureView.Get()
+ destinationSlice:destinationLayer
+ destinationLevel:copy->destination.mipLevel
+ destinationOrigin:MTLOriginMake(copy->destination.origin.x,
+ copy->destination.origin.y,
+ destinationOriginZ)];
+ }
+ break;
+ }
+
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op copies.
+ break;
+ }
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+
+ // If lazy initialization already zeroed the range, the fill
+ // below is redundant and skipped.
+ bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, cmd->offset, cmd->size);
+
+ if (!clearedToZero) {
+ [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
+ range:NSMakeRange(cmd->offset, cmd->size)
+ value:0u];
+ }
+
+ break;
+ }
+
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ Buffer* destination = ToBackend(cmd->destination.Get());
+
+ destination->EnsureDataInitializedAsDestination(
+ commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
+
+ // Occlusion results live in a plain MTLBuffer; timestamp queries
+ // need the counter-sample-buffer resolve API (macOS 10.15+/iOS 14+).
+ if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
+ [commandContext->EnsureBlit()
+ copyFromBuffer:querySet->GetVisibilityBuffer()
+ sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
+ toBuffer:destination->GetMTLBuffer()
+ destinationOffset:NSUInteger(cmd->destinationOffset)
+ size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
+ } else {
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [commandContext->EnsureBlit()
+ resolveCounters:querySet->GetCounterSampleBuffer()
+ inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
+ destinationBuffer:destination->GetMTLBuffer()
+ destinationOffset:NSUInteger(cmd->destinationOffset)];
+ } else {
+ UNREACHABLE();
+ }
+ }
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [commandContext->EnsureBlit()
+ sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+ atSampleIndex:NSUInteger(cmd->queryIndex)
+ withBarrier:YES];
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case Command::InsertDebugMarker: {
+ // MTLCommandBuffer does not implement insertDebugSignpost
+ SkipCommand(&mCommands, type);
+ break;
+ }
+
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
+
+ if (@available(macos 10.13, *)) {
+ [commandContext->GetCommands() popDebugGroup];
+ }
+ break;
+ }
+
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
+
+ if (@available(macos 10.13, *)) {
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
+ }
+
+ break;
+ }
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
+
+ // Stage the data through the dynamic uploader, then blit it into
+ // the destination buffer.
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
+ sourceOffset:uploadHandle.startOffset
+ toBuffer:dstBuffer->GetMTLBuffer()
+ destinationOffset:offset
+ size:size];
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ commandContext->EndBlit();
+ return {};
+ }
+
+ // Encodes one compute pass onto a Metal compute encoder, replaying commands
+ // until EndComputePass (the only normal exit — the trailing UNREACHABLE
+ // documents that a pass always ends with that command). Bind groups and
+ // storage-buffer lengths are applied lazily, right before each dispatch.
+ MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
+ ComputePipeline* lastPipeline = nullptr;
+ StorageBufferLengthTracker storageBufferLengths = {};
+ BindGroupTracker bindGroups(&storageBufferLengths);
+
+ id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndComputePass: {
+ mCommands.NextCommand<EndComputePassCmd>();
+ commandContext->EndCompute();
+ return {};
+ }
+
+ case Command::Dispatch: {
+ DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+ // Skip noop dispatches, it can causes issues on some systems.
+ if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+ break;
+ }
+
+ // Flush deferred state before the dispatch.
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
+
+ [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
+ threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+ break;
+ }
+
+ case Command::DispatchIndirect: {
+ DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+ bindGroups.Apply(encoder);
+ storageBufferLengths.Apply(encoder, lastPipeline);
+
+ Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
+ id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+ [encoder dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
+ indirectBufferOffset:dispatch->indirectOffset
+ threadsPerThreadgroup:lastPipeline
+ ->GetLocalWorkGroupSize()];
+ break;
+ }
+
+ case Command::SetComputePipeline: {
+ SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+ lastPipeline = ToBackend(cmd->pipeline).Get();
+
+ bindGroups.OnSetPipeline(lastPipeline);
+
+ lastPipeline->Encode(encoder);
+ break;
+ }
+
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+ }
+
+ // Recorded only; applied lazily at the next dispatch.
+ bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+ cmd->dynamicOffsetCount, dynamicOffsets);
+ break;
+ }
+
+ case Command::InsertDebugMarker: {
+ InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder insertDebugSignpost:mtlLabel.Get()];
+ break;
+ }
+
+ case Command::PopDebugGroup: {
+ mCommands.NextCommand<PopDebugGroupCmd>();
+
+ [encoder popDebugGroup];
+ break;
+ }
+
+ case Command::PushDebugGroup: {
+ PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+ char* label = mCommands.NextData<char>(cmd->length + 1);
+ NSRef<NSString> mtlLabel =
+ AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+ [encoder pushDebugGroup:mtlLabel.Get()];
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+ // Counter sampling requires macOS 10.15 / iOS 14; validation is
+ // expected to reject timestamp queries on older OSes.
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+ atSampleIndex:NSUInteger(cmd->queryIndex)
+ withBarrier:YES];
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ // EndComputePass should have been called
+ UNREACHABLE();
+ }
+
+    // Encodes a render pass after applying two driver-workaround toggles. Each
+    // workaround rewrites mtlRenderPass and recurses into EncodeRenderPass so
+    // that any remaining workaround still applies, then fixes up the results.
+    MaybeError CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
+                                               MTLRenderPassDescriptor* mtlRenderPass,
+                                               uint32_t width,
+                                               uint32_t height) {
+        ASSERT(mtlRenderPass);
+
+        Device* device = ToBackend(GetDevice());
+
+        // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
+        // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
+        // the resolve texture is removed when applying the store + MSAA resolve workaround.
+        if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
+            // The original ("true") resolve targets; entries stay null for
+            // attachments that do not need the workaround.
+            std::array<id<MTLTexture>, kMaxColorAttachments> trueResolveTextures = {};
+            std::array<uint32_t, kMaxColorAttachments> trueResolveLevels = {};
+            std::array<uint32_t, kMaxColorAttachments> trueResolveSlices = {};
+
+            // Use temporary resolve texture on the resolve targets with non-zero resolveLevel or
+            // resolveSlice.
+            bool useTemporaryResolveTexture = false;
+            std::array<NSPRef<id<MTLTexture>>, kMaxColorAttachments> temporaryResolveTextures = {};
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
+                    continue;
+                }
+
+                // Resolving into level 0 / slice 0 already works; nothing to do.
+                if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
+                    mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
+                    continue;
+                }
+
+                trueResolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+                trueResolveLevels[i] = mtlRenderPass.colorAttachments[i].resolveLevel;
+                trueResolveSlices[i] = mtlRenderPass.colorAttachments[i].resolveSlice;
+
+                const MTLPixelFormat mtlFormat = trueResolveTextures[i].pixelFormat;
+                DAWN_TRY_ASSIGN(temporaryResolveTextures[i], CreateResolveTextureForWorkaround(
+                                                                 device, mtlFormat, width, height));
+
+                // Redirect the resolve into the temporary texture at level 0 / slice 0.
+                mtlRenderPass.colorAttachments[i].resolveTexture =
+                    temporaryResolveTextures[i].Get();
+                mtlRenderPass.colorAttachments[i].resolveLevel = 0;
+                mtlRenderPass.colorAttachments[i].resolveSlice = 0;
+                useTemporaryResolveTexture = true;
+            }
+
+            // If we need to use a temporary resolve texture we need to copy the result of MSAA
+            // resolve back to the true resolve targets.
+            if (useTemporaryResolveTexture) {
+                DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
+                for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                    if (trueResolveTextures[i] == nullptr) {
+                        continue;
+                    }
+
+                    ASSERT(temporaryResolveTextures[i] != nullptr);
+                    CopyIntoTrueResolveTarget(commandContext, trueResolveTextures[i],
+                                              trueResolveLevels[i], trueResolveSlices[i],
+                                              temporaryResolveTextures[i].Get(), width, height);
+                }
+                return {};
+            }
+        }
+
+        // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
+        if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
+            bool hasStoreAndMSAAResolve = false;
+
+            // Remove any store + MSAA resolve and remember them.
+            std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (mtlRenderPass.colorAttachments[i].storeAction ==
+                    kMTLStoreActionStoreAndMultisampleResolve) {
+                    hasStoreAndMSAAResolve = true;
+                    resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+
+                    // Downgrade to a plain store; the resolve is re-done in a
+                    // separate render pass below.
+                    mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
+                    mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
+                }
+            }
+
+            // If we found a store + MSAA resolve we need to resolve in a different render pass.
+            if (hasStoreAndMSAAResolve) {
+                DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
+                ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
+                return {};
+            }
+        }
+
+        // No workaround applies (or all have been applied): encode directly.
+        DAWN_TRY(EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height));
+        return {};
+    }
+
+    // Encodes the render pass commands with no workarounds applied. Iterates
+    // mCommands until EndRenderPass, which is the only normal exit path; the
+    // trailing UNREACHABLE() guards against a missing EndRenderPass.
+    MaybeError CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+                                                       MTLRenderPassDescriptor* mtlRenderPass,
+                                                       uint32_t width,
+                                                       uint32_t height) {
+        bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
+        RenderPipeline* lastPipeline = nullptr;
+        // Index buffer state recorded by SetIndexBuffer and consumed by the
+        // indexed draw commands.
+        id<MTLBuffer> indexBuffer = nullptr;
+        uint32_t indexBufferBaseOffset = 0;
+        MTLIndexType indexBufferType;
+        uint64_t indexFormatSize = 0;
+
+        StorageBufferLengthTracker storageBufferLengths = {};
+        VertexBufferTracker vertexBuffers(&storageBufferLengths);
+        BindGroupTracker bindGroups(&storageBufferLengths);
+
+        id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
+
+        // Handles the commands that are valid both directly in a render pass and
+        // inside a render bundle; also used as the default case of the main loop
+        // below so pass-level and bundle-level commands share one implementation.
+        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                    // Flush lazily-tracked state before the draw.
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    // The instance count must be non-zero, otherwise no-op
+                    if (draw->instanceCount != 0) {
+                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
+                        if (draw->firstInstance == 0) {
+                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                        vertexStart:draw->firstVertex
+                                        vertexCount:draw->vertexCount
+                                      instanceCount:draw->instanceCount];
+                        } else {
+                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                        vertexStart:draw->firstVertex
+                                        vertexCount:draw->vertexCount
+                                      instanceCount:draw->instanceCount
+                                       baseInstance:draw->firstInstance];
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    // The index and instance count must be non-zero, otherwise no-op
+                    if (draw->indexCount != 0 && draw->instanceCount != 0) {
+                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
+                        // baseVertex.
+                        if (draw->baseVertex == 0 && draw->firstInstance == 0) {
+                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                                indexCount:draw->indexCount
+                                                 indexType:indexBufferType
+                                               indexBuffer:indexBuffer
+                                         indexBufferOffset:indexBufferBaseOffset +
+                                                           draw->firstIndex * indexFormatSize
+                                             instanceCount:draw->instanceCount];
+                        } else {
+                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                                indexCount:draw->indexCount
+                                                 indexType:indexBufferType
+                                               indexBuffer:indexBuffer
+                                         indexBufferOffset:indexBufferBaseOffset +
+                                                           draw->firstIndex * indexFormatSize
+                                             instanceCount:draw->instanceCount
+                                                baseVertex:draw->baseVertex
+                                              baseInstance:draw->firstInstance];
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                    [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                             indirectBuffer:indirectBuffer
+                       indirectBufferOffset:draw->indirectOffset];
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                    bindGroups.Apply(encoder);
+                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(buffer != nullptr);
+
+                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                    [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                         indexType:indexBufferType
+                                       indexBuffer:indexBuffer
+                                 indexBufferOffset:indexBufferBaseOffset
+                                    indirectBuffer:indirectBuffer
+                              indirectBufferOffset:draw->indirectOffset];
+                    break;
+                }
+
+                case Command::InsertDebugMarker: {
+                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                    // length + 1 covers the null terminator stored with the label.
+                    char* label = iter->NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder insertDebugSignpost:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::PopDebugGroup: {
+                    iter->NextCommand<PopDebugGroupCmd>();
+
+                    [encoder popDebugGroup];
+                    break;
+                }
+
+                case Command::PushDebugGroup: {
+                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                    char* label = iter->NextData<char>(cmd->length + 1);
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [encoder pushDebugGroup:mtlLabel.Get()];
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
+
+                    vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
+                    bindGroups.OnSetPipeline(newPipeline);
+
+                    // Fixed-function state lives on the pipeline object in Dawn but is
+                    // set on the encoder in Metal, so mirror it here.
+                    [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
+                    [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
+                    [encoder setCullMode:newPipeline->GetMTLCullMode()];
+                    [encoder setDepthBias:newPipeline->GetDepthBias()
+                               slopeScale:newPipeline->GetDepthBiasSlopeScale()
+                                    clamp:newPipeline->GetDepthBiasClamp()];
+                    if (@available(macOS 10.11, iOS 11.0, *)) {
+                        MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
+                                                        ? MTLDepthClipModeClamp
+                                                        : MTLDepthClipModeClip;
+                        [encoder setDepthClipMode:clipMode];
+                    }
+                    newPipeline->Encode(encoder);
+
+                    lastPipeline = newPipeline;
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+
+                    bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+                                              cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+                    auto b = ToBackend(cmd->buffer.Get());
+                    // Only record the state; it is consumed by the indexed draws.
+                    indexBuffer = b->GetMTLBuffer();
+                    indexBufferBaseOffset = cmd->offset;
+                    indexBufferType = MTLIndexFormat(cmd->format);
+                    indexFormatSize = IndexFormatSize(cmd->format);
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                    vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                    cmd->offset);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+                    commandContext->EndRender();
+                    // Normal exit: the pass is fully encoded.
+                    return {};
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                    [encoder setStencilReferenceValue:cmd->reference];
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    MTLViewport viewport;
+                    viewport.originX = cmd->x;
+                    viewport.originY = cmd->y;
+                    viewport.width = cmd->width;
+                    viewport.height = cmd->height;
+                    viewport.znear = cmd->minDepth;
+                    viewport.zfar = cmd->maxDepth;
+
+                    [encoder setViewport:viewport];
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    MTLScissorRect rect;
+                    rect.x = cmd->x;
+                    rect.y = cmd->y;
+                    rect.width = cmd->width;
+                    rect.height = cmd->height;
+
+                    [encoder setScissorRect:rect];
+                    break;
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    [encoder setBlendColorRed:cmd->color.r
+                                        green:cmd->color.g
+                                         blue:cmd->color.b
+                                        alpha:cmd->color.a];
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    // Replay each bundle's commands inline through the shared lambda.
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            EncodeRenderBundleCommand(iter, type);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+
+                    // Each query occupies one uint64_t slot in the visibility result buffer.
+                    [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
+                                              offset:cmd->queryIndex * sizeof(uint64_t)];
+                    break;
+                }
+
+                case Command::EndOcclusionQuery: {
+                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+
+                    [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
+                                              offset:cmd->queryIndex * sizeof(uint64_t)];
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                    if (@available(macos 10.15, iOS 14.0, *)) {
+                        [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                          atSampleIndex:NSUInteger(cmd->queryIndex)
+                                            withBarrier:YES];
+                    } else {
+                        // Presumably unsupported OS versions are rejected before a
+                        // timestamp query set can be created — verify at validation.
+                        UNREACHABLE();
+                    }
+                    break;
+                }
+
+                default: {
+                    // Everything else is a command shared with render bundles.
+                    EncodeRenderBundleCommand(&mCommands, type);
+                    break;
+                }
+            }
+        }
+
+        // EndRenderPass should have been called
+        UNREACHABLE();
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h
new file mode 100644
index 00000000000..fb06aa873a8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.h
@@ -0,0 +1,59 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/common/NSRef.h"
+#include "dawn/common/NonCopyable.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
+    // Only one encoder may be open at a time.
+    class CommandRecordingContext : NonMovable {
+      public:
+        CommandRecordingContext();
+        ~CommandRecordingContext();
+
+        // Returns the current command buffer; nil before PrepareNextCommandBuffer
+        // or after AcquireCommands.
+        id<MTLCommandBuffer> GetCommands();
+        // Marks that commands were recorded; reset when the buffer is acquired.
+        void MarkUsed();
+        bool WasUsed() const;
+
+        // Allocates a fresh, retained MTLCommandBuffer from |queue|.
+        MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
+        // Closes any still-open blit encoder and transfers ownership of the
+        // command buffer to the caller.
+        NSPRef<id<MTLCommandBuffer>> AcquireCommands();
+
+        // The blit encoder is opened lazily and reused; EndBlit is a no-op when
+        // no blit encoder is open.
+        id<MTLBlitCommandEncoder> EnsureBlit();
+        void EndBlit();
+
+        // Compute and render encoders must be begun and ended explicitly.
+        id<MTLComputeCommandEncoder> BeginCompute();
+        void EndCompute();
+
+        id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
+        void EndRender();
+
+      private:
+        NSPRef<id<MTLCommandBuffer>> mCommands;
+        // At most one of the three encoders below is non-null at any time,
+        // mirrored by mInEncoder.
+        NSPRef<id<MTLBlitCommandEncoder>> mBlit;
+        NSPRef<id<MTLComputeCommandEncoder>> mCompute;
+        NSPRef<id<MTLRenderCommandEncoder>> mRender;
+        bool mInEncoder = false;
+        bool mUsed = false;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm
new file mode 100644
index 00000000000..cced9a76dea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/CommandRecordingContext.mm
@@ -0,0 +1,132 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/CommandRecordingContext.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native::metal {
+
+    CommandRecordingContext::CommandRecordingContext() = default;
+
+    CommandRecordingContext::~CommandRecordingContext() {
+        // Commands must be acquired.
+        ASSERT(mCommands == nullptr);
+    }
+
+    id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
+        return mCommands.Get();
+    }
+
+    // Records that at least one command was encoded; cleared by AcquireCommands().
+    void CommandRecordingContext::MarkUsed() {
+        mUsed = true;
+    }
+    bool CommandRecordingContext::WasUsed() const {
+        return mUsed;
+    }
+
+    MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
+        ASSERT(mCommands == nil);
+        ASSERT(!mUsed);
+
+        // The MTLCommandBuffer will be autoreleased by default.
+        // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
+        // alive.
+        mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
+        if (mCommands == nil) {
+            return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
+        }
+
+        return {};
+    }
+
+    // Transfers ownership of the command buffer (for submission) and resets the
+    // usage flag. The returned ref may be null if no buffer was prepared.
+    NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
+        // A blit encoder can be left open from WriteBuffer, make sure we close it.
+        if (mCommands != nullptr) {
+            EndBlit();
+        }
+
+        ASSERT(!mInEncoder);
+        mUsed = false;
+        return std::move(mCommands);
+    }
+
+    id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
+        ASSERT(mCommands != nullptr);
+
+        // Reuse the open blit encoder if there is one; otherwise open one.
+        if (mBlit == nullptr) {
+            ASSERT(!mInEncoder);
+            mInEncoder = true;
+
+            // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+            // draining from under us.
+            mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
+        }
+        return mBlit.Get();
+    }
+
+    void CommandRecordingContext::EndBlit() {
+        ASSERT(mCommands != nullptr);
+
+        // No-op when no blit encoder is open (unlike EndCompute/EndRender).
+        if (mBlit != nullptr) {
+            [*mBlit endEncoding];
+            mBlit = nullptr;
+            mInEncoder = false;
+        }
+    }
+
+    id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mCompute == nullptr);
+        ASSERT(!mInEncoder);
+
+        mInEncoder = true;
+        // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+        // draining from under us.
+        mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
+        return mCompute.Get();
+    }
+
+    void CommandRecordingContext::EndCompute() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mCompute != nullptr);
+
+        [*mCompute endEncoding];
+        mCompute = nullptr;
+        mInEncoder = false;
+    }
+
+    id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
+        MTLRenderPassDescriptor* descriptor) {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mRender == nullptr);
+        ASSERT(!mInEncoder);
+
+        mInEncoder = true;
+        // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+        // draining from under us.
+        mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
+        return mRender.Get();
+    }
+
+    void CommandRecordingContext::EndRender() {
+        ASSERT(mCommands != nullptr);
+        ASSERT(mRender != nullptr);
+
+        [*mRender endEncoding];
+        mRender = nullptr;
+        mInEncoder = false;
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h
new file mode 100644
index 00000000000..d61db22056e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.h
@@ -0,0 +1,53 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
+#define DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+    class Device;
+
+    // Metal backend implementation of a compute pipeline. Construction is split:
+    // CreateUninitialized builds the object, Initialize() compiles the
+    // MTLComputePipelineState (possibly off-thread via InitializeAsync).
+    class ComputePipeline final : public ComputePipelineBase {
+      public:
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                    void* userdata);
+
+        // Sets the pipeline state and threadgroup memory lengths on |encoder|.
+        void Encode(id<MTLComputeCommandEncoder> encoder);
+        // Workgroup size passed explicitly to dispatch calls in Metal.
+        MTLSize GetLocalWorkGroupSize() const;
+        bool RequiresStorageBufferLength() const;
+
+      private:
+        using ComputePipelineBase::ComputePipelineBase;
+        MaybeError Initialize() override;
+
+        NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
+        MTLSize mLocalWorkgroupSize;
+        bool mRequiresStorageBufferLength;
+        // Per-buffer-index threadgroup memory sizes in bytes from shader
+        // translation; zero entries are skipped when encoding.
+        std::vector<uint32_t> mWorkgroupAllocations;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm
new file mode 100644
index 00000000000..a7663b6eb2d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ComputePipelineMTL.mm
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/ComputePipelineMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+    // static
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        return AcquireRef(new ComputePipeline(device, descriptor));
+    }
+
+    // Compiles the compute stage to an MTLFunction, builds the
+    // MTLComputePipelineState, and caches the workgroup size and per-index
+    // threadgroup memory allocations for Encode().
+    MaybeError ComputePipeline::Initialize() {
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+        ShaderModule::MetalFunctionData computeData;
+
+        DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
+                                   &computeData));
+
+        NSError* error = nullptr;
+        mMtlComputePipelineState.Acquire([mtlDevice
+            newComputePipelineStateWithFunction:computeData.function.Get()
+                                          error:&error]);
+        if (error != nullptr) {
+            // NOTE(review): the message concatenation has no separator before the
+            // NSError description ("...pipeline stateDomain=...").
+            return DAWN_INTERNAL_ERROR("Error creating pipeline state" +
+                                       std::string([error.localizedDescription UTF8String]));
+        }
+        ASSERT(mMtlComputePipelineState != nil);
+
+        // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
+        Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
+        mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
+
+        mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+        mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
+        return {};
+    }
+
+    // Binds the pipeline state and declares threadgroup memory for each buffer
+    // index that the translated shader requires.
+    void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
+        [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
+        for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
+            if (mWorkgroupAllocations[i] == 0) {
+                continue;
+            }
+            // Size must be a multiple of 16 bytes.
+            uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
+            [encoder setThreadgroupMemoryLength:rounded atIndex:i];
+        }
+    }
+
+    MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
+        return mLocalWorkgroupSize;
+    }
+
+    bool ComputePipeline::RequiresStorageBufferLength() const {
+        return mRequiresStorageBufferLength;
+    }
+
+    // static
+    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                          WGPUCreateComputePipelineAsyncCallback callback,
+                                          void* userdata) {
+        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                             userdata);
+        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h
new file mode 100644
index 00000000000..d72cc3f3d03
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.h
@@ -0,0 +1,154 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_DEVICEMTL_H_
+#define DAWNNATIVE_METAL_DEVICEMTL_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/metal/CommandRecordingContext.h"
+#include "dawn/native/metal/Forward.h"
+
+#import <IOSurface/IOSurfaceRef.h>
+#import <Metal/Metal.h>
+#import <QuartzCore/QuartzCore.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+
+namespace dawn::native::metal {
+
+    // NOTE(review): an anonymous namespace in a header gives KalmanInfo internal
+    // linkage per translation unit; a plain forward declaration would be safer.
+    // Verify against the definition in DeviceMTL.mm.
+    namespace {
+        struct KalmanInfo;
+    }
+
+    // Metal backend implementation of a WebGPU device. Owns the MTLDevice and
+    // MTLCommandQueue and the pending CommandRecordingContext.
+    class Device final : public DeviceBase {
+      public:
+        static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+                                                 NSPRef<id<MTLDevice>> mtlDevice,
+                                                 const DeviceDescriptor* descriptor);
+        ~Device() override;
+
+        MaybeError Initialize();
+
+        MaybeError TickImpl() override;
+
+        id<MTLDevice> GetMTLDevice();
+        id<MTLCommandQueue> GetMTLQueue();
+
+        // Commands are accumulated in a single pending context until
+        // SubmitPendingCommandBuffer() is called.
+        CommandRecordingContext* GetPendingCommandContext();
+        MaybeError SubmitPendingCommandBuffer();
+
+        Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+                                                    IOSurfaceRef ioSurface,
+                                                    uint32_t plane);
+        void WaitForCommandsToBeScheduled();
+
+        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) override;
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& dataLayout,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) override;
+
+        uint32_t GetOptimalBytesPerRowAlignment() const override;
+        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+        float GetTimestampPeriodInNS() const override;
+
+      private:
+        Device(AdapterBase* adapter,
+               NSPRef<id<MTLDevice>> mtlDevice,
+               const DeviceDescriptor* descriptor);
+
+        // Factory overrides for every backend object type.
+        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+            const BindGroupDescriptor* descriptor) override;
+        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+            const BindGroupLayoutDescriptor* descriptor,
+            PipelineCompatibilityToken pipelineCompatibilityToken) override;
+        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+            const BufferDescriptor* descriptor) override;
+        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+            CommandEncoder* encoder,
+            const CommandBufferDescriptor* descriptor) override;
+        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+            const PipelineLayoutDescriptor* descriptor) override;
+        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+            const QuerySetDescriptor* descriptor) override;
+        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+            const SamplerDescriptor* descriptor) override;
+        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+            const ShaderModuleDescriptor* descriptor,
+            ShaderModuleParseResult* parseResult) override;
+        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+            Surface* surface,
+            NewSwapChainBase* previousSwapChain,
+            const SwapChainDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+            const TextureDescriptor* descriptor) override;
+        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+            TextureBase* texture,
+            const TextureViewDescriptor* descriptor) override;
+        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+            const ComputePipelineDescriptor* descriptor) override;
+        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+            const RenderPipelineDescriptor* descriptor) override;
+        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) override;
+        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) override;
+
+        void InitTogglesFromDriver();
+        void DestroyImpl() override;
+        MaybeError WaitForIdleForDestruction() override;
+        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+        NSPRef<id<MTLDevice>> mMtlDevice;
+        NSPRef<id<MTLCommandQueue>> mCommandQueue;
+
+        CommandRecordingContext mCommandContext;
+
+        // The completed serial is updated in a Metal completion handler that can be fired on a
+        // different thread, so it needs to be atomic.
+        std::atomic<uint64_t> mCompletedSerial;
+
+        // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
+        // a different thread so we guard access to it with a mutex.
+        std::mutex mLastSubmittedCommandsMutex;
+        NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
+
+        // The current estimation of timestamp period
+        float mTimestampPeriod = 1.0f;
+        // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
+        // and CPU timestamps.
+        MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+        MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+        // The parameters for kalman filter
+        std::unique_ptr<KalmanInfo> mKalmanInfo;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_DEVICEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm
new file mode 100644
index 00000000000..55158ea4e7b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/DeviceMTL.mm
@@ -0,0 +1,506 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/DeviceMTL.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/metal/BindGroupLayoutMTL.h"
+#include "dawn/native/metal/BindGroupMTL.h"
+#include "dawn/native/metal/BufferMTL.h"
+#include "dawn/native/metal/CommandBufferMTL.h"
+#include "dawn/native/metal/ComputePipelineMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/QuerySetMTL.h"
+#include "dawn/native/metal/QueueMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/native/metal/SamplerMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/SwapChainMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <type_traits>
+
+namespace dawn::native::metal {
+
+ namespace {
+
+        // The time interval (in nanoseconds) for each round of the Kalman filter
+ static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
+
+ struct KalmanInfo {
+ float filterValue; // The estimation value
+ float kalmanGain; // The kalman gain
+ float R; // The covariance of the observation noise
+ float P; // The a posteriori estimate covariance
+ };
+
+        // A simplified Kalman filter for estimating the timestamp period based on measured values
+ float KalmanFilter(KalmanInfo* info, float measuredValue) {
+            // Compute the Kalman gain
+ info->kalmanGain = info->P / (info->P + info->R);
+
+ // Correct filter value
+ info->filterValue =
+ info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
+ // Update estimate covariance
+ info->P = (1.0f - info->kalmanGain) * info->P;
+ return info->filterValue;
+ }
+
+ void API_AVAILABLE(macos(10.15), ios(14))
+ UpdateTimestampPeriod(id<MTLDevice> device,
+ KalmanInfo* info,
+ MTLTimestamp* cpuTimestampStart,
+ MTLTimestamp* gpuTimestampStart,
+ float* timestampPeriod) {
+ // The filter value is converged to an optimal value when the kalman gain is less than
+ // 0.01. At this time, the weight of the measured value is too small to change the next
+ // filter value, the sampling and calculations do not need to continue anymore.
+ if (info->kalmanGain < 0.01f) {
+ return;
+ }
+
+ MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
+ [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
+
+ // Update the timestamp start values when timestamp reset happens
+ if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
+ *cpuTimestampStart = cpuTimestampEnd;
+ *gpuTimestampStart = gpuTimestampEnd;
+ return;
+ }
+
+ if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
+ // The measured timestamp period
+ float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
+ static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
+
+ // Measurement update
+ *timestampPeriod = KalmanFilter(info, measurement);
+
+ *cpuTimestampStart = cpuTimestampEnd;
+ *gpuTimestampStart = gpuTimestampEnd;
+ }
+ }
+
+ } // namespace
+
+ // static
+ ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
+ DAWN_TRY(device->Initialize());
+ return device;
+ }
+
+ Device::Device(AdapterBase* adapter,
+ NSPRef<id<MTLDevice>> mtlDevice,
+ const DeviceDescriptor* descriptor)
+ : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {
+ }
+
+ Device::~Device() {
+ Destroy();
+ }
+
+ MaybeError Device::Initialize() {
+ InitTogglesFromDriver();
+
+ mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
+ if (mCommandQueue == nil) {
+ return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
+ }
+
+ DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
+
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
+ // Make a best guess of timestamp period based on device vendor info, and converge it to
+ // an accurate value by the following calculations.
+ mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
+
+            // Initialize the Kalman filter parameters
+ mKalmanInfo = std::make_unique<KalmanInfo>();
+ mKalmanInfo->filterValue = 0.0f;
+ mKalmanInfo->kalmanGain = 0.5f;
+ mKalmanInfo->R =
+ 0.0001f; // The smaller this value is, the smaller the error of measured value is,
+ // the more we can trust the measured value.
+ mKalmanInfo->P = 1.0f;
+
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ // Sample CPU timestamp and GPU timestamp for first time at device creation
+ [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
+ }
+ }
+
+ return DeviceBase::Initialize(new Queue(this));
+ }
+
+ void Device::InitTogglesFromDriver() {
+ {
+ bool haveStoreAndMSAAResolve = false;
+#if defined(DAWN_PLATFORM_MACOS)
+ if (@available(macOS 10.12, *)) {
+ haveStoreAndMSAAResolve =
+ [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+ }
+#elif defined(DAWN_PLATFORM_IOS)
+ haveStoreAndMSAAResolve =
+ [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
+#endif
+ // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
+ SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+
+ bool haveSamplerCompare = true;
+#if defined(DAWN_PLATFORM_IOS)
+ haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+ // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
+ SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
+
+ bool haveBaseVertexBaseInstance = true;
+#if defined(DAWN_PLATFORM_IOS)
+ haveBaseVertexBaseInstance =
+ [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
+ SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
+ }
+
+ // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
+ // that code path if it isn't explicitly disabled.
+ if (IsRobustnessEnabled()) {
+ SetToggle(Toggle::MetalEnableVertexPulling, true);
+ }
+
+ // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
+ SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
+
+ uint32_t deviceId = GetAdapter()->GetDeviceId();
+ uint32_t vendorId = GetAdapter()->GetVendorId();
+
+ // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
+ // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
+ // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
+ if (@available(macOS 10.15, iOS 14.0, *)) {
+ bool useSharedMode = gpu_info::IsIntel(vendorId);
+ SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
+ }
+
+ // TODO(crbug.com/dawn/1071): r8unorm and rg8unorm textures with multiple mip levels don't
+ // clear properly on Intel Macs.
+ if (gpu_info::IsIntel(vendorId)) {
+ SetToggle(Toggle::DisableR8RG8Mipmaps, true);
+ }
+
+ // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
+ // shader provided. Create a dummy fragment shader module to work around this issue.
+ if (gpu_info::IsIntel(vendorId)) {
+ bool useDummyFragmentShader = true;
+ if (gpu_info::IsSkylake(deviceId)) {
+ useDummyFragmentShader = false;
+ }
+ SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
+ }
+ }
+
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return BindGroup::Create(this, descriptor);
+ }
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+ }
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+ }
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
+ }
+ Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return PipelineLayout::Create(this, descriptor);
+ }
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return QuerySet::Create(this, descriptor);
+ }
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
+ }
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ return ShaderModule::Create(this, descriptor, parseResult);
+ }
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return OldSwapChain::Create(this, descriptor);
+ }
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+ }
+ ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return Texture::Create(this, descriptor);
+ }
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return TextureView::Create(texture, descriptor);
+ }
+ void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+ }
+ void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+ }
+
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
+ if (frontendCompletedSerial > mCompletedSerial) {
+            // Sometimes the frontend increases the serials, in which case the completed serial in
+            // the device base will surpass the completed serial we have in the Metal backend, so we
+            // must update ours when we see that the completed serial from the device base has
+            // increased.
+ mCompletedSerial = frontendCompletedSerial;
+ }
+ return ExecutionSerial(mCompletedSerial.load());
+ }
+
+ MaybeError Device::TickImpl() {
+ DAWN_TRY(SubmitPendingCommandBuffer());
+
+ // Just run timestamp period calculation when timestamp feature is enabled.
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
+ if (@available(macos 10.15, iOS 14.0, *)) {
+ UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
+ &mGpuTimestamp, &mTimestampPeriod);
+ }
+ }
+
+ return {};
+ }
+
+ id<MTLDevice> Device::GetMTLDevice() {
+ return mMtlDevice.Get();
+ }
+
+ id<MTLCommandQueue> Device::GetMTLQueue() {
+ return mCommandQueue.Get();
+ }
+
+ CommandRecordingContext* Device::GetPendingCommandContext() {
+ mCommandContext.MarkUsed();
+ return &mCommandContext;
+ }
+
+ MaybeError Device::SubmitPendingCommandBuffer() {
+ if (!mCommandContext.WasUsed()) {
+ return {};
+ }
+
+ IncrementLastSubmittedCommandSerial();
+
+ // Acquire the pending command buffer, which is retained. It must be released later.
+ NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
+
+ // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
+ // schedule handler and this code.
+ {
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ mLastSubmittedCommands = pendingCommands;
+ }
+
+ // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
+        // handle types with copy / move constructors being referenced in the block.
+ id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
+ [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
+ // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
+ // is a local value (and not the member itself).
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
+ this->mLastSubmittedCommands = nullptr;
+ }
+ }];
+
+        // Update the completed serial once the completed handler is fired. Make a local copy of
+        // the last submitted serial (pendingSerial) so it is captured by value.
+ ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
+ // this ObjC block runs on a different thread
+ [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
+ TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ uint64_t(pendingSerial));
+ ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
+ this->mCompletedSerial = uint64_t(pendingSerial);
+ }];
+
+ TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ uint64_t(pendingSerial));
+ [*pendingCommands commit];
+
+ return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
+ }
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer =
+ std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+ }
+
+ MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ // Metal validation layers forbid 0-sized copies, assert it is skipped prior to calling
+ // this function.
+ ASSERT(size != 0);
+
+ ToBackend(destination)
+ ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
+ size);
+
+ id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
+ id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
+ [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
+ sourceOffset:sourceOffset
+ toBuffer:buffer
+ destinationOffset:destinationOffset
+ size:size];
+ return {};
+ }
+
+    // In Metal we don't write from the CPU to the texture directly, even though replaceRegion
+    // could do so, because that function requires a non-private storage mode and Dawn sets the
+    // private storage mode by default for all textures except IOSurfaces on macOS.
+ MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& dataLayout,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ Texture* texture = ToBackend(dst->texture.Get());
+ EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
+ copySizePixels);
+
+ RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
+ source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
+ dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
+ dst->aspect, copySizePixels);
+ return {};
+ }
+
+ Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+ IOSurfaceRef ioSurface,
+ uint32_t plane) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
+ }
+ if (ConsumedError(
+ ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface, plane))) {
+ return nullptr;
+ }
+
+ Ref<Texture> result;
+ if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface, plane),
+ &result)) {
+ return nullptr;
+ }
+ return result;
+ }
+
+ void Device::WaitForCommandsToBeScheduled() {
+ if (ConsumedError(SubmitPendingCommandBuffer())) {
+ return;
+ }
+
+ // Only lock the object while we take a reference to it, otherwise we could block further
+ // progress if the driver calls the scheduled handler (which also acquires the lock) before
+ // finishing the waitUntilScheduled.
+ NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
+ {
+ std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+ lastSubmittedCommands = mLastSubmittedCommands;
+ }
+ [*lastSubmittedCommands waitUntilScheduled];
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ // Forget all pending commands.
+ mCommandContext.AcquireCommands();
+ DAWN_TRY(CheckPassedSerials());
+
+ // Wait for all commands to be finished so we can free resources
+ while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
+ usleep(100);
+ DAWN_TRY(CheckPassedSerials());
+ }
+
+ return {};
+ }
+
+ void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+
+ // Forget all pending commands.
+ mCommandContext.AcquireCommands();
+
+ mCommandQueue = nullptr;
+ mMtlDevice = nullptr;
+ }
+
+ uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+ }
+
+ uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+ }
+
+ float Device::GetTimestampPeriodInNS() const {
+ return mTimestampPeriod;
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/Forward.h b/chromium/third_party/dawn/src/dawn/native/metal/Forward.h
new file mode 100644
index 00000000000..bdfc31d98f5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/Forward.h
@@ -0,0 +1,68 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_FORWARD_H_
+#define DAWNNATIVE_METAL_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::metal {
+
+ class Adapter;
+ class BindGroup;
+ class BindGroupLayout;
+ class Buffer;
+ class CommandBuffer;
+ class ComputePipeline;
+ class Device;
+ class Framebuffer;
+ class PipelineLayout;
+ class QuerySet;
+ class Queue;
+ class RenderPipeline;
+ class Sampler;
+ class ShaderModule;
+ class StagingBuffer;
+ class SwapChain;
+ class Texture;
+ class TextureView;
+
+ struct MetalBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+ };
+
+ template <typename T>
+ auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
+ return ToBackendBase<MetalBackendTraits>(common);
+ }
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm b/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm
new file mode 100644
index 00000000000..c01cd40108e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/MetalBackend.mm
@@ -0,0 +1,49 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// MetalBackend.cpp: contains the definition of symbols exported by MetalBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/MetalBackend.h"
+
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+namespace dawn::native::metal {
+
+ id<MTLDevice> GetMetalDevice(WGPUDevice device) {
+ return ToBackend(FromAPI(device))->GetMTLDevice();
+ }
+
+ AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {
+ }
+
+ ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
+ : ExternalImageDescriptor(ExternalImageType::IOSurface) {
+ }
+
+ WGPUTexture WrapIOSurface(WGPUDevice device,
+ const ExternalImageDescriptorIOSurface* cDescriptor) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ Ref<TextureBase> texture = backendDevice->CreateTextureWrappingIOSurface(
+ cDescriptor, cDescriptor->ioSurface, cDescriptor->plane);
+ return ToAPI(texture.Detach());
+ }
+
+ void WaitForCommandsToBeScheduled(WGPUDevice device) {
+ ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h
new file mode 100644
index 00000000000..efd3f512796
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
+#define DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
+
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/native/PerStage.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+
+ // The number of Metal buffers usable by applications in general
+ static constexpr size_t kMetalBufferTableSize = 31;
+ // The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
+ static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
+ // The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
+ static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
+
+ static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
+
+ class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static Ref<PipelineLayout> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
+
+ using BindingIndexInfo =
+ ityp::array<BindGroupIndex,
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
+ kMaxBindGroups>;
+ const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
+
+ // The number of Metal vertex stage buffers used for the whole pipeline layout.
+ uint32_t GetBufferBindingCount(SingleShaderStage stage);
+
+ private:
+ PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+ ~PipelineLayout() override = default;
+ PerStage<BindingIndexInfo> mIndexInfo;
+ PerStage<uint32_t> mBufferBindingCount;
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm
new file mode 100644
index 00000000000..5f789eae0e6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/PipelineLayoutMTL.mm
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+ // static
+ Ref<PipelineLayout> PipelineLayout::Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(device, descriptor));
+ }
+
+ PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+ : PipelineLayoutBase(device, descriptor) {
+ // Each stage has its own numbering namespace in CompilerMSL.
+ for (auto stage : IterateStages(kAllStages)) {
+ uint32_t bufferIndex = 0;
+ uint32_t samplerIndex = 0;
+ uint32_t textureIndex = 0;
+
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
+
+ for (BindingIndex bindingIndex{0};
+ bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+ if (!(bindingInfo.visibility & StageBit(stage))) {
+ continue;
+ }
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ mIndexInfo[stage][group][bindingIndex] = bufferIndex;
+ bufferIndex++;
+ break;
+
+ case BindingInfoType::Sampler:
+ mIndexInfo[stage][group][bindingIndex] = samplerIndex;
+ samplerIndex++;
+ break;
+
+ case BindingInfoType::Texture:
+ case BindingInfoType::StorageTexture:
+ case BindingInfoType::ExternalTexture:
+ mIndexInfo[stage][group][bindingIndex] = textureIndex;
+ textureIndex++;
+ break;
+ }
+ }
+ }
+
+ mBufferBindingCount[stage] = bufferIndex;
+ }
+ }
+
+ const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
+ SingleShaderStage stage) const {
+ return mIndexInfo[stage];
+ }
+
+ uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
+ return mBufferBindingCount[stage];
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h
new file mode 100644
index 00000000000..23d6c445942
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.h
@@ -0,0 +1,54 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_QUERYSETMTL_H_
+#define DAWNNATIVE_METAL_QUERYSETMTL_H_
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ id<MTLBuffer> GetVisibilityBuffer() const;
+ id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
+ API_AVAILABLE(macos(10.15), ios(14.0));
+
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ // Dawn API
+ void DestroyImpl() override;
+
+ NSPRef<id<MTLBuffer>> mVisibilityBuffer;
+ // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
+ // propagate nicely through templates.
+ id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
+ ios(14.0)) = nullptr;
+ };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_QUERYSETMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm
new file mode 100644
index 00000000000..4882fee86c7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QuerySetMTL.mm
@@ -0,0 +1,139 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/QuerySetMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+ namespace {
+
+        // Creates an MTLCounterSampleBuffer able to record `count` samples from the given
+        // common counter set (e.g. timestamp or statistics). Requires macOS 10.15 / iOS 14.0.
+        // Returns an out-of-memory error if Metal reports a creation error.
+        ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(
+            Device* device,
+            MTLCommonCounterSet counterSet,
+            uint32_t count) API_AVAILABLE(macos(10.15), ios(14.0)) {
+            NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
+                AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
+            MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
+
+            // To determine which counters are available from a device, we need to iterate through
+            // the counterSets property of a MTLDevice. Then configure which counters will be
+            // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
+            // property to the matched one of the available set.
+            for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
+                if ([set.name isEqualToString:counterSet]) {
+                    descriptor.counterSet = set;
+                    break;
+                }
+            }
+            ASSERT(descriptor.counterSet != nullptr);
+
+            // Clamp to at least one sample; Metal rejects a zero-sized sample buffer.
+            descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
+            descriptor.storageMode = MTLStorageModePrivate;
+            if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
+                descriptor.storageMode = MTLStorageModeShared;
+            }
+
+            NSError* error = nullptr;
+            id<MTLCounterSampleBuffer> counterSampleBuffer =
+                [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor
+                                                                       error:&error];
+            if (error != nullptr) {
+                return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
+                                                [error.localizedDescription UTF8String]);
+            }
+
+            return counterSampleBuffer;
+        }
+ }
+
+    // static
+    // Creates the backend query set and runs Initialize(), which allocates the Metal
+    // resources; any initialization error is propagated to the caller.
+    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                                  const QuerySetDescriptor* descriptor) {
+        Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+        DAWN_TRY(queryset->Initialize());
+        return queryset;
+    }
+
+    // Allocates the Metal resource backing this query set, chosen by query type:
+    // a private MTLBuffer for occlusion queries, or a counter sample buffer for
+    // pipeline-statistics/timestamp queries (macOS 10.15+ / iOS 14.0+ only; the
+    // frontend is expected to have rejected these types elsewhere, hence UNREACHABLE).
+    MaybeError QuerySet::Initialize() {
+        Device* device = ToBackend(GetDevice());
+
+        switch (GetQueryType()) {
+            case wgpu::QueryType::Occlusion: {
+                // Create buffer for writing 64-bit results.
+                // The size is clamped to at least 4 bytes even for a zero-count set.
+                NSUInteger bufferSize = static_cast<NSUInteger>(
+                    std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
+                mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
+                    newBufferWithLength:bufferSize
+                                options:MTLResourceStorageModePrivate]);
+
+                if (mVisibilityBuffer == nil) {
+                    return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
+                }
+                break;
+            }
+            case wgpu::QueryType::PipelineStatistics:
+                if (@available(macOS 10.15, iOS 14.0, *)) {
+                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
+                                                              GetQueryCount()));
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            case wgpu::QueryType::Timestamp:
+                if (@available(macOS 10.15, iOS 14.0, *)) {
+                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
+                                                              GetQueryCount()));
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        return {};
+    }
+
+    // Returns the buffer occlusion query results are written into (non-owning).
+    id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
+        return mVisibilityBuffer.Get();
+    }
+
+    // Returns the counter sample buffer used by timestamp/pipeline-statistics queries
+    // (non-owning; nullptr if this set holds occlusion queries).
+    id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
+        API_AVAILABLE(macos(10.15), ios(14.0)) {
+        return mCounterSampleBuffer;
+    }
+
+ QuerySet::~QuerySet() = default;
+
+    // Releases the Metal resources owned by this query set.
+    void QuerySet::DestroyImpl() {
+        QuerySetBase::DestroyImpl();
+
+        // Dropping the NSPRef releases the visibility buffer.
+        mVisibilityBuffer = nullptr;
+
+        // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work well with
+        // templates, so it must be released manually here.
+        if (@available(macOS 10.15, iOS 14.0, *)) {
+            [mCounterSampleBuffer release];
+            mCounterSampleBuffer = nullptr;
+        }
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h
new file mode 100644
index 00000000000..fd94e0796a0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.h
@@ -0,0 +1,34 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_QUEUEMTL_H_
+#define DAWNNATIVE_METAL_QUEUEMTL_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::metal {
+
+ class Device;
+
+    // Metal backend queue. Submission records each Dawn command buffer into the
+    // Device's pending Metal command context (see SubmitImpl in QueueMTL.mm).
+    class Queue final : public QueueBase {
+      public:
+        Queue(Device* device);
+
+      private:
+        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_QUEUEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm
new file mode 100644
index 00000000000..d489295a866
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/QueueMTL.mm
@@ -0,0 +1,48 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/QueueMTL.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/metal/CommandBufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::metal {
+
+    // Thin constructor: all submission state is owned by the Device (see SubmitImpl).
+    Queue::Queue(Device* device) : QueueBase(device) {
+    }
+
+    // Translates the given Dawn command buffers into Metal commands in the device's
+    // pending recording context, then submits the pending Metal command buffer.
+    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+        Device* device = ToBackend(GetDevice());
+
+        // NOTE(review): Tick() runs before recording — presumably to process completed
+        // work/bookkeeping; confirm against DeviceMTL.
+        DAWN_TRY(device->Tick());
+
+        CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+
+        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+        for (uint32_t i = 0; i < commandCount; ++i) {
+            DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
+        }
+        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+
+        return device->SubmitPendingCommandBuffer();
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h
new file mode 100644
index 00000000000..a4c6296b31e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
+#define DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+
+    // Metal backend render pipeline. Wraps an MTLRenderPipelineState plus the
+    // fixed-function state Metal sets per-draw rather than per-pipeline
+    // (primitive topology, winding, cull mode, depth-stencil state).
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        static Ref<RenderPipelineBase> CreateUninitialized(
+            Device* device,
+            const RenderPipelineDescriptor* descriptor);
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+
+        MTLPrimitiveType GetMTLPrimitiveTopology() const;
+        MTLWinding GetMTLFrontFace() const;
+        MTLCullMode GetMTLCullMode() const;
+
+        // Sets this pipeline's MTLRenderPipelineState on the encoder.
+        void Encode(id<MTLRenderCommandEncoder> encoder);
+
+        id<MTLDepthStencilState> GetMTLDepthStencilState();
+
+        // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
+        // vertex buffer table.
+        uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
+
+        wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
+
+        MaybeError Initialize() override;
+
+      private:
+        using RenderPipelineBase::RenderPipelineBase;
+
+        NSRef<MTLVertexDescriptor> MakeVertexDesc();
+
+        MTLPrimitiveType mMtlPrimitiveTopology;
+        MTLWinding mMtlFrontFace;
+        MTLCullMode mMtlCullMode;
+        NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
+        NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
+        // Dawn vertex-buffer slot -> Metal vertex buffer table index (filled by MakeVertexDesc).
+        ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
+
+        wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm
new file mode 100644
index 00000000000..6b7e0fecb74
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/RenderPipelineMTL.mm
@@ -0,0 +1,506 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/RenderPipelineMTL.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+ namespace {
+        // Maps a WebGPU vertex format to the equivalent MTLVertexFormat.
+        // Any format without a listed mapping is a frontend validation error here.
+        MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return MTLVertexFormatUChar2;
+                case wgpu::VertexFormat::Uint8x4:
+                    return MTLVertexFormatUChar4;
+                case wgpu::VertexFormat::Sint8x2:
+                    return MTLVertexFormatChar2;
+                case wgpu::VertexFormat::Sint8x4:
+                    return MTLVertexFormatChar4;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return MTLVertexFormatUChar2Normalized;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return MTLVertexFormatUChar4Normalized;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return MTLVertexFormatChar2Normalized;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return MTLVertexFormatChar4Normalized;
+                case wgpu::VertexFormat::Uint16x2:
+                    return MTLVertexFormatUShort2;
+                case wgpu::VertexFormat::Uint16x4:
+                    return MTLVertexFormatUShort4;
+                case wgpu::VertexFormat::Sint16x2:
+                    return MTLVertexFormatShort2;
+                case wgpu::VertexFormat::Sint16x4:
+                    return MTLVertexFormatShort4;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return MTLVertexFormatUShort2Normalized;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return MTLVertexFormatUShort4Normalized;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return MTLVertexFormatShort2Normalized;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return MTLVertexFormatShort4Normalized;
+                case wgpu::VertexFormat::Float16x2:
+                    return MTLVertexFormatHalf2;
+                case wgpu::VertexFormat::Float16x4:
+                    return MTLVertexFormatHalf4;
+                case wgpu::VertexFormat::Float32:
+                    return MTLVertexFormatFloat;
+                case wgpu::VertexFormat::Float32x2:
+                    return MTLVertexFormatFloat2;
+                case wgpu::VertexFormat::Float32x3:
+                    return MTLVertexFormatFloat3;
+                case wgpu::VertexFormat::Float32x4:
+                    return MTLVertexFormatFloat4;
+                case wgpu::VertexFormat::Uint32:
+                    return MTLVertexFormatUInt;
+                case wgpu::VertexFormat::Uint32x2:
+                    return MTLVertexFormatUInt2;
+                case wgpu::VertexFormat::Uint32x3:
+                    return MTLVertexFormatUInt3;
+                case wgpu::VertexFormat::Uint32x4:
+                    return MTLVertexFormatUInt4;
+                case wgpu::VertexFormat::Sint32:
+                    return MTLVertexFormatInt;
+                case wgpu::VertexFormat::Sint32x2:
+                    return MTLVertexFormatInt2;
+                case wgpu::VertexFormat::Sint32x3:
+                    return MTLVertexFormatInt3;
+                case wgpu::VertexFormat::Sint32x4:
+                    return MTLVertexFormatInt4;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // Maps a WebGPU vertex step mode to its Metal step function.
+        MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
+            switch (mode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return MTLVertexStepFunctionPerVertex;
+                case wgpu::VertexStepMode::Instance:
+                    return MTLVertexStepFunctionPerInstance;
+            }
+        }
+
+        // Maps a WebGPU primitive topology to the MTLPrimitiveType used at draw time.
+        MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return MTLPrimitiveTypePoint;
+                case wgpu::PrimitiveTopology::LineList:
+                    return MTLPrimitiveTypeLine;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return MTLPrimitiveTypeLineStrip;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return MTLPrimitiveTypeTriangle;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return MTLPrimitiveTypeTriangleStrip;
+            }
+        }
+
+        // Maps a WebGPU primitive topology to the coarser MTLPrimitiveTopologyClass
+        // required by the pipeline descriptor (point/line/triangle class only).
+        MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
+            wgpu::PrimitiveTopology primitiveTopology) {
+            switch (primitiveTopology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return MTLPrimitiveTopologyClassPoint;
+                case wgpu::PrimitiveTopology::LineList:
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return MTLPrimitiveTopologyClassLine;
+                case wgpu::PrimitiveTopology::TriangleList:
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return MTLPrimitiveTopologyClassTriangle;
+            }
+        }
+
+        // Maps a WebGPU blend factor to MTLBlendFactor. `alpha` selects the
+        // alpha-channel variant for the Constant/OneMinusConstant factors.
+        MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return MTLBlendFactorZero;
+                case wgpu::BlendFactor::One:
+                    return MTLBlendFactorOne;
+                case wgpu::BlendFactor::Src:
+                    return MTLBlendFactorSourceColor;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return MTLBlendFactorOneMinusSourceColor;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return MTLBlendFactorSourceAlpha;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return MTLBlendFactorOneMinusSourceAlpha;
+                case wgpu::BlendFactor::Dst:
+                    return MTLBlendFactorDestinationColor;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return MTLBlendFactorOneMinusDestinationColor;
+                case wgpu::BlendFactor::DstAlpha:
+                    return MTLBlendFactorDestinationAlpha;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return MTLBlendFactorOneMinusDestinationAlpha;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return MTLBlendFactorSourceAlphaSaturated;
+                case wgpu::BlendFactor::Constant:
+                    return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return alpha ? MTLBlendFactorOneMinusBlendAlpha
+                                 : MTLBlendFactorOneMinusBlendColor;
+            }
+        }
+
+        // Maps a WebGPU blend operation to MTLBlendOperation.
+        MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return MTLBlendOperationAdd;
+                case wgpu::BlendOperation::Subtract:
+                    return MTLBlendOperationSubtract;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return MTLBlendOperationReverseSubtract;
+                case wgpu::BlendOperation::Min:
+                    return MTLBlendOperationMin;
+                case wgpu::BlendOperation::Max:
+                    return MTLBlendOperationMax;
+            }
+        }
+
+        // Converts a WebGPU color write mask to MTLColorWriteMask. If the fragment
+        // shader doesn't declare an output for this attachment, all writes are masked off.
+        MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
+                                              bool isDeclaredInFragmentShader) {
+            if (!isDeclaredInFragmentShader) {
+                return MTLColorWriteMaskNone;
+            }
+
+            MTLColorWriteMask mask = MTLColorWriteMaskNone;
+
+            if (writeMask & wgpu::ColorWriteMask::Red) {
+                mask |= MTLColorWriteMaskRed;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Green) {
+                mask |= MTLColorWriteMaskGreen;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Blue) {
+                mask |= MTLColorWriteMaskBlue;
+            }
+            if (writeMask & wgpu::ColorWriteMask::Alpha) {
+                mask |= MTLColorWriteMaskAlpha;
+            }
+
+            return mask;
+        }
+
+        // Fills one pipeline color-attachment descriptor from a Dawn ColorTargetState:
+        // blending is enabled iff a blend state is present, and the write mask accounts
+        // for whether the fragment shader actually writes this attachment.
+        void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
+                              const ColorTargetState* state,
+                              bool isDeclaredInFragmentShader) {
+            attachment.blendingEnabled = state->blend != nullptr;
+            if (attachment.blendingEnabled) {
+                attachment.sourceRGBBlendFactor =
+                    MetalBlendFactor(state->blend->color.srcFactor, false);
+                attachment.destinationRGBBlendFactor =
+                    MetalBlendFactor(state->blend->color.dstFactor, false);
+                attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
+                attachment.sourceAlphaBlendFactor =
+                    MetalBlendFactor(state->blend->alpha.srcFactor, true);
+                attachment.destinationAlphaBlendFactor =
+                    MetalBlendFactor(state->blend->alpha.dstFactor, true);
+                attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
+            }
+            attachment.writeMask =
+                MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+        }
+
+        // Maps a WebGPU stencil operation to MTLStencilOperation.
+        MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
+            switch (stencilOperation) {
+                case wgpu::StencilOperation::Keep:
+                    return MTLStencilOperationKeep;
+                case wgpu::StencilOperation::Zero:
+                    return MTLStencilOperationZero;
+                case wgpu::StencilOperation::Replace:
+                    return MTLStencilOperationReplace;
+                case wgpu::StencilOperation::Invert:
+                    return MTLStencilOperationInvert;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return MTLStencilOperationIncrementClamp;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return MTLStencilOperationDecrementClamp;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return MTLStencilOperationIncrementWrap;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return MTLStencilOperationDecrementWrap;
+            }
+        }
+
+        // Builds an MTLDepthStencilDescriptor from Dawn's DepthStencilState. Front/back
+        // stencil descriptors are only populated when stencil testing is enabled.
+        NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
+            NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
+                AcquireNSRef([MTLDepthStencilDescriptor new]);
+            MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
+
+            mtlDepthStencilDescriptor.depthCompareFunction =
+                ToMetalCompareFunction(descriptor->depthCompare);
+            mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
+
+            if (StencilTestEnabled(descriptor)) {
+                NSRef<MTLStencilDescriptor> backFaceStencilRef =
+                    AcquireNSRef([MTLStencilDescriptor new]);
+                MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
+                NSRef<MTLStencilDescriptor> frontFaceStencilRef =
+                    AcquireNSRef([MTLStencilDescriptor new]);
+                MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
+
+                backFaceStencil.stencilCompareFunction =
+                    ToMetalCompareFunction(descriptor->stencilBack.compare);
+                backFaceStencil.stencilFailureOperation =
+                    MetalStencilOperation(descriptor->stencilBack.failOp);
+                backFaceStencil.depthFailureOperation =
+                    MetalStencilOperation(descriptor->stencilBack.depthFailOp);
+                backFaceStencil.depthStencilPassOperation =
+                    MetalStencilOperation(descriptor->stencilBack.passOp);
+                backFaceStencil.readMask = descriptor->stencilReadMask;
+                backFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+                frontFaceStencil.stencilCompareFunction =
+                    ToMetalCompareFunction(descriptor->stencilFront.compare);
+                frontFaceStencil.stencilFailureOperation =
+                    MetalStencilOperation(descriptor->stencilFront.failOp);
+                frontFaceStencil.depthFailureOperation =
+                    MetalStencilOperation(descriptor->stencilFront.depthFailOp);
+                frontFaceStencil.depthStencilPassOperation =
+                    MetalStencilOperation(descriptor->stencilFront.passOp);
+                frontFaceStencil.readMask = descriptor->stencilReadMask;
+                frontFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+                // Assigning copies the stencil descriptors; the local refs can be released.
+                mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
+                mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
+            }
+
+            return mtlDepthStencilDescRef;
+        }
+
+        // Maps a WebGPU front-face winding order to MTLWinding.
+        MTLWinding MTLFrontFace(wgpu::FrontFace face) {
+            switch (face) {
+                case wgpu::FrontFace::CW:
+                    return MTLWindingClockwise;
+                case wgpu::FrontFace::CCW:
+                    return MTLWindingCounterClockwise;
+            }
+        }
+
+        // Maps a WebGPU cull mode to MTLCullMode.
+        MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
+            switch (mode) {
+                case wgpu::CullMode::None:
+                    return MTLCullModeNone;
+                case wgpu::CullMode::Front:
+                    return MTLCullModeFront;
+                case wgpu::CullMode::Back:
+                    return MTLCullModeBack;
+            }
+        }
+
+ } // anonymous namespace
+
+    // static
+    // Allocates the backend pipeline object without building the Metal pipeline state;
+    // Initialize() (possibly via InitializeAsync) does the actual compilation.
+    Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    // Builds the MTLRenderPipelineState from the Dawn pipeline description:
+    // caches the per-draw fixed-function state, constructs the vertex descriptor,
+    // compiles the vertex/fragment MTLFunctions, configures color/depth-stencil
+    // attachment formats and blending, and finally creates the cached depth-stencil
+    // state object.
+    MaybeError RenderPipeline::Initialize() {
+        mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
+        mMtlFrontFace = MTLFrontFace(GetFrontFace());
+        mMtlCullMode = ToMTLCullMode(GetCullMode());
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+        NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
+            AcquireNSRef([MTLRenderPipelineDescriptor new]);
+        MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
+
+        // TODO: MakeVertexDesc should be const in the future, so we don't need to call it here when
+        // vertex pulling is enabled
+        NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
+
+        // Calling MakeVertexDesc first is important since it sets indices for packed bindings
+        // (mMtlVertexBufferIndices); with vertex pulling the descriptor itself is left empty.
+        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+            vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
+        }
+        descriptorMTL.vertexDescriptor = vertexDesc.Get();
+
+        const PerStage<ProgrammableStage>& allStages = GetAllStages();
+        const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
+        ShaderModule::MetalFunctionData vertexData;
+        DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
+                                   &vertexData, 0xFFFFFFFF, this));
+
+        descriptorMTL.vertexFunction = vertexData.function.Get();
+        if (vertexData.needsStorageBufferLength) {
+            mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+        }
+
+        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+            const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
+            ShaderModule::MetalFunctionData fragmentData;
+            DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
+                                       ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
+
+            descriptorMTL.fragmentFunction = fragmentData.function.Get();
+            if (fragmentData.needsStorageBufferLength) {
+                mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
+            }
+
+            // Configure format, blending and write mask for every used color attachment.
+            const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
+                    MetalPixelFormat(GetColorAttachmentFormat(i));
+                const ColorTargetState* descriptor = GetColorTargetState(i);
+                ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
+                                 descriptor, fragmentOutputsWritten[i]);
+            }
+        }
+
+        if (HasDepthStencilAttachment()) {
+            wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
+            const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
+            MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
+
+            if (internalFormat.HasDepth()) {
+                descriptorMTL.depthAttachmentPixelFormat = metalFormat;
+            }
+            if (internalFormat.HasStencil()) {
+                descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
+            }
+        }
+
+        descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
+        descriptorMTL.sampleCount = GetSampleCount();
+        descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
+
+        NSError* error = nullptr;
+        mMtlRenderPipelineState =
+            AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
+                                                                    error:&error]);
+        if (error != nullptr) {
+            return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state") +
+                                       [error.localizedDescription UTF8String]);
+        }
+        ASSERT(mMtlRenderPipelineState != nil);
+
+        // Create depth stencil state and cache it, fetch the cached depth stencil state when we
+        // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
+        // to improve performance.
+        NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
+            MakeDepthStencilDesc(GetDepthStencilState());
+        mMtlDepthStencilState =
+            AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
+
+        return {};
+    }
+
+    // Cached per-draw primitive type (set in Initialize()).
+    MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
+        return mMtlPrimitiveTopology;
+    }
+
+    // Cached front-face winding (set in Initialize()).
+    MTLWinding RenderPipeline::GetMTLFrontFace() const {
+        return mMtlFrontFace;
+    }
+
+    // Cached cull mode (set in Initialize()).
+    MTLCullMode RenderPipeline::GetMTLCullMode() const {
+        return mMtlCullMode;
+    }
+
+    // Binds this pipeline's compiled MTLRenderPipelineState on the encoder.
+    void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
+        [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
+    }
+
+    // Returns the depth-stencil state object created and cached in Initialize().
+    id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
+        return mMtlDepthStencilState.Get();
+    }
+
+    // Translates a Dawn vertex-buffer slot into its Metal vertex buffer table index
+    // (assigned by MakeVertexDesc during Initialize()).
+    uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
+        ASSERT(slot < kMaxVertexBuffersTyped);
+        return mMtlVertexBufferIndices[slot];
+    }
+
+    // Stages whose compiled MTLFunction needs storage-buffer lengths passed at bind time.
+    wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
+        return mStagesRequiringStorageBufferLength;
+    }
+
+    // Builds the MTLVertexDescriptor for this pipeline and records, as a side effect,
+    // the Metal buffer-table index assigned to each used Dawn vertex-buffer slot
+    // (mMtlVertexBufferIndices). Vertex buffers are placed after the bind-group buffers.
+    NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
+        MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
+
+        // Vertex buffers are packed after all the buffers for the bind groups.
+        uint32_t mtlVertexBufferIndex =
+            ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& info = GetVertexBuffer(slot);
+
+            MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
+            if (info.arrayStride == 0) {
+                // For MTLVertexStepFunctionConstant, the stepRate must be 0,
+                // but the arrayStride must NOT be 0, so we made up it with
+                // max(attrib.offset + sizeof(attrib) for each attrib)
+                size_t maxArrayStride = 0;
+                for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+                    const VertexAttributeInfo& attrib = GetAttribute(loc);
+                    // Only use the attributes that use the current input
+                    if (attrib.vertexBufferSlot != slot) {
+                        continue;
+                    }
+                    maxArrayStride =
+                        std::max(maxArrayStride, GetVertexFormatInfo(attrib.format).byteSize +
+                                                     size_t(attrib.offset));
+                }
+                layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
+                layoutDesc.stepRate = 0;
+                // Metal requires the stride must be a multiple of 4 bytes, align it with next
+                // multiple of 4 if it's not.
+                layoutDesc.stride = Align(maxArrayStride, 4);
+            } else {
+                layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
+                layoutDesc.stepRate = 1;
+                layoutDesc.stride = info.arrayStride;
+            }
+
+            // The descriptor retains layoutDesc on assignment, so release the local +1 ref.
+            mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
+            [layoutDesc release];
+
+            mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
+            mtlVertexBufferIndex++;
+        }
+
+        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& info = GetAttribute(loc);
+
+            auto attribDesc = [MTLVertexAttributeDescriptor new];
+            attribDesc.format = VertexFormatType(info.format);
+            attribDesc.offset = info.offset;
+            attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
+            mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
+            [attribDesc release];
+        }
+
+        return AcquireNSRef(mtlVertexDescriptor);
+    }
+
+    // Runs pipeline initialization off the critical path via a
+    // CreateRenderPipelineAsyncTask; the callback fires with the result.
+    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata) {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                            userdata);
+        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h
new file mode 100644
index 00000000000..166fbe4117e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.h
@@ -0,0 +1,44 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SAMPLERMTL_H_
+#define DAWNNATIVE_METAL_SAMPLERMTL_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+
+    // Metal backend sampler: a thin wrapper owning an MTLSamplerState built from
+    // the WebGPU sampler descriptor.
+    class Sampler final : public SamplerBase {
+      public:
+        static ResultOrError<Ref<Sampler>> Create(Device* device,
+                                                  const SamplerDescriptor* descriptor);
+
+        id<MTLSamplerState> GetMTLSamplerState();
+
+      private:
+        using SamplerBase::SamplerBase;
+        MaybeError Initialize(const SamplerDescriptor* descriptor);
+
+        NSPRef<id<MTLSamplerState>> mMtlSamplerState;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_SAMPLERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm
new file mode 100644
index 00000000000..235b2f8204a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SamplerMTL.mm
@@ -0,0 +1,106 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SamplerMTL.h"
+
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+namespace dawn::native::metal {
+
+    namespace {
+        // File-local converters from WebGPU enums to their Metal equivalents.
+        // Each switch is exhaustive over the wgpu enum, so no default case is
+        // needed (the compiler checks coverage).
+        MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
+            switch (mode) {
+                case wgpu::FilterMode::Nearest:
+                    return MTLSamplerMinMagFilterNearest;
+                case wgpu::FilterMode::Linear:
+                    return MTLSamplerMinMagFilterLinear;
+            }
+        }
+
+        MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
+            switch (mode) {
+                case wgpu::FilterMode::Nearest:
+                    return MTLSamplerMipFilterNearest;
+                case wgpu::FilterMode::Linear:
+                    return MTLSamplerMipFilterLinear;
+            }
+        }
+
+        // Maps a WebGPU address (wrap) mode to the Metal sampler address mode.
+        MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
+            switch (mode) {
+                case wgpu::AddressMode::Repeat:
+                    return MTLSamplerAddressModeRepeat;
+                case wgpu::AddressMode::MirrorRepeat:
+                    return MTLSamplerAddressModeMirrorRepeat;
+                case wgpu::AddressMode::ClampToEdge:
+                    return MTLSamplerAddressModeClampToEdge;
+            }
+        }
+    }
+
+    // static
+    // Rejects compare samplers when the MetalDisableSamplerCompare toggle is
+    // set, then allocates the sampler and initializes its MTLSamplerState.
+    ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+                                                const SamplerDescriptor* descriptor) {
+        DAWN_INVALID_IF(
+            descriptor->compare != wgpu::CompareFunction::Undefined &&
+                device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
+            "Sampler compare function (%s) not supported. Compare functions are disabled with the "
+            "Metal backend.",
+            descriptor->compare);
+
+        Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+        DAWN_TRY(sampler->Initialize(descriptor));
+        return sampler;
+    }
+
+    // Translates the descriptor into an MTLSamplerDescriptor and creates the
+    // immutable MTLSamplerState on the backend device.
+    MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+        NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
+        MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
+
+        mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
+        mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
+        mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
+
+        mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
+        mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
+        mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
+
+        mtlDesc.lodMinClamp = descriptor->lodMinClamp;
+        mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
+        // Clamp to 16: per the linked Apple documentation, maxAnisotropy must
+        // be in [1, 16].
+        // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
+        mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+            // Sampler compare is unsupported before A9, which we validate in
+            // Sampler::Create.
+            mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+            // The value is default-initialized in the else-case, and we don't set it or the
+            // Metal debug device errors.
+        }
+
+        mMtlSamplerState = AcquireNSPRef(
+            [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
+
+        // newSamplerStateWithDescriptor: returns nil on failure; surface it as OOM.
+        if (mMtlSamplerState == nil) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
+        }
+        return {};
+    }
+
+    // Non-owning accessor for the backing MTLSamplerState.
+    id<MTLSamplerState> Sampler::GetMTLSamplerState() {
+        return mMtlSamplerState.Get();
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h
new file mode 100644
index 00000000000..0188bb17f14
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SHADERMODULEMTL_H_
+#define DAWNNATIVE_METAL_SHADERMODULEMTL_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/NSRef.h"
+#include "dawn/native/Error.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+ class PipelineLayout;
+ class RenderPipeline;
+
+    // Metal implementation of a WebGPU shader module. Compilation to an
+    // MTLFunction is deferred to CreateFunction, since it needs the pipeline
+    // layout (and, for vertex stages, the render pipeline).
+    class ShaderModule final : public ShaderModuleBase {
+      public:
+        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                       const ShaderModuleDescriptor* descriptor,
+                                                       ShaderModuleParseResult* parseResult);
+
+        // Result of compiling one entry point.
+        struct MetalFunctionData {
+            // The compiled Metal function for the entry point.
+            NSPRef<id<MTLFunction>> function;
+            // True when the generated MSL reads the storage-buffer-length
+            // side buffer.
+            bool needsStorageBufferLength;
+            // Per-entry-point workgroup allocation sizes reported by the MSL
+            // generator.
+            std::vector<uint32_t> workgroupAllocations;
+        };
+
+        // MTLFunctionConstantValues needs @available tag to compile
+        // Use id (like void*) in function signature as workaround and do static cast inside
+        MaybeError CreateFunction(const char* entryPointName,
+                                  SingleShaderStage stage,
+                                  const PipelineLayout* layout,
+                                  MetalFunctionData* out,
+                                  id constantValues = nil,
+                                  uint32_t sampleMask = 0xFFFFFFFF,
+                                  const RenderPipeline* renderPipeline = nullptr);
+
+      private:
+        // Runs the Tint transforms and generates MSL source; fills the
+        // out-parameters describing the generated entry point.
+        ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
+                                                  SingleShaderStage stage,
+                                                  const PipelineLayout* layout,
+                                                  uint32_t sampleMask,
+                                                  const RenderPipeline* renderPipeline,
+                                                  std::string* remappedEntryPointName,
+                                                  bool* needsStorageBufferLength,
+                                                  bool* hasInvariantAttribute,
+                                                  std::vector<uint32_t>* workgroupAllocations);
+        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+        ~ShaderModule() override = default;
+        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_SHADERMODULEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm
new file mode 100644
index 00000000000..e182898d2a1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/ShaderModuleMTL.mm
@@ -0,0 +1,278 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/ShaderModuleMTL.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/PipelineLayoutMTL.h"
+#include "dawn/native/metal/RenderPipelineMTL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native::metal {
+
+    // static
+    // Allocates the module and runs base-class initialization on the parse
+    // result; no Metal work happens until CreateFunction.
+    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                          const ShaderModuleDescriptor* descriptor,
+                                                          ShaderModuleParseResult* parseResult) {
+        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+        DAWN_TRY(module->Initialize(parseResult));
+        return module;
+    }
+
+    // Forwards straight to the base class; the backend keeps no extra state
+    // at construction time.
+    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+        : ShaderModuleBase(device, descriptor) {
+    }
+
+    // Installs a Tint internal-compiler-error handler for the duration of
+    // base initialization, then validates/ingests the parsed program.
+    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+        return InitializeBase(parseResult);
+    }
+
+    // Runs the Tint transform pipeline (entry-point stripping, external
+    // textures, optional vertex pulling, robustness, binding remapping,
+    // renaming) on this module's program and generates MSL for
+    // `entryPointName`. On success the out-parameters receive the remapped
+    // entry point name, whether the shader reads the storage-buffer-length
+    // buffer, whether it uses an invariant attribute, and its workgroup
+    // allocation sizes. `renderPipeline` is dereferenced only for vertex
+    // stages (vertex pulling config and point-size emission).
+    ResultOrError<std::string> ShaderModule::TranslateToMSL(
+        const char* entryPointName,
+        SingleShaderStage stage,
+        const PipelineLayout* layout,
+        uint32_t sampleMask,
+        const RenderPipeline* renderPipeline,
+        std::string* remappedEntryPointName,
+        bool* needsStorageBufferLength,
+        bool* hasInvariantAttribute,
+        std::vector<uint32_t>* workgroupAllocations) {
+        ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+        // Remap BindingNumber to BindingIndex in WGSL shader
+        using BindingRemapper = tint::transform::BindingRemapper;
+        using BindingPoint = tint::transform::BindingPoint;
+        BindingRemapper::BindingPoints bindingPoints;
+        BindingRemapper::AccessControls accessControls;
+
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase::BindingMap& bindingMap =
+                layout->GetBindGroupLayout(group)->GetBindingMap();
+            for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+                const BindingInfo& bindingInfo =
+                    layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+
+                // Bindings not visible to this stage get no Metal slot.
+                if (!(bindingInfo.visibility & StageBit(stage))) {
+                    continue;
+                }
+
+                uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
+
+                // Metal uses a flat per-stage index space, so every (group,
+                // binding) pair maps into group 0.
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(bindingNumber)};
+                BindingPoint dstBindingPoint{0, shaderIndex};
+                if (srcBindingPoint != dstBindingPoint) {
+                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                }
+            }
+        }
+
+        tint::transform::Manager transformManager;
+        tint::transform::DataMap transformInputs;
+
+        // We only remap bindings for the target entry point, so we need to strip all other entry
+        // points to avoid generating invalid bindings for them.
+        transformManager.Add<tint::transform::SingleEntryPoint>();
+        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
+        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+        if (stage == SingleShaderStage::Vertex &&
+            GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+            transformManager.Add<tint::transform::VertexPulling>();
+            AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
+                                            kPullingBufferBindingSet, &transformInputs);
+
+            for (VertexBufferSlot slot :
+                 IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+                uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
+
+                // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
+                                             static_cast<uint8_t>(slot)};
+                BindingPoint dstBindingPoint{0, metalIndex};
+                if (srcBindingPoint != dstBindingPoint) {
+                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+                }
+            }
+        }
+        if (GetDevice()->IsRobustnessEnabled()) {
+            transformManager.Add<tint::transform::Robustness>();
+        }
+        transformManager.Add<tint::transform::BindingRemapper>();
+        transformManager.Add<tint::transform::Renamer>();
+
+        if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
+            // We still need to rename MSL reserved keywords
+            transformInputs.Add<tint::transform::Renamer::Config>(
+                tint::transform::Renamer::Target::kMslKeywords);
+        }
+
+        transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+                                                         std::move(accessControls),
+                                                         /* mayCollide */ true);
+
+        tint::Program program;
+        tint::transform::DataMap transformOutputs;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+            DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
+                                                   transformInputs, &transformOutputs, nullptr));
+        }
+
+        // Recover the (possibly renamed) entry point name from the Renamer
+        // output; with symbol renaming disabled the original name may survive.
+        if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+            auto it = data->remappings.find(entryPointName);
+            if (it != data->remappings.end()) {
+                *remappedEntryPointName = it->second;
+            } else {
+                DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
+                                "Could not find remapped name for entry point.");
+
+                *remappedEntryPointName = entryPointName;
+            }
+        } else {
+            return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+        }
+
+        tint::writer::msl::Options options;
+        options.buffer_size_ubo_index = kBufferLengthBufferSlot;
+        options.fixed_sample_mask = sampleMask;
+        options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+        options.emit_vertex_point_size =
+            stage == SingleShaderStage::Vertex &&
+            renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
+        auto result = tint::writer::msl::Generate(&program, options);
+        DAWN_INVALID_IF(!result.success, "An error occurred while generating MSL: %s.",
+                        result.error);
+
+        *needsStorageBufferLength = result.needs_storage_buffer_sizes;
+        *hasInvariantAttribute = result.has_invariant_attribute;
+        *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
+
+        // std::move here is deliberate: result.msl is a member of a local, so
+        // NRVO does not apply.
+        return std::move(result.msl);
+    }
+
+    // Compiles `entryPointName` to an MTLFunction: translates the module to
+    // MSL via TranslateToMSL, builds an MTLLibrary from the source, then
+    // fetches the (renamed) function, optionally specializing it with
+    // `constantValuesPointer` (an MTLFunctionConstantValues passed as `id`
+    // to keep @available out of the header; see ShaderModuleMTL.h).
+    MaybeError ShaderModule::CreateFunction(const char* entryPointName,
+                                            SingleShaderStage stage,
+                                            const PipelineLayout* layout,
+                                            ShaderModule::MetalFunctionData* out,
+                                            id constantValuesPointer,
+                                            uint32_t sampleMask,
+                                            const RenderPipeline* renderPipeline) {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
+
+        ASSERT(!IsError());
+        ASSERT(out);
+
+        // Vertex stages must specify a renderPipeline
+        if (stage == SingleShaderStage::Vertex) {
+            ASSERT(renderPipeline != nullptr);
+        }
+
+        std::string remappedEntryPointName;
+        std::string msl;
+        bool hasInvariantAttribute = false;
+        DAWN_TRY_ASSIGN(msl,
+                        TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
+                                       &remappedEntryPointName, &out->needsStorageBufferLength,
+                                       &hasInvariantAttribute, &out->workgroupAllocations));
+
+        // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
+        // category. -Wunused-variable in particular comes up a lot in generated code, and some
+        // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
+        // of a warning.
+        msl = R"(
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wall"
+#endif
+)" + msl;
+
+        if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
+            GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+        }
+
+        NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
+
+        NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
+        if (hasInvariantAttribute) {
+            // preserveInvariance keeps @invariant semantics in the compiled
+            // library; it only exists on newer OS versions.
+            if (@available(macOS 11.0, iOS 13.0, *)) {
+                (*compileOptions).preserveInvariance = true;
+            }
+        }
+        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+        NSError* error = nullptr;
+
+        NSPRef<id<MTLLibrary>> library;
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
+            library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
+                                                            options:compileOptions.Get()
+                                                              error:&error]);
+        }
+
+        // A compile *warning* still yields a usable library; only hard errors
+        // are fatal.
+        if (error != nullptr) {
+            DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
+                            "Unable to create library object: %s.",
+                            [error.localizedDescription UTF8String]);
+        }
+        ASSERT(library != nil);
+
+        NSRef<NSString> name =
+            AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
+
+        {
+            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
+            if (constantValuesPointer != nil) {
+                if (@available(macOS 10.12, *)) {
+                    // Static cast of the opaque `id` back to its real type;
+                    // see the workaround note in the header.
+                    MTLFunctionConstantValues* constantValues = constantValuesPointer;
+                    out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
+                                                                 constantValues:constantValues
+                                                                          error:&error]);
+                    if (error != nullptr) {
+                        if (error.code != MTLLibraryErrorCompileWarning) {
+                            return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
+                                                         [error.localizedDescription UTF8String]);
+                        }
+                    }
+                    ASSERT(out->function != nil);
+                } else {
+                    // Constant values are only passed on OSes that have the
+                    // API, so this branch is unreachable.
+                    UNREACHABLE();
+                }
+            } else {
+                out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
+            }
+        }
+
+        // Vertex pulling turns vertex buffers into storage buffers, so the
+        // length side-buffer is required whenever vertex inputs are used.
+        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+            GetEntryPoint(entryPointName).usedVertexInputs.any()) {
+            out->needsStorageBufferLength = true;
+        }
+
+        return {};
+    }
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h
new file mode 100644
index 00000000000..4400598db1d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERMETAL_H_
+#define DAWNNATIVE_STAGINGBUFFERMETAL_H_
+
+#include "dawn/native/StagingBuffer.h"
+
+#include "dawn/common/NSRef.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class Device;
+
+    // CPU-visible staging buffer backed by a shared-storage MTLBuffer. The
+    // mapped pointer is obtained once at Initialize and stays valid for the
+    // buffer's lifetime.
+    class StagingBuffer : public StagingBufferBase {
+      public:
+        StagingBuffer(size_t size, Device* device);
+
+        // Non-owning accessor for the underlying MTLBuffer.
+        id<MTLBuffer> GetBufferHandle() const;
+
+        // Allocates the MTLBuffer and maps its contents.
+        MaybeError Initialize() override;
+
+      private:
+        Device* mDevice;
+        NSPRef<id<MTLBuffer>> mBuffer;
+    };
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_STAGINGBUFFERMETAL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm
new file mode 100644
index 00000000000..a3fd91ff3a2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/StagingBufferMTL.mm
@@ -0,0 +1,46 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/DeviceMTL.h"
+
+namespace dawn::native::metal {
+
+    // Records the size and owning device; allocation happens in Initialize.
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    // Allocates a shared-storage (CPU-visible) MTLBuffer of GetSize() bytes
+    // and caches its mapped pointer in mMappedPointer.
+    MaybeError StagingBuffer::Initialize() {
+        const size_t bufferSize = GetSize();
+        mBuffer = AcquireNSPRef([mDevice->GetMTLDevice()
+            newBufferWithLength:bufferSize
+                        options:MTLResourceStorageModeShared]);
+
+        if (mBuffer == nullptr) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+        }
+
+        // [buffer contents] yields the CPU address of shared-storage memory.
+        mMappedPointer = [*mBuffer contents];
+        if (mMappedPointer == nullptr) {
+            return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
+        }
+
+        return {};
+    }
+
+    // Non-owning accessor for the backing MTLBuffer.
+    id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
+        return mBuffer.Get();
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h
new file mode 100644
index 00000000000..a0f1ccc1a24
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.h
@@ -0,0 +1,67 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_SWAPCHAINMTL_H_
+#define DAWNNATIVE_METAL_SWAPCHAINMTL_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/NSRef.h"
+
+@class CAMetalLayer;
+@protocol CAMetalDrawable;
+
+namespace dawn::native::metal {
+
+ class Device;
+ class Texture;
+
+    // Legacy swapchain path: texture acquisition is delegated to a
+    // user-supplied DawnWSI swapchain implementation (see
+    // OldSwapChain::GetNextTextureImpl in SwapChainMTL.mm).
+    class OldSwapChain final : public OldSwapChainBase {
+      public:
+        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+
+      protected:
+        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        ~OldSwapChain() override;
+        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+        MaybeError OnBeforePresent(TextureViewBase* view) override;
+    };
+
+    // Surface-based swapchain backed by a CAMetalLayer. Holds at most one
+    // in-flight drawable (mCurrentDrawable) and the wrapping Texture created
+    // for it; both are released on present or detach.
+    class SwapChain final : public NewSwapChainBase {
+      public:
+        static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor);
+        ~SwapChain() override;
+
+      private:
+        void DestroyImpl() override;
+
+        using NewSwapChainBase::NewSwapChainBase;
+        // Configures the surface's CAMetalLayer (size, format, device,
+        // vsync) after detaching any previous swapchain.
+        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+        // The surface's layer; not owned by this swapchain.
+        NSRef<CAMetalLayer> mLayer;
+
+        NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
+        Ref<Texture> mTexture;
+
+        MaybeError PresentImpl() override;
+        ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+        void DetachFromSurfaceImpl() override;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_SWAPCHAINMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm
new file mode 100644
index 00000000000..0291a41aee3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/SwapChainMTL.mm
@@ -0,0 +1,155 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/SwapChainMTL.h"
+
+#include "dawn/native/Surface.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+#include <dawn/dawn_wsi.h>
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace dawn::native::metal {
+
+ // OldSwapChain
+
+    // static
+    // Simple factory; the constructor performs all initialization.
+    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+        return AcquireRef(new OldSwapChain(device, descriptor));
+    }
+
+    // Hands the backend's MTLDevice/MTLQueue to the user-supplied WSI
+    // implementation via its Init callback.
+    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+        : OldSwapChainBase(device, descriptor) {
+        const auto& im = GetImplementation();
+        DawnWSIContextMetal wsiContext = {};
+        wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
+        wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
+        im.Init(im.userData, &wsiContext);
+    }
+
+    // No backend-owned resources to release; base class handles teardown.
+    OldSwapChain::~OldSwapChain() {
+    }
+
+    // Asks the WSI implementation for the next texture and wraps the raw
+    // MTLTexture it returns. Returns nullptr (after reporting an internal
+    // error on the device) if the implementation fails.
+    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+        const auto& im = GetImplementation();
+        DawnSwapChainNextTexture next = {};
+        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+        if (error) {
+            GetDevice()->HandleError(InternalErrorType::Internal, error);
+            return nullptr;
+        }
+
+        id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
+
+        // Detach() transfers the reference to the caller, per the
+        // GetNextTextureImpl contract (raw pointer return).
+        return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+    }
+
+    // Nothing to do before present on Metal; the WSI implementation presents.
+    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+        return {};
+    }
+
+ // SwapChain
+
+    // static
+    // Allocates the swapchain and configures the surface's CAMetalLayer,
+    // detaching `previousSwapChain` if one exists.
+    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                    Surface* surface,
+                                                    NewSwapChainBase* previousSwapChain,
+                                                    const SwapChainDescriptor* descriptor) {
+        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+        DAWN_TRY(swapchain->Initialize(previousSwapChain));
+        return swapchain;
+    }
+
+    // Drawable/texture cleanup happens in DestroyImpl/DetachFromSurfaceImpl.
+    SwapChain::~SwapChain() = default;
+
+    // Runs base destruction, then drops the current drawable/texture via the
+    // surface-detach path.
+    void SwapChain::DestroyImpl() {
+        SwapChainBase::DestroyImpl();
+        DetachFromSurface();
+    }
+
+    // Takes over the surface's CAMetalLayer and configures it to match this
+    // swapchain's size, usage, device, pixel format, and present mode.
+    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+        ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
+
+        if (previousSwapChain != nullptr) {
+            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+            // multiple backends one after the other. It probably needs to block until the backend
+            // and GPU are completely finished with the previous swapchain.
+            DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
+                            "Metal SwapChain cannot switch backend types from %s to %s.",
+                            previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
+
+            previousSwapChain->DetachFromSurface();
+        }
+
+        mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
+        ASSERT(mLayer != nullptr);
+
+        CGSize size = {};
+        size.width = GetWidth();
+        size.height = GetHeight();
+        [*mLayer setDrawableSize:size];
+
+        // framebufferOnly is an optimization hint that is only legal when the
+        // texture is used purely as a render attachment.
+        [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
+        [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
+        [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
+
+#if defined(DAWN_PLATFORM_MACOS)
+        // Immediate mode maps to disabling display sync (macOS-only API).
+        if (@available(macos 10.13, *)) {
+            [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
+        }
+#endif  // defined(DAWN_PLATFORM_MACOS)
+
+        // There is no way to control Fifo vs. Mailbox in Metal.
+
+        return {};
+    }
+
+    // Presents the current drawable, then destroys the wrapping texture and
+    // releases the drawable so the next frame starts fresh.
+    MaybeError SwapChain::PresentImpl() {
+        ASSERT(mCurrentDrawable != nullptr);
+        [*mCurrentDrawable present];
+
+        mTexture->APIDestroy();
+        mTexture = nullptr;
+
+        mCurrentDrawable = nullptr;
+
+        return {};
+    }
+
+    // Acquires the next drawable from the layer, wraps its texture, and
+    // returns a view of it. Must not be called while a drawable is pending.
+    // NOTE(review): nextDrawable may return nil (e.g. under drawable
+    // pressure) and is not checked here — confirm whether CreateWrapping
+    // tolerates a nil texture.
+    ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+        ASSERT(mCurrentDrawable == nullptr);
+        mCurrentDrawable = [*mLayer nextDrawable];
+
+        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+
+        mTexture = Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc,
+                                           [*mCurrentDrawable texture]);
+        // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+        return mTexture->APICreateView();
+    }
+
+    // Drops the pending drawable and its wrapping texture, if any. The
+    // texture and drawable are always set/cleared together (asserted).
+    void SwapChain::DetachFromSurfaceImpl() {
+        ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
+
+        if (mTexture != nullptr) {
+            mTexture->APIDestroy();
+            mTexture = nullptr;
+
+            mCurrentDrawable = nullptr;
+        }
+    }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h
new file mode 100644
index 00000000000..0ac8103c350
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.h
@@ -0,0 +1,96 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_TEXTUREMTL_H_
+#define DAWNNATIVE_METAL_TEXTUREMTL_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/common/NSRef.h"
+#include "dawn/native/DawnNative.h"
+
+#include <IOSurface/IOSurfaceRef.h>
+#import <Metal/Metal.h>
+
+namespace dawn::native::metal {
+
+ class CommandRecordingContext;
+ class Device;
+
+    // Converts a WebGPU texture format to its Metal pixel format.
+    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
+    // Checks that the given IOSurface plane can legally back a texture with
+    // this descriptor on this device.
+    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
+                                             const TextureDescriptor* descriptor,
+                                             IOSurfaceRef ioSurface,
+                                             uint32_t plane);
+
+    // Metal implementation of a WebGPU texture. The backing MTLTexture can
+    // be created internally, wrapped around an IOSurface plane, or wrap an
+    // externally supplied MTLTexture (e.g. a swapchain drawable).
+    class Texture final : public TextureBase {
+      public:
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor);
+        static ResultOrError<Ref<Texture>> CreateFromIOSurface(
+            Device* device,
+            const ExternalImageDescriptor* descriptor,
+            IOSurfaceRef ioSurface,
+            uint32_t plane);
+        // Wraps an existing MTLTexture without validation; used for
+        // swapchain drawables.
+        static Ref<Texture> CreateWrapping(Device* device,
+                                           const TextureDescriptor* descriptor,
+                                           NSPRef<id<MTLTexture>> wrapped);
+
+        id<MTLTexture> GetMTLTexture();
+        // Returns a texture view reinterpreting the contents as `format`.
+        NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
+
+        // Lazily clears any uninitialized subresources in `range` before use.
+        void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                 const SubresourceRange& range);
+
+      private:
+        using TextureBase::TextureBase;
+        ~Texture() override;
+
+        NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
+
+        MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
+        MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                           const TextureDescriptor* textureDescriptor,
+                                           IOSurfaceRef ioSurface,
+                                           uint32_t plane);
+        void InitializeAsWrapping(const TextureDescriptor* descriptor,
+                                  NSPRef<id<MTLTexture>> wrapped);
+
+        void DestroyImpl() override;
+
+        MaybeError ClearTexture(CommandRecordingContext* commandContext,
+                                const SubresourceRange& range,
+                                TextureBase::ClearValue clearValue);
+
+        NSPRef<id<MTLTexture>> mMtlTexture;
+        NSPRef<id<MTLTexture>> mMtlTexture;
+        MTLTextureUsage mMtlUsage;
+    };
+
+    // Metal implementation of a WebGPU texture view; holds its own
+    // MTLTexture view object when one is needed.
+    class TextureView final : public TextureViewBase {
+      public:
+        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                      const TextureViewDescriptor* descriptor);
+
+        id<MTLTexture> GetMTLTexture();
+
+      private:
+        using TextureViewBase::TextureViewBase;
+        MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+        NSPRef<id<MTLTexture>> mMtlTextureView;
+    };
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_TEXTUREMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm
new file mode 100644
index 00000000000..bee2a1e7d25
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/TextureMTL.mm
@@ -0,0 +1,866 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/TextureMTL.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/common/Platform.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/StagingBufferMTL.h"
+#include "dawn/native/metal/UtilsMetal.h"
+
+#include <CoreVideo/CVPixelBuffer.h>
+
+namespace dawn::native::metal {
+
+    namespace {
+        // True when |usage| contains a binding type that is read or written from
+        // shaders, i.e. the texture must be exposed through an MTLTexture view.
+        bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+            constexpr wgpu::TextureUsage kUsageNeedsTextureView =
+                wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+            return usage & kUsageNeedsTextureView;
+        }
+
+        // Translates a wgpu usage mask (plus format/sample count) into the
+        // MTLTextureUsage bits to set on the texture descriptor.
+        MTLTextureUsage MetalTextureUsage(const Format& format,
+                                          wgpu::TextureUsage usage,
+                                          uint32_t sampleCount) {
+            MTLTextureUsage result = MTLTextureUsageUnknown;  // This is 0
+
+            if (usage & (wgpu::TextureUsage::StorageBinding)) {
+                result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
+            }
+
+            if (usage & (wgpu::TextureUsage::TextureBinding)) {
+                result |= MTLTextureUsageShaderRead;
+
+                // For sampling stencil aspect of combined depth/stencil. See TextureView
+                // constructor.
+                if (@available(macOS 10.12, iOS 10.0, *)) {
+                    if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+                        result |= MTLTextureUsagePixelFormatView;
+                    }
+                }
+            }
+
+            // MTLTextureUsageRenderTarget is needed to clear multisample textures.
+            if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
+                result |= MTLTextureUsageRenderTarget;
+            }
+
+            return result;
+        }
+
+        // Maps a wgpu view dimension to the corresponding MTLTextureType,
+        // choosing the multisample variant for sampled 2D views.
+        MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
+                                            unsigned int sampleCount) {
+            switch (dimension) {
+                case wgpu::TextureViewDimension::e1D:
+                    return MTLTextureType1D;
+                case wgpu::TextureViewDimension::e2D:
+                    return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
+                case wgpu::TextureViewDimension::e2DArray:
+                    return MTLTextureType2DArray;
+                case wgpu::TextureViewDimension::Cube:
+                    return MTLTextureTypeCube;
+                case wgpu::TextureViewDimension::CubeArray:
+                    return MTLTextureTypeCubeArray;
+                case wgpu::TextureViewDimension::e3D:
+                    return MTLTextureType3D;
+
+                case wgpu::TextureViewDimension::Undefined:
+                    UNREACHABLE();
+            }
+        }
+
+        // Returns true when the view cannot simply alias the texture's own
+        // MTLTexture and a distinct MTLTexture view object must be created
+        // (format/mip/layer mismatch, stencil-only aspect, or cube views).
+        bool RequiresCreatingNewTextureView(const TextureBase* texture,
+                                            const TextureViewDescriptor* textureViewDescriptor) {
+            if (texture->GetFormat().format != textureViewDescriptor->format) {
+                return true;
+            }
+
+            if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount) {
+                return true;
+            }
+
+            if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+                return true;
+            }
+
+            // Stencil-only views of combined depth/stencil need the X32/X24 formats.
+            if (IsSubset(Aspect::Depth | Aspect::Stencil, texture->GetFormat().aspects) &&
+                textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+                return true;
+            }
+
+            switch (textureViewDescriptor->dimension) {
+                case wgpu::TextureViewDimension::Cube:
+                case wgpu::TextureViewDimension::CubeArray:
+                    return true;
+                default:
+                    break;
+            }
+
+            return false;
+        }
+
+        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+        // between linear space and sRGB without setting MTLTextureUsagePixelFormatView flag. For
+        // example, creating bgra8Unorm texture view on rgba8Unorm texture or creating
+        // rgba8Unorm_srgb texture view on rgab8Unorm texture.
+        bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
+                                                    MTLPixelFormat reinterpretation) {
+            switch (origin) {
+                case MTLPixelFormatRGBA8Unorm:
+                    return reinterpretation == MTLPixelFormatBGRA8Unorm ||
+                           reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
+                case MTLPixelFormatBGRA8Unorm:
+                    return reinterpretation == MTLPixelFormatRGBA8Unorm ||
+                           reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
+                case MTLPixelFormatRGBA8Unorm_sRGB:
+                    return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
+                           reinterpretation == MTLPixelFormatRGBA8Unorm;
+                case MTLPixelFormatBGRA8Unorm_sRGB:
+                    return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
+                           reinterpretation == MTLPixelFormatBGRA8Unorm;
+#if defined(DAWN_PLATFORM_MACOS)
+                // BC (DXT) formats only exist on macOS; linear <-> sRGB pairs only.
+                case MTLPixelFormatBC1_RGBA:
+                    return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
+                case MTLPixelFormatBC1_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC1_RGBA;
+                case MTLPixelFormatBC2_RGBA:
+                    return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
+                case MTLPixelFormatBC2_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC2_RGBA;
+                case MTLPixelFormatBC3_RGBA:
+                    return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
+                case MTLPixelFormatBC3_RGBA_sRGB:
+                    return reinterpretation == MTLPixelFormatBC3_RGBA;
+                case MTLPixelFormatBC7_RGBAUnorm:
+                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
+                case MTLPixelFormatBC7_RGBAUnorm_sRGB:
+                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
+#endif
+
+                default:
+                    return false;
+            }
+        }
+
+        // Maps a CoreVideo pixel-format FourCC to the wgpu format Dawn exposes
+        // for it; returns a validation error for unsupported IOSurface formats.
+        ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+            switch (format) {
+                case kCVPixelFormatType_64RGBAHalf:
+                    return wgpu::TextureFormat::RGBA16Float;
+                case kCVPixelFormatType_TwoComponent16Half:
+                    return wgpu::TextureFormat::RG16Float;
+                case kCVPixelFormatType_OneComponent16Half:
+                    return wgpu::TextureFormat::R16Float;
+                case kCVPixelFormatType_ARGB2101010LEPacked:
+                    return wgpu::TextureFormat::RGB10A2Unorm;
+                case kCVPixelFormatType_32RGBA:
+                    return wgpu::TextureFormat::RGBA8Unorm;
+                case kCVPixelFormatType_32BGRA:
+                    return wgpu::TextureFormat::BGRA8Unorm;
+                case kCVPixelFormatType_TwoComponent8:
+                    return wgpu::TextureFormat::RG8Unorm;
+                case kCVPixelFormatType_OneComponent8:
+                    return wgpu::TextureFormat::R8Unorm;
+                default:
+                    return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).",
+                                                        format);
+            }
+        }
+
+        // IOSurface-backed textures use Managed storage on macOS and Private on iOS.
+#if defined(DAWN_PLATFORM_MACOS)
+        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
+#elif defined(DAWN_PLATFORM_IOS)
+        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
+#else
+#    error "Unsupported Apple platform."
+#endif
+    }
+
+    // Translates a wgpu::TextureFormat into its MTLPixelFormat equivalent.
+    // Formats the Metal backend does not implement (ETC2/EAC, ASTC, Stencil8,
+    // NV12-style biplanar, and BC/Depth24UnormStencil8 off-macOS) hit
+    // UNREACHABLE(): validation is expected to reject them earlier.
+    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
+        switch (format) {
+            case wgpu::TextureFormat::R8Unorm:
+                return MTLPixelFormatR8Unorm;
+            case wgpu::TextureFormat::R8Snorm:
+                return MTLPixelFormatR8Snorm;
+            case wgpu::TextureFormat::R8Uint:
+                return MTLPixelFormatR8Uint;
+            case wgpu::TextureFormat::R8Sint:
+                return MTLPixelFormatR8Sint;
+
+            case wgpu::TextureFormat::R16Uint:
+                return MTLPixelFormatR16Uint;
+            case wgpu::TextureFormat::R16Sint:
+                return MTLPixelFormatR16Sint;
+            case wgpu::TextureFormat::R16Float:
+                return MTLPixelFormatR16Float;
+            case wgpu::TextureFormat::RG8Unorm:
+                return MTLPixelFormatRG8Unorm;
+            case wgpu::TextureFormat::RG8Snorm:
+                return MTLPixelFormatRG8Snorm;
+            case wgpu::TextureFormat::RG8Uint:
+                return MTLPixelFormatRG8Uint;
+            case wgpu::TextureFormat::RG8Sint:
+                return MTLPixelFormatRG8Sint;
+
+            case wgpu::TextureFormat::R32Uint:
+                return MTLPixelFormatR32Uint;
+            case wgpu::TextureFormat::R32Sint:
+                return MTLPixelFormatR32Sint;
+            case wgpu::TextureFormat::R32Float:
+                return MTLPixelFormatR32Float;
+            case wgpu::TextureFormat::RG16Uint:
+                return MTLPixelFormatRG16Uint;
+            case wgpu::TextureFormat::RG16Sint:
+                return MTLPixelFormatRG16Sint;
+            case wgpu::TextureFormat::RG16Float:
+                return MTLPixelFormatRG16Float;
+            case wgpu::TextureFormat::RGBA8Unorm:
+                return MTLPixelFormatRGBA8Unorm;
+            case wgpu::TextureFormat::RGBA8UnormSrgb:
+                return MTLPixelFormatRGBA8Unorm_sRGB;
+            case wgpu::TextureFormat::RGBA8Snorm:
+                return MTLPixelFormatRGBA8Snorm;
+            case wgpu::TextureFormat::RGBA8Uint:
+                return MTLPixelFormatRGBA8Uint;
+            case wgpu::TextureFormat::RGBA8Sint:
+                return MTLPixelFormatRGBA8Sint;
+            case wgpu::TextureFormat::BGRA8Unorm:
+                return MTLPixelFormatBGRA8Unorm;
+            case wgpu::TextureFormat::BGRA8UnormSrgb:
+                return MTLPixelFormatBGRA8Unorm_sRGB;
+            case wgpu::TextureFormat::RGB10A2Unorm:
+                return MTLPixelFormatRGB10A2Unorm;
+            case wgpu::TextureFormat::RG11B10Ufloat:
+                return MTLPixelFormatRG11B10Float;
+            case wgpu::TextureFormat::RGB9E5Ufloat:
+                return MTLPixelFormatRGB9E5Float;
+
+            case wgpu::TextureFormat::RG32Uint:
+                return MTLPixelFormatRG32Uint;
+            case wgpu::TextureFormat::RG32Sint:
+                return MTLPixelFormatRG32Sint;
+            case wgpu::TextureFormat::RG32Float:
+                return MTLPixelFormatRG32Float;
+            case wgpu::TextureFormat::RGBA16Uint:
+                return MTLPixelFormatRGBA16Uint;
+            case wgpu::TextureFormat::RGBA16Sint:
+                return MTLPixelFormatRGBA16Sint;
+            case wgpu::TextureFormat::RGBA16Float:
+                return MTLPixelFormatRGBA16Float;
+
+            case wgpu::TextureFormat::RGBA32Uint:
+                return MTLPixelFormatRGBA32Uint;
+            case wgpu::TextureFormat::RGBA32Sint:
+                return MTLPixelFormatRGBA32Sint;
+            case wgpu::TextureFormat::RGBA32Float:
+                return MTLPixelFormatRGBA32Float;
+
+            case wgpu::TextureFormat::Depth32Float:
+                return MTLPixelFormatDepth32Float;
+            // Depth24Plus is backed by a 32-bit float depth texture on Metal.
+            case wgpu::TextureFormat::Depth24Plus:
+                return MTLPixelFormatDepth32Float;
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                return MTLPixelFormatDepth32Float_Stencil8;
+            case wgpu::TextureFormat::Depth16Unorm:
+                if (@available(macOS 10.12, iOS 13.0, *)) {
+                    return MTLPixelFormatDepth16Unorm;
+                } else {
+                    // TODO (dawn:1181): Allow non-conformant implementation on macOS 10.11
+                    UNREACHABLE();
+                }
+
+#if defined(DAWN_PLATFORM_MACOS)
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                return MTLPixelFormatDepth24Unorm_Stencil8;
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+                return MTLPixelFormatBC1_RGBA;
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+                return MTLPixelFormatBC1_RGBA_sRGB;
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+                return MTLPixelFormatBC2_RGBA;
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+                return MTLPixelFormatBC2_RGBA_sRGB;
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+                return MTLPixelFormatBC3_RGBA;
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+                return MTLPixelFormatBC3_RGBA_sRGB;
+            case wgpu::TextureFormat::BC4RSnorm:
+                return MTLPixelFormatBC4_RSnorm;
+            case wgpu::TextureFormat::BC4RUnorm:
+                return MTLPixelFormatBC4_RUnorm;
+            case wgpu::TextureFormat::BC5RGSnorm:
+                return MTLPixelFormatBC5_RGSnorm;
+            case wgpu::TextureFormat::BC5RGUnorm:
+                return MTLPixelFormatBC5_RGUnorm;
+            case wgpu::TextureFormat::BC6HRGBFloat:
+                return MTLPixelFormatBC6H_RGBFloat;
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+                return MTLPixelFormatBC6H_RGBUfloat;
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+                return MTLPixelFormatBC7_RGBAUnorm;
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+                return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+#else
+            // BC and Depth24Unorm_Stencil8 are macOS-only; fall through to UNREACHABLE().
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+
+            case wgpu::TextureFormat::BC1RGBAUnorm:
+            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC2RGBAUnorm:
+            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC3RGBAUnorm:
+            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            case wgpu::TextureFormat::BC4RSnorm:
+            case wgpu::TextureFormat::BC4RUnorm:
+            case wgpu::TextureFormat::BC5RGSnorm:
+            case wgpu::TextureFormat::BC5RGUnorm:
+            case wgpu::TextureFormat::BC6HRGBFloat:
+            case wgpu::TextureFormat::BC6HRGBUfloat:
+            case wgpu::TextureFormat::BC7RGBAUnorm:
+            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+#endif
+
+            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+            case wgpu::TextureFormat::ETC2RGB8Unorm:
+            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            case wgpu::TextureFormat::EACR11Unorm:
+            case wgpu::TextureFormat::EACR11Snorm:
+            case wgpu::TextureFormat::EACRG11Unorm:
+            case wgpu::TextureFormat::EACRG11Snorm:
+
+            case wgpu::TextureFormat::ASTC4x4Unorm:
+            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x4Unorm:
+            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            case wgpu::TextureFormat::ASTC5x5Unorm:
+            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x5Unorm:
+            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC6x6Unorm:
+            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x5Unorm:
+            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x6Unorm:
+            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC8x8Unorm:
+            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x5Unorm:
+            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x6Unorm:
+            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x8Unorm:
+            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            case wgpu::TextureFormat::ASTC10x10Unorm:
+            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x10Unorm:
+            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            case wgpu::TextureFormat::ASTC12x12Unorm:
+            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+            // TODO(dawn:666): implement stencil8
+            case wgpu::TextureFormat::Stencil8:
+            case wgpu::TextureFormat::Undefined:
+                UNREACHABLE();
+        }
+    }
+
+    // Validates that |descriptor| describes exactly the given IOSurface plane:
+    // 2D, single mip/layer/sample, matching extent, and a format equivalent to
+    // the surface's pixel format.
+    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
+                                             const TextureDescriptor* descriptor,
+                                             IOSurfaceRef ioSurface,
+                                             uint32_t plane) {
+        // IOSurfaceGetPlaneCount can return 0 for non-planar IOSurfaces but we will treat
+        // non-planar like it is a single plane.
+        size_t surfacePlaneCount = std::max(size_t(1), IOSurfaceGetPlaneCount(ioSurface));
+        DAWN_INVALID_IF(plane >= surfacePlaneCount,
+                        "IOSurface plane (%u) exceeds the surface's plane count (%u).", plane,
+                        surfacePlaneCount);
+
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "Texture dimension (%s) is not %s.", descriptor->dimension,
+                        wgpu::TextureDimension::e2D);
+
+        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                        descriptor->mipLevelCount);
+
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                        descriptor->sampleCount);
+
+        uint32_t surfaceWidth = IOSurfaceGetWidthOfPlane(ioSurface, plane);
+        uint32_t surfaceHeight = IOSurfaceGetHeightOfPlane(ioSurface, plane);
+
+        DAWN_INVALID_IF(
+            descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
+                descriptor->size.depthOrArrayLayers != 1,
+            "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
+            surfaceWidth, surfaceHeight, &descriptor->size);
+
+        wgpu::TextureFormat ioSurfaceFormat;
+        DAWN_TRY_ASSIGN(ioSurfaceFormat,
+                        GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
+        DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
+                        "IOSurface format (%s) doesn't match the descriptor format (%s).",
+                        ioSurfaceFormat, descriptor->format);
+
+        return {};
+    }
+
+    // Builds an MTLTextureDescriptor that mirrors this texture's wgpu state
+    // (size, format, usage, mips, sample count) with Private storage by default.
+    NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
+        NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+        MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+        mtlDesc.width = GetWidth();
+        mtlDesc.sampleCount = GetSampleCount();
+        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+        // between linear space and sRGB. For example, creating bgra8Unorm texture view on
+        // rgba8Unorm texture or creating rgba8Unorm_srgb texture view on rgab8Unorm texture.
+        // TODO: add MTLTextureUsagePixelFormatView when needed when we support other format
+        // reinterpretation.
+        mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
+        mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
+        mtlDesc.mipmapLevelCount = GetNumMipLevels();
+        mtlDesc.storageMode = MTLStorageModePrivate;
+
+        // Choose the correct MTLTextureType and paper over differences in how the array layer count
+        // is specified.
+        switch (GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                mtlDesc.arrayLength = 1;
+                mtlDesc.depth = 1;
+                ASSERT(mtlDesc.sampleCount == 1);
+                mtlDesc.textureType = MTLTextureType1D;
+                break;
+
+            case wgpu::TextureDimension::e2D:
+                mtlDesc.height = GetHeight();
+                mtlDesc.arrayLength = GetArrayLayers();
+                mtlDesc.depth = 1;
+                if (mtlDesc.arrayLength > 1) {
+                    // Metal has no multisampled 2D-array texture type.
+                    ASSERT(mtlDesc.sampleCount == 1);
+                    mtlDesc.textureType = MTLTextureType2DArray;
+                } else if (mtlDesc.sampleCount > 1) {
+                    mtlDesc.textureType = MTLTextureType2DMultisample;
+                } else {
+                    mtlDesc.textureType = MTLTextureType2D;
+                }
+                break;
+            case wgpu::TextureDimension::e3D:
+                mtlDesc.height = GetHeight();
+                mtlDesc.depth = GetDepth();
+                mtlDesc.arrayLength = 1;
+                ASSERT(mtlDesc.sampleCount == 1);
+                mtlDesc.textureType = MTLTextureType3D;
+                break;
+        }
+
+        return mtlDescRef;
+    }
+
+    // static
+    // Creates and initializes an internally-owned texture from |descriptor|.
+    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                                const TextureDescriptor* descriptor) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+        DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
+        return texture;
+    }
+
+    // static
+    // Creates a texture backed by one plane of an IOSurface. The texture
+    // descriptor is carried inside the external-image descriptor.
+    ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(
+        Device* device,
+        const ExternalImageDescriptor* descriptor,
+        IOSurfaceRef ioSurface,
+        uint32_t plane) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
+        DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface, plane));
+        return texture;
+    }
+
+    // static
+    // Creates a texture that adopts |wrapped|, an existing MTLTexture.
+    // Infallible: wrapping performs no Metal allocation.
+    Ref<Texture> Texture::CreateWrapping(Device* device,
+                                         const TextureDescriptor* descriptor,
+                                         NSPRef<id<MTLTexture>> wrapped) {
+        Ref<Texture> texture =
+            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+        texture->InitializeAsWrapping(descriptor, std::move(wrapped));
+        return texture;
+    }
+
+    // Allocates the backing MTLTexture and, when the testing toggle is on,
+    // fills it with a nonzero pattern so uninitialized reads are detectable.
+    MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
+        Device* device = ToBackend(GetDevice());
+
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        mMtlUsage = [*mtlDesc usage];
+        mMtlTexture =
+            AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
+
+        if (mMtlTexture == nil) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
+        }
+
+        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+            DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
+                                  TextureBase::ClearValue::NonZero));
+        }
+
+        return {};
+    }
+
+    // Adopts |wrapped| as the backing texture; only the usage bits are derived
+    // from a freshly-built descriptor, no Metal allocation happens.
+    void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
+                                       NSPRef<id<MTLTexture>> wrapped) {
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        mMtlUsage = [*mtlDesc usage];
+        mMtlTexture = std::move(wrapped);
+    }
+
+    // Creates the backing MTLTexture directly on top of the IOSurface plane and
+    // records the caller-provided initialization state for lazy clearing.
+    MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                                const TextureDescriptor* textureDescriptor,
+                                                IOSurfaceRef ioSurface,
+                                                uint32_t plane) {
+        Device* device = ToBackend(GetDevice());
+
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        // IOSurface-backed textures cannot use Private storage; see kIOSurfaceStorageMode.
+        [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
+
+        mMtlUsage = [*mtlDesc usage];
+        // NOTE(review): unlike InitializeAsInternalTexture, a nil result from
+        // newTextureWithDescriptor is not turned into an OOM error here —
+        // confirm whether a nil check was intentionally omitted.
+        mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
+                                                                           iosurface:ioSurface
+                                                                               plane:plane]);
+
+        SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
+
+        return {};
+    }
+
+    // Default destruction; the NSPRef member releases the MTLTexture.
+    Texture::~Texture() {
+    }
+
+    // Releases the Metal resource eagerly when the API texture is destroyed.
+    void Texture::DestroyImpl() {
+        TextureBase::DestroyImpl();
+        mMtlTexture = nullptr;
+    }
+
+    // Non-owning accessor; nil after DestroyImpl() has run.
+    id<MTLTexture> Texture::GetMTLTexture() {
+        return mMtlTexture.Get();
+    }
+
+    // Returns the backing texture when |format| matches, otherwise a new
+    // reinterpreting view. Only swizzle/sRGB-compatible reinterpretations are
+    // allowed (asserted), since MTLTextureUsagePixelFormatView is not set.
+    NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
+        if (GetFormat().format == format) {
+            return mMtlTexture;
+        }
+
+        ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
+                                                      MetalPixelFormat(format)));
+        return AcquireNSPRef(
+            [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
+    }
+
+    // Clears every subresource in |range| to 0 (Zero) or 1 (NonZero).
+    // Render-target-capable textures are cleared with render passes (load =
+    // Clear); everything else is cleared by copying a pre-filled staging buffer
+    // into each subresource with the blit encoder.
+    MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+                                     const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        Device* device = ToBackend(GetDevice());
+
+        // Byte pattern for the buffer path, scalar for the render-pass path.
+        const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+        const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
+
+        if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
+            ASSERT(GetFormat().isRenderable);
+
+            // End the blit encoder if it is open.
+            commandContext->EndBlit();
+
+            if (GetFormat().HasDepthOrStencil()) {
+                // Create a render pass to clear each subresource.
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                level, arrayLayer, range.aspects))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        // Note that this creates a descriptor that's autoreleased so we don't use
+                        // AcquireNSRef
+                        NSRef<MTLRenderPassDescriptor> descriptorRef =
+                            [MTLRenderPassDescriptor renderPassDescriptor];
+                        MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+                        // At least one aspect needs clearing. Iterate the aspects individually to
+                        // determine which to clear.
+                        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                            if (clearValue == TextureBase::ClearValue::Zero &&
+                                IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                    level, arrayLayer, aspect))) {
+                                // Skip lazy clears if already initialized.
+                                continue;
+                            }
+
+                            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+                            switch (aspect) {
+                                case Aspect::Depth:
+                                    descriptor.depthAttachment.texture = GetMTLTexture();
+                                    descriptor.depthAttachment.level = level;
+                                    descriptor.depthAttachment.slice = arrayLayer;
+                                    descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+                                    descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+                                    descriptor.depthAttachment.clearDepth = dClearColor;
+                                    break;
+                                case Aspect::Stencil:
+                                    descriptor.stencilAttachment.texture = GetMTLTexture();
+                                    descriptor.stencilAttachment.level = level;
+                                    descriptor.stencilAttachment.slice = arrayLayer;
+                                    descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+                                    descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+                                    descriptor.stencilAttachment.clearStencil =
+                                        static_cast<uint32_t>(clearColor);
+                                    break;
+                                default:
+                                    UNREACHABLE();
+                            }
+                        }
+
+                        // An empty pass: the Clear load action performs the clear.
+                        commandContext->BeginRender(descriptor);
+                        commandContext->EndRender();
+                    }
+                }
+            } else {
+                ASSERT(GetFormat().IsColor());
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    // Create multiple render passes with each subresource as a color attachment to
+                    // clear them all. Only do this for array layers to ensure all attachments have
+                    // the same size.
+                    NSRef<MTLRenderPassDescriptor> descriptor;
+                    uint32_t attachment = 0;
+
+                    uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
+
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                                level, arrayLayer, Aspect::Color))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        for (uint32_t z = 0; z < numZSlices; ++z) {
+                            if (descriptor == nullptr) {
+                                // Note that this creates a descriptor that's autoreleased so we
+                                // don't use AcquireNSRef
+                                descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
+                            }
+
+                            [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
+                            [*descriptor colorAttachments][attachment].loadAction =
+                                MTLLoadActionClear;
+                            [*descriptor colorAttachments][attachment].storeAction =
+                                MTLStoreActionStore;
+                            [*descriptor colorAttachments][attachment].clearColor =
+                                MTLClearColorMake(dClearColor, dClearColor, dClearColor,
+                                                  dClearColor);
+                            [*descriptor colorAttachments][attachment].level = level;
+                            [*descriptor colorAttachments][attachment].slice = arrayLayer;
+                            [*descriptor colorAttachments][attachment].depthPlane = z;
+
+                            attachment++;
+
+                            // Flush once all color-attachment slots are filled.
+                            if (attachment == kMaxColorAttachments) {
+                                attachment = 0;
+                                commandContext->BeginRender(descriptor.Get());
+                                commandContext->EndRender();
+                                descriptor = nullptr;
+                            }
+                        }
+                    }
+
+                    // Flush the partially-filled pass, if any.
+                    if (descriptor != nullptr) {
+                        commandContext->BeginRender(descriptor.Get());
+                        commandContext->EndRender();
+                    }
+                }
+            }
+        } else {
+            // Non-renderable path: fill a staging buffer with the clear byte and
+            // blit it into every subresource, one aspect at a time.
+            Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
+
+            // Encode a buffer to texture copy to clear each subresource.
+            for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                // Compute the buffer size big enough to fill the largest mip.
+                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+                // Metal validation layers: sourceBytesPerRow must be at least 64.
+                uint32_t largestMipBytesPerRow =
+                    std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
+
+                // Metal validation layers: sourceBytesPerImage must be at least 512.
+                uint64_t largestMipBytesPerImage =
+                    std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
+                                 (largestMipSize.height / blockInfo.height),
+                             512llu);
+
+                uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
+
+                if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
+                    return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+                }
+
+                DynamicUploader* uploader = device->GetDynamicUploader();
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle,
+                                uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                                   blockInfo.byteSize));
+                memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+                id<MTLBuffer> uploadBuffer =
+                    ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; ++level) {
+                    Extent3D virtualSize = GetMipLevelVirtualSize(level);
+
+                    for (uint32_t arrayLayer = range.baseArrayLayer;
+                         arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+
+                        MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
+                        [commandContext->EnsureBlit()
+                               copyFromBuffer:uploadBuffer
+                                 sourceOffset:uploadHandle.startOffset
+                            sourceBytesPerRow:largestMipBytesPerRow
+                          sourceBytesPerImage:largestMipBytesPerImage
+                                   sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
+                                                          virtualSize.depthOrArrayLayers)
+                                    toTexture:GetMTLTexture()
+                             destinationSlice:arrayLayer
+                             destinationLevel:level
+                            destinationOrigin:MTLOriginMake(0, 0, 0)
+                                      options:blitOption];
+                    }
+                }
+            }
+        }
+
+        // Only a Zero clear marks the range initialized (lazy-clear accounting).
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            device->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    // Zero-clears any not-yet-initialized subresource in |range| before first
+    // use. No-op when the LazyClearResourceOnFirstUse toggle is disabled.
+    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                      const SubresourceRange& range) {
+        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (!IsSubresourceContentInitialized(range)) {
+            // If subresource has not been initialized, clear it to black as it could
+            // contain dirty bits from recycled memory
+            GetDevice()->ConsumedError(
+                ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+        }
+    }
+
+    // static
+    // Creates and initializes a view of |texture| described by |descriptor|.
+    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                        const TextureViewDescriptor* descriptor) {
+        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+        DAWN_TRY(view->Initialize(descriptor));
+        return view;
+    }
+
+    // Decides whether the view needs no MTLTexture, can alias the texture's own
+    // MTLTexture, or must be a distinct MTLTexture view (reinterpreted format,
+    // sub-range of mips/layers, stencil-only aspect, or cube view type).
+    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+        Texture* texture = ToBackend(GetTexture());
+
+        // Texture could be destroyed by the time we make a view.
+        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+            return {};
+        }
+
+        id<MTLTexture> mtlTexture = texture->GetMTLTexture();
+
+        if (!UsageNeedsTextureView(texture->GetInternalUsage())) {
+            mMtlTextureView = nullptr;
+        } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+            mMtlTextureView = mtlTexture;
+        } else {
+            MTLPixelFormat format = MetalPixelFormat(descriptor->format);
+            if (descriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+                // Stencil-only sampling uses the X32/X24 stencil view formats.
+                if (@available(macOS 10.12, iOS 10.0, *)) {
+                    if (format == MTLPixelFormatDepth32Float_Stencil8) {
+                        format = MTLPixelFormatX32_Stencil8;
+                    }
+#if defined(DAWN_PLATFORM_MACOS)
+                    else if (format == MTLPixelFormatDepth24Unorm_Stencil8) {
+                        format = MTLPixelFormatX24_Stencil8;
+                    }
+#endif
+                    else {
+                        UNREACHABLE();
+                    }
+                } else {
+                    // TODO(enga): Add a workaround to back combined depth/stencil textures
+                    // with Sampled usage using two separate textures.
+                    // Or, consider always using the workaround for D32S8.
+                    // NOTE(review): after ConsumedError, execution continues and a
+                    // view is still created with the unconverted format — confirm
+                    // this fall-through is intended on pre-10.12 systems.
+                    GetDevice()->ConsumedError(
+                        DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
+                                               "combined depth/stencil format."));
+                }
+            }
+
+            MTLTextureType textureViewType =
+                MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
+            auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
+            auto arrayLayerRange =
+                NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+
+            mMtlTextureView =
+                AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:format
+                                                            textureType:textureViewType
+                                                                 levels:mipLevelRange
+                                                                 slices:arrayLayerRange]);
+            if (mMtlTextureView == nil) {
+                return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+            }
+        }
+
+        return {};
+    }
+
+    // Accessor for the view's MTLTexture; callers must not use it when the
+    // usage never required a view (asserted).
+    id<MTLTexture> TextureView::GetMTLTexture() {
+        ASSERT(mMtlTextureView != nullptr);
+        return mMtlTextureView.Get();
+    }
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h
new file mode 100644
index 00000000000..5c4ae9c31c4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_UTILSMETAL_H_
+#define DAWNNATIVE_METAL_UTILSMETAL_H_
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/metal/DeviceMTL.h"
+#include "dawn/native/metal/ShaderModuleMTL.h"
+#include "dawn/native/metal/TextureMTL.h"
+
+#import <Metal/Metal.h>
+
+namespace dawn::native {
+ struct ProgrammableStage;
+ struct EntryPointMetadata;
+ enum class SingleShaderStage;
+}
+
+namespace dawn::native::metal {
+
+ MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
+
+ struct TextureBufferCopySplit {
+ static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
+
+ struct CopyInfo {
+ NSUInteger bufferOffset;
+ NSUInteger bytesPerRow;
+ NSUInteger bytesPerImage;
+ Origin3D textureOrigin;
+ Extent3D copyExtent;
+ };
+
+ uint32_t count = 0;
+ std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
+
+ auto begin() const {
+ return copies.begin();
+ }
+
+ auto end() const {
+ return copies.begin() + count;
+ }
+ };
+
+ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Aspect aspect);
+
+ void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size);
+
+ MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+
+    // Helper function to create an MTLFunction with constant values, wrapped
+    // in an @available branch.
+ MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+ SingleShaderStage singleShaderStage,
+ PipelineLayout* pipelineLayout,
+ ShaderModule::MetalFunctionData* functionData,
+ uint32_t sampleMask = 0xFFFFFFFF,
+ const RenderPipeline* renderPipeline = nullptr);
+
+} // namespace dawn::native::metal
+
+#endif // DAWNNATIVE_METAL_UTILSMETAL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm
new file mode 100644
index 00000000000..e2e0ba3357e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/metal/UtilsMetal.mm
@@ -0,0 +1,288 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/metal/UtilsMetal.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native::metal {
+
+ MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
+ switch (compareFunction) {
+ case wgpu::CompareFunction::Never:
+ return MTLCompareFunctionNever;
+ case wgpu::CompareFunction::Less:
+ return MTLCompareFunctionLess;
+ case wgpu::CompareFunction::LessEqual:
+ return MTLCompareFunctionLessEqual;
+ case wgpu::CompareFunction::Greater:
+ return MTLCompareFunctionGreater;
+ case wgpu::CompareFunction::GreaterEqual:
+ return MTLCompareFunctionGreaterEqual;
+ case wgpu::CompareFunction::NotEqual:
+ return MTLCompareFunctionNotEqual;
+ case wgpu::CompareFunction::Equal:
+ return MTLCompareFunctionEqual;
+ case wgpu::CompareFunction::Always:
+ return MTLCompareFunctionAlways;
+
+ case wgpu::CompareFunction::Undefined:
+ UNREACHABLE();
+ }
+ }
+
+ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ Aspect aspect) {
+ TextureBufferCopySplit copy;
+ const Format textureFormat = texture->GetFormat();
+ const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
+
+ // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
+ // compute the correct range when checking if the buffer is big enough to contain the
+ // data for the whole copy. Instead of looking at the position of the last texel in the
+ // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+ // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
+ // buffer below where in memory, each row data (D) of the texture is followed by some
+ // padding data (P):
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDA|PP|
+ // The last pixel read will be A, but the driver will think it is the whole last padding
+ // row, causing it to generate an error when the pixel buffer is just big enough.
+
+ // We work around this limitation by detecting when Metal would complain and copy the
+ // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
+ uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
+
+ // Metal validation layer requires that if the texture's pixel format is a compressed
+ // format, the sourceSize must be a multiple of the pixel format's block size or be
+ // clamped to the edge of the texture if the block extends outside the bounds of a
+ // texture.
+ const Extent3D clampedCopyExtent =
+ texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
+
+ // Check whether buffer size is big enough.
+ bool needWorkaround =
+ bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
+ if (!needWorkaround) {
+ copy.count = 1;
+ copy.copies[0].bufferOffset = bufferOffset;
+ copy.copies[0].bytesPerRow = bytesPerRow;
+ copy.copies[0].bytesPerImage = bytesPerImage;
+ copy.copies[0].textureOrigin = origin;
+ copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depthOrArrayLayers};
+ return copy;
+ }
+
+ uint64_t currentOffset = bufferOffset;
+
+ // Doing all the copy except the last image.
+ if (copyExtent.depthOrArrayLayers > 1) {
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerImage;
+ copy.copies[copy.count].textureOrigin = origin;
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depthOrArrayLayers - 1};
+
+ ++copy.count;
+
+ // Update offset to copy to the last image.
+ currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
+ }
+
+ // Doing all the copy in last image except the last row.
+ uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
+ if (copyBlockRowCount > 1) {
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
+ copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
+ origin.z + copyExtent.depthOrArrayLayers - 1};
+
+ ASSERT(copyExtent.height - blockInfo.height <
+ texture->GetMipLevelVirtualSize(mipLevel).height);
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
+ copyExtent.height - blockInfo.height, 1};
+
+ ++copy.count;
+
+ // Update offset to copy to the last row.
+ currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
+ }
+
+ // Doing the last row copy with the exact number of bytes in last row.
+        // Work around this issue in the same way as a copy to a 1D texture.
+ uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
+ uint32_t lastRowCopyExtentHeight =
+ blockInfo.height + clampedCopyExtent.height - copyExtent.height;
+ ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
+
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = lastRowDataSize;
+ copy.copies[copy.count].bytesPerImage = lastRowDataSize;
+ copy.copies[copy.count].textureOrigin = {origin.x,
+ origin.y + copyExtent.height - blockInfo.height,
+ origin.z + copyExtent.depthOrArrayLayers - 1};
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+ ++copy.count;
+
+ return copy;
+ }
+
+ void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+ Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size) {
+ ASSERT(texture == dst.texture.Get());
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+ }
+
+ MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(format.aspects & aspect);
+
+ if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+ // We only provide a blit option if the format has both depth and stencil.
+ // It is invalid to provide a blit option otherwise.
+ switch (aspect) {
+ case Aspect::Depth:
+ return MTLBlitOptionDepthFromDepthStencil;
+ case Aspect::Stencil:
+ return MTLBlitOptionStencilFromDepthStencil;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return MTLBlitOptionNone;
+ }
+
+ MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+ SingleShaderStage singleShaderStage,
+ PipelineLayout* pipelineLayout,
+ ShaderModule::MetalFunctionData* functionData,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline) {
+ ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
+ const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
+ const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
+ if (entryPointMetadata.overridableConstants.size() == 0) {
+ DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage,
+ pipelineLayout, functionData, nil, sampleMask,
+ renderPipeline));
+ return {};
+ }
+
+ if (@available(macOS 10.12, *)) {
+            // MTLFunctionConstantValues can only be created within the @available branch.
+ NSRef<MTLFunctionConstantValues> constantValues =
+ AcquireNSRef([MTLFunctionConstantValues new]);
+
+ std::unordered_set<std::string> overriddenConstants;
+
+ auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
+ MTLDataType* type, OverridableConstantScalar* entry,
+ double value = 0) {
+ switch (dawnType) {
+ case EntryPointMetadata::OverridableConstant::Type::Boolean:
+ *type = MTLDataTypeBool;
+ if (entry) {
+ entry->b = static_cast<int32_t>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Float32:
+ *type = MTLDataTypeFloat;
+ if (entry) {
+ entry->f32 = static_cast<float>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Int32:
+ *type = MTLDataTypeInt;
+ if (entry) {
+ entry->i32 = static_cast<int32_t>(value);
+ }
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Uint32:
+ *type = MTLDataTypeUInt;
+ if (entry) {
+ entry->u32 = static_cast<uint32_t>(value);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ };
+
+ for (const auto& [name, value] : programmableStage.constants) {
+ overriddenConstants.insert(name);
+
+ // This is already validated so `name` must exist
+ const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+
+ MTLDataType type;
+ OverridableConstantScalar entry{};
+
+ switchType(moduleConstant.type, &type, &entry, value);
+
+ [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
+ }
+
+            // Set shader-initialized default values, because an MSL function_constant
+            // has no default value.
+ for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
+ if (overriddenConstants.count(name) != 0) {
+ // This constant already has overridden value
+ continue;
+ }
+
+ // Must exist because it is validated
+ const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+ ASSERT(moduleConstant.isInitialized);
+ MTLDataType type;
+
+ switchType(moduleConstant.type, &type, nullptr);
+
+ [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
+ type:type
+ atIndex:moduleConstant.id];
+ }
+
+ DAWN_TRY(shaderModule->CreateFunction(
+ shaderEntryPoint, singleShaderStage, pipelineLayout, functionData,
+ constantValues.Get(), sampleMask, renderPipeline));
+ } else {
+ UNREACHABLE();
+ }
+ return {};
+ }
+
+} // namespace dawn::native::metal
diff --git a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp
new file mode 100644
index 00000000000..17942933b57
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.cpp
@@ -0,0 +1,520 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/null/DeviceNull.h"
+
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Surface.h"
+
+namespace dawn::native::null {
+
+ // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
+
+ Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
+ mVendorId = 0;
+ mDeviceId = 0;
+ mName = "Null backend";
+ mAdapterType = wgpu::AdapterType::CPU;
+ MaybeError err = Initialize();
+ ASSERT(err.IsSuccess());
+ }
+
+ Adapter::~Adapter() = default;
+
+ bool Adapter::SupportsExternalImages() const {
+ return false;
+ }
+
+ // Used for the tests that intend to use an adapter without all features enabled.
+ void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
+ mSupportedFeatures = {};
+ for (wgpu::FeatureName f : requiredFeatures) {
+ mSupportedFeatures.EnableFeature(f);
+ }
+ }
+
+ MaybeError Adapter::InitializeImpl() {
+ return {};
+ }
+
+ MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ // Enable all features by default for the convenience of tests.
+ mSupportedFeatures.featuresBitSet.set();
+ return {};
+ }
+
+ MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ GetDefaultLimits(&limits->v1);
+ return {};
+ }
+
+ ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+ }
+
+ class Backend : public BackendConnection {
+ public:
+ Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
+ }
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
+ // There is always a single Null adapter because it is purely CPU based and doesn't
+ // depend on the system.
+ std::vector<Ref<AdapterBase>> adapters;
+ Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
+ adapters.push_back(std::move(adapter));
+ return adapters;
+ }
+ };
+
+ BackendConnection* Connect(InstanceBase* instance) {
+ return new Backend(instance);
+ }
+
+ struct CopyFromStagingToBufferOperation : PendingOperation {
+ virtual void Execute() {
+ destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
+ }
+
+ StagingBufferBase* staging;
+ Ref<Buffer> destination;
+ uint64_t sourceOffset;
+ uint64_t destinationOffset;
+ uint64_t size;
+ };
+
+ // Device
+
+ // static
+ ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+ const DeviceDescriptor* descriptor) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+ DAWN_TRY(device->Initialize());
+ return device;
+ }
+
+ Device::~Device() {
+ Destroy();
+ }
+
+ MaybeError Device::Initialize() {
+ return DeviceBase::Initialize(new Queue(this));
+ }
+
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(new BindGroup(this, descriptor));
+ }
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+ }
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ DAWN_TRY(IncrementMemoryUsage(descriptor->size));
+ return AcquireRef(new Buffer(this, descriptor));
+ }
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+ }
+ Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(this, descriptor));
+ }
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(this, descriptor));
+ }
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
+ }
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(this, descriptor));
+ }
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
+ }
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
+ DAWN_TRY(module->Initialize(parseResult));
+ return module;
+ }
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(this, descriptor));
+ }
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+ }
+ ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
+ }
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+ }
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer =
+ std::make_unique<StagingBuffer>(size, this);
+ DAWN_TRY(stagingBuffer->Initialize());
+ return std::move(stagingBuffer);
+ }
+
+ void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+
+ // Clear pending operations before checking mMemoryUsage because some operations keep a
+ // reference to Buffers.
+ mPendingOperations.clear();
+ ASSERT(mMemoryUsage == 0);
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ mPendingOperations.clear();
+ return {};
+ }
+
+ MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ destination->SetIsDataInitialized();
+ }
+
+ auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
+ operation->staging = source;
+ operation->destination = ToBackend(destination);
+ operation->sourceOffset = sourceOffset;
+ operation->destinationOffset = destinationOffset;
+ operation->size = size;
+
+ AddPendingOperation(std::move(operation));
+
+ return {};
+ }
+
+ MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ return {};
+ }
+
+ MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
+ static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
+ if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
+ }
+ mMemoryUsage += bytes;
+ return {};
+ }
+
+ void Device::DecrementMemoryUsage(uint64_t bytes) {
+ ASSERT(mMemoryUsage >= bytes);
+ mMemoryUsage -= bytes;
+ }
+
+ MaybeError Device::TickImpl() {
+ return SubmitPendingOperations();
+ }
+
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ return GetLastSubmittedCommandSerial();
+ }
+
+ void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
+ mPendingOperations.emplace_back(std::move(operation));
+ }
+
+ MaybeError Device::SubmitPendingOperations() {
+ for (auto& operation : mPendingOperations) {
+ operation->Execute();
+ }
+ mPendingOperations.clear();
+
+ DAWN_TRY(CheckPassedSerials());
+ IncrementLastSubmittedCommandSerial();
+
+ return {};
+ }
+
+ // BindGroupDataHolder
+
+ BindGroupDataHolder::BindGroupDataHolder(size_t size)
+ : mBindingDataAllocation(malloc(size)) // malloc is guaranteed to return a
+ // pointer aligned enough for the allocation
+ {
+ }
+
+ BindGroupDataHolder::~BindGroupDataHolder() {
+ free(mBindingDataAllocation);
+ }
+
+ // BindGroup
+
+ BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
+ BindGroupBase(device, descriptor, mBindingDataAllocation) {
+ }
+
+ // BindGroupLayout
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
+ }
+
+ // Buffer
+
+ Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {
+ mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
+ mAllocatedSize = GetSize();
+ }
+
+ bool Buffer::IsCPUWritableAtCreation() const {
+ // Only return true for mappable buffers so we can test cases that need / don't need a
+ // staging buffer.
+ return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+ }
+
+ MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+ }
+
+ void Buffer::CopyFromStaging(StagingBufferBase* staging,
+ uint64_t sourceOffset,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
+ memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
+ }
+
+ void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+ ASSERT(bufferOffset + size <= GetSize());
+ ASSERT(mBackingData);
+ memcpy(mBackingData.get() + bufferOffset, data, size);
+ }
+
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ return {};
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ return mBackingData.get();
+ }
+
+ void Buffer::UnmapImpl() {
+ }
+
+ void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
+ }
+
+ // CommandBuffer
+
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {
+ }
+
+ // QuerySet
+
+ QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {
+ }
+
+ // Queue
+
+ Queue::Queue(Device* device) : QueueBase(device) {
+ }
+
+ Queue::~Queue() {
+ }
+
+ MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
+ Device* device = ToBackend(GetDevice());
+
+ // The Vulkan, D3D12 and Metal implementation all tick the device here,
+ // for testing purposes we should also tick in the null implementation.
+ DAWN_TRY(device->Tick());
+
+ return device->SubmitPendingOperations();
+ }
+
+ MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
+ return {};
+ }
+
+ // ComputePipeline
+ MaybeError ComputePipeline::Initialize() {
+ return {};
+ }
+
+ // RenderPipeline
+ MaybeError RenderPipeline::Initialize() {
+ return {};
+ }
+
+ // SwapChain
+
+ // static
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+ }
+
+ MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ if (previousSwapChain != nullptr) {
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
+ return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
+ }
+ }
+
+ return {};
+ }
+
+ SwapChain::~SwapChain() = default;
+
+ MaybeError SwapChain::PresentImpl() {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+ return {};
+ }
+
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+ TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ mTexture = AcquireRef(
+ new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mTexture->APICreateView();
+ }
+
+ void SwapChain::DetachFromSurfaceImpl() {
+ if (mTexture != nullptr) {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+ }
+ }
+
+ // ShaderModule
+
+ MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+ return InitializeBase(parseResult);
+ }
+
+ // OldSwapChain
+
+ OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ im.Init(im.userData, nullptr);
+ }
+
+ OldSwapChain::~OldSwapChain() {
+ }
+
+ TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ return GetDevice()->APICreateTexture(descriptor);
+ }
+
+ MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+ return {};
+ }
+
+ // NativeSwapChainImpl
+
+ void NativeSwapChainImpl::Init(WSIContext* context) {
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height) {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Present() {
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+ }
+
+ // StagingBuffer
+
+ StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {
+ }
+
+ StagingBuffer::~StagingBuffer() {
+ if (mBuffer) {
+ mDevice->DecrementMemoryUsage(GetSize());
+ }
+ }
+
+ MaybeError StagingBuffer::Initialize() {
+ DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
+ mBuffer = std::make_unique<uint8_t[]>(GetSize());
+ mMappedPointer = mBuffer.get();
+ return {};
+ }
+
+ uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+ }
+
+ uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+ }
+
+ float Device::GetTimestampPeriodInNS() const {
+ return 1.0f;
+ }
+
+} // namespace dawn::native::null
diff --git a/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h
new file mode 100644
index 00000000000..102d55c4d0c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/null/DeviceNull.h
@@ -0,0 +1,340 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_NULL_DEVICENULL_H_
+#define DAWNNATIVE_NULL_DEVICENULL_H_
+
+#include "dawn/native/Adapter.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/ComputePipeline.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/RenderPipeline.h"
+#include "dawn/native/RingBufferAllocator.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/SwapChain.h"
+#include "dawn/native/Texture.h"
+#include "dawn/native/ToBackend.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::null {
+
+ class Adapter;
+ class BindGroup;
+ class BindGroupLayout;
+ class Buffer;
+ class CommandBuffer;
+ class ComputePipeline;
+ class Device;
+ using PipelineLayout = PipelineLayoutBase;
+ class QuerySet;
+ class Queue;
+ class RenderPipeline;
+ using Sampler = SamplerBase;
+ class ShaderModule;
+ class SwapChain;
+ using Texture = TextureBase;
+ using TextureView = TextureViewBase;
+
+ struct NullBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+ };
+
+ template <typename T>
+ auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
+ return ToBackendBase<NullBackendTraits>(common);
+ }
+
+ struct PendingOperation {
+ virtual ~PendingOperation() = default;
+ virtual void Execute() = 0;
+ };
+
+ class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+ const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize();
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
+ MaybeError SubmitPendingOperations();
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ MaybeError IncrementMemoryUsage(uint64_t bytes);
+ void DecrementMemoryUsage(uint64_t bytes);
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ using DeviceBase::DeviceBase;
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
+
+ static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
+ size_t mMemoryUsage = 0;
+ };
+
+ class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance);
+ ~Adapter() override;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
+
+ // Used for the tests that intend to use an adapter without all features enabled.
+ void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
+
+ private:
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+ const DeviceDescriptor* descriptor) override;
+ };
+
+ // Helper class so |BindGroup| can allocate memory for its binding data,
+ // before calling the BindGroupBase base class constructor.
+ class BindGroupDataHolder {
+ protected:
+ explicit BindGroupDataHolder(size_t size);
+ ~BindGroupDataHolder();
+
+ void* mBindingDataAllocation;
+ };
+
+ // We don't have the complexity of placement-allocation of bind group data in
+ // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
+ class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
+ public:
+ BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
+
+ private:
+ ~BindGroup() override = default;
+ };
+
+ class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ private:
+ ~BindGroupLayout() override = default;
+ };
+
+ class Buffer final : public BufferBase {
+ public:
+ Buffer(Device* device, const BufferDescriptor* descriptor);
+
+ void CopyFromStaging(StagingBufferBase* staging,
+ uint64_t sourceOffset,
+ uint64_t destinationOffset,
+ uint64_t size);
+
+ void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
+
+ private:
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ std::unique_ptr<uint8_t[]> mBackingData;
+ };
+
+ class CommandBuffer final : public CommandBufferBase {
+ public:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+ };
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+ };
+
+ class Queue final : public QueueBase {
+ public:
+ Queue(Device* device);
+
+ private:
+ ~Queue() override;
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
+ };
+
+ class ComputePipeline final : public ComputePipelineBase {
+ public:
+ using ComputePipelineBase::ComputePipelineBase;
+
+ MaybeError Initialize() override;
+ };
+
+ class RenderPipeline final : public RenderPipelineBase {
+ public:
+ using RenderPipelineBase::RenderPipelineBase;
+
+ MaybeError Initialize() override;
+ };
+
+ class ShaderModule final : public ShaderModuleBase {
+ public:
+ using ShaderModuleBase::ShaderModuleBase;
+
+ MaybeError Initialize(ShaderModuleParseResult* parseResult);
+ };
+
+ class SwapChain final : public NewSwapChainBase {
+ public:
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ using NewSwapChainBase::NewSwapChainBase;
+ MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+ Ref<Texture> mTexture;
+
+ MaybeError PresentImpl() override;
+ ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+ };
+
+ class OldSwapChain final : public OldSwapChainBase {
+ public:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
+ protected:
+ ~OldSwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase*) override;
+ };
+
+ class NativeSwapChainImpl {
+ public:
+ using WSIContext = struct {};
+ void Init(WSIContext* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
+ wgpu::TextureFormat GetPreferredFormat() const;
+ };
+
+ class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
+ MaybeError Initialize() override;
+
+ private:
+ Device* mDevice;
+ std::unique_ptr<uint8_t[]> mBuffer;
+ };
+
+} // namespace dawn::native::null
+
+#endif // DAWNNATIVE_NULL_DEVICENULL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp b/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp
new file mode 100644
index 00000000000..43637cd7882
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/null/NullBackend.cpp
@@ -0,0 +1,32 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NullBackend.cpp: contains the definition of symbols exported by NullBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/NullBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/null/DeviceNull.h"
+
+namespace dawn::native::null {
+
+ DawnSwapChainImplementation CreateNativeSwapChainImpl() {
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
+ impl.textureUsage = WGPUTextureUsage_Present;
+ return impl;
+ }
+
+} // namespace dawn::native::null
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp
new file mode 100644
index 00000000000..ed8f15b675e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.cpp
@@ -0,0 +1,305 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BackendGL.h"
+
+#include "dawn/common/GPUInfo.h"
+#include "dawn/common/Log.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+#include <cstring>
+
+namespace dawn::native::opengl {
+
+ namespace {
+
+ struct Vendor {
+ const char* vendorName;
+ uint32_t vendorId;
+ };
+
+ const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
+ {"ARM", gpu_info::kVendorID_ARM},
+ {"Imagination", gpu_info::kVendorID_ImgTec},
+ {"Intel", gpu_info::kVendorID_Intel},
+ {"NVIDIA", gpu_info::kVendorID_Nvidia},
+ {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
+
+ uint32_t GetVendorIdFromVendors(const char* vendor) {
+ uint32_t vendorId = 0;
+ for (const auto& it : kVendors) {
+ // Matching vendor name with vendor string
+ if (strstr(vendor, it.vendorName) != nullptr) {
+ vendorId = it.vendorId;
+ break;
+ }
+ }
+ return vendorId;
+ }
+
+ void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
+ GLenum type,
+ GLuint id,
+ GLenum severity,
+ GLsizei length,
+ const GLchar* message,
+ const void* userParam) {
+ const char* sourceText;
+ switch (source) {
+ case GL_DEBUG_SOURCE_API:
+ sourceText = "OpenGL";
+ break;
+ case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
+ sourceText = "Window System";
+ break;
+ case GL_DEBUG_SOURCE_SHADER_COMPILER:
+ sourceText = "Shader Compiler";
+ break;
+ case GL_DEBUG_SOURCE_THIRD_PARTY:
+ sourceText = "Third Party";
+ break;
+ case GL_DEBUG_SOURCE_APPLICATION:
+ sourceText = "Application";
+ break;
+ case GL_DEBUG_SOURCE_OTHER:
+ sourceText = "Other";
+ break;
+ default:
+ sourceText = "UNKNOWN";
+ break;
+ }
+
+ const char* severityText;
+ switch (severity) {
+ case GL_DEBUG_SEVERITY_HIGH:
+ severityText = "High";
+ break;
+ case GL_DEBUG_SEVERITY_MEDIUM:
+ severityText = "Medium";
+ break;
+ case GL_DEBUG_SEVERITY_LOW:
+ severityText = "Low";
+ break;
+ case GL_DEBUG_SEVERITY_NOTIFICATION:
+ severityText = "Notification";
+ break;
+ default:
+ severityText = "UNKNOWN";
+ break;
+ }
+
+ if (type == GL_DEBUG_TYPE_ERROR) {
+ dawn::WarningLog() << "OpenGL error:"
+ << "\n Source: " << sourceText //
+ << "\n ID: " << id //
+ << "\n Severity: " << severityText //
+ << "\n Message: " << message;
+
+ // Abort on an error when in Debug mode.
+ UNREACHABLE();
+ }
+ }
+
+ } // anonymous namespace
+
+ // The OpenGL backend's Adapter.
+
+ class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance, wgpu::BackendType backendType)
+ : AdapterBase(instance, backendType) {
+ }
+
+ MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
+ // Use getProc to populate the dispatch table
+ return mFunctions.Initialize(getProc);
+ }
+
+ ~Adapter() override = default;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override {
+ // Via dawn::native::opengl::WrapExternalEGLImage
+ return GetBackendType() == wgpu::BackendType::OpenGLES;
+ }
+
+ private:
+ MaybeError InitializeImpl() override {
+ if (mFunctions.GetVersion().IsES()) {
+ ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
+ } else {
+ ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
+ }
+
+ // Use the debug output functionality to get notified about GL errors
+ // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
+ // extensions
+ bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
+
+ if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
+ mFunctions.Enable(GL_DEBUG_OUTPUT);
+ mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
+
+ // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH,
+ 0, nullptr, GL_TRUE);
+
+ // Severe performance warnings; GLSL or other shader compiler and linker warnings;
+ // use of currently deprecated behavior
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM,
+ 0, nullptr, GL_TRUE);
+
+ // Performance warnings from redundant state changes; trivial undefined behavior
+ // This is disabled because we do an incredible amount of redundant state changes.
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
+ nullptr, GL_FALSE);
+
+ // Any message which is not an error or performance concern
+ mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
+ GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr,
+ GL_FALSE);
+ mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
+ }
+
+ // Set state that never changes between devices.
+ mFunctions.Enable(GL_DEPTH_TEST);
+ mFunctions.Enable(GL_SCISSOR_TEST);
+ mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
+ if (mFunctions.GetVersion().IsDesktop()) {
+ // These are not necessary on GLES. The functionality is enabled by default, and
+ // works by specifying sample counts and SRGB textures, respectively.
+ mFunctions.Enable(GL_MULTISAMPLE);
+ mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
+ }
+ mFunctions.Enable(GL_SAMPLE_MASK);
+
+ mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
+
+ // Workaroud to find vendor id from vendor name
+ const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
+ mVendorId = GetVendorIdFromVendors(vendor);
+
+ mDriverDescription = std::string("OpenGL version ") +
+ reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
+
+ if (mName.find("SwiftShader") != std::string::npos) {
+ mAdapterType = wgpu::AdapterType::CPU;
+ }
+
+ return {};
+ }
+
+ MaybeError InitializeSupportedFeaturesImpl() override {
+ // TextureCompressionBC
+ {
+ // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
+ bool supportsS3TC =
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
+ (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
+ mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
+ mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
+
+ // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
+ // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
+ // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
+ // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
+ bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
+
+ // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
+ // NVidia GLES drivers don't support this extension, but they do support
+ // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
+ // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
+ // SRGB support even if S3TC is supported; see
+ // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
+ bool supportsS3TCSRGB =
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
+ mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
+
+ // BC4 and BC5
+ bool supportsRGTC =
+ mFunctions.IsAtLeastGL(3, 0) ||
+ mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
+
+ // BC6 and BC7
+ bool supportsBPTC =
+ mFunctions.IsAtLeastGL(4, 2) ||
+ mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
+ mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
+
+ if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
+ supportsBPTC) {
+ mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
+ }
+ }
+
+ return {};
+ }
+
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+ GetDefaultLimits(&limits->v1);
+ return {};
+ }
+
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+ const DeviceDescriptor* descriptor) override {
+ // There is no limit on the number of devices created from this adapter because they can
+ // all share the same backing OpenGL context.
+ return Device::Create(this, descriptor, mFunctions);
+ }
+
+ OpenGLFunctions mFunctions;
+ };
+
+ // Implementation of the OpenGL backend's BackendConnection
+
+ Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
+ : BackendConnection(instance, backendType) {
+ }
+
+ std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
+ return {};
+ }
+
+ ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
+ // know how to handle MakeCurrent.
+ DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
+
+ ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+ DAWN_INVALID_IF(options->getProc == nullptr,
+ "AdapterDiscoveryOptions::getProc must be set");
+
+ Ref<Adapter> adapter = AcquireRef(
+ new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
+ DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
+ DAWN_TRY(adapter->Initialize());
+
+ mCreatedAdapter = true;
+ std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
+ return std::move(adapters);
+ }
+
+ BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
+ return new Backend(instance, backendType);
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h
new file mode 100644
index 00000000000..12e7b47fa8e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BackendGL.h
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BACKENDGL_H_
+#define DAWNNATIVE_OPENGL_BACKENDGL_H_
+
+#include "dawn/native/BackendConnection.h"
+
+namespace dawn::native::opengl {
+
+ class Backend : public BackendConnection {
+ public:
+ Backend(InstanceBase* instance, wgpu::BackendType backendType);
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* options) override;
+
+ private:
+ bool mCreatedAdapter = false;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BACKENDGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp
new file mode 100644
index 00000000000..6573a9274ef
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.cpp
@@ -0,0 +1,65 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BindGroupGL.h"
+
+#include "dawn/native/Texture.h"
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+ MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+ const BindGroupEntry& entry = descriptor->entries[i];
+
+ const auto& it = bindingMap.find(BindingNumber(entry.binding));
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+ if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
+ ASSERT(entry.textureView != nullptr);
+ const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
+ DAWN_INVALID_IF(
+ textureViewLayerCount != 1 &&
+ textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
+ "%s binds %u layers. Currently the OpenGL backend only supports either binding "
+ "1 layer or the all layers (%u) for storage texture.",
+ entry.textureView, textureViewLayerCount,
+ entry.textureView->GetTexture()->GetArrayLayers());
+ }
+ }
+
+ return {};
+ }
+
+ BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {
+ }
+
+ BindGroup::~BindGroup() = default;
+
+ void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+ }
+
+ // static
+ Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h
new file mode 100644
index 00000000000..9d6ccec10e0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupGL.h
@@ -0,0 +1,41 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/native/BindGroup.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
+
+ class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+
+ private:
+ ~BindGroup() override;
+
+ void DestroyImpl() override;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BINDGROUPGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp
new file mode 100644
index 00000000000..1cc14749890
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.cpp
@@ -0,0 +1,37 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+
+#include "dawn/native/opengl/BindGroupGL.h"
+
+namespace dawn::native::opengl {
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
+ Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h
new file mode 100644
index 00000000000..1cb3cc267c0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BindGroupLayoutGL.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/native/BindGroupLayout.h"
+
+namespace dawn::native::opengl {
+
+ class BindGroup;
+ class Device;
+
+ class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ private:
+ ~BindGroupLayout() override = default;
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp
new file mode 100644
index 00000000000..fde83bc0e4c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.cpp
@@ -0,0 +1,184 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/BufferGL.h"
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+ // Buffer
+
+ // static
+ ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
+ const BufferDescriptor* descriptor,
+ bool shouldLazyClear) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreationInternal());
+ }
+
+ return std::move(buffer);
+ }
+
+ Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor) {
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ mAllocatedSize = std::max(GetSize(), uint64_t(4u));
+
+ device->gl.GenBuffers(1, &mBuffer);
+ device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !descriptor->mappedAtCreation) {
+ std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
+ device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(),
+ GL_STATIC_DRAW);
+ } else {
+ // Buffers start zeroed if you pass nullptr to glBufferData.
+ device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
+ }
+ }
+
+ Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
+ : Buffer(device, descriptor) {
+ if (!shouldLazyClear) {
+ SetIsDataInitialized();
+ }
+ }
+
+ Buffer::~Buffer() = default;
+
+ GLuint Buffer::GetHandle() const {
+ return mBuffer;
+ }
+
+ bool Buffer::EnsureDataInitialized() {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ InitializeToZero();
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero();
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero();
+ return true;
+ }
+
+ void Buffer::InitializeToZero() {
+ ASSERT(NeedsInitialization());
+
+ const uint64_t size = GetAllocatedSize();
+ Device* device = ToBackend(GetDevice());
+
+ const std::vector<uint8_t> clearValues(size, 0u);
+ device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
+ device->IncrementLazyClearCountForTesting();
+
+ SetIsDataInitialized();
+ }
+
+ bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
+ // driver to migrate it to shared memory.
+ return true;
+ }
+
+ MaybeError Buffer::MapAtCreationImpl() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
+ return {};
+ }
+
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
+ // so we extend the range to be 4 bytes.
+ if (size == 0) {
+ if (offset != 0) {
+ offset -= 4;
+ }
+ size = 4;
+ }
+
+ EnsureDataInitialized();
+
+ // This does GPU->CPU synchronization, we could require a high
+ // version of OpenGL that would let us map the buffer unsynchronized.
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ void* mappedData = nullptr;
+ if (mode & wgpu::MapMode::Read) {
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
+ }
+
+ // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
+ // the resource but OpenGL gives us the pointer at offset. Remove the offset.
+ mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+ return {};
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ // The mapping offset has already been removed.
+ return mMappedData;
+ }
+
+ void Buffer::UnmapImpl() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ gl.UnmapBuffer(GL_ARRAY_BUFFER);
+ mMappedData = nullptr;
+ }
+
+ void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+ ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
+ mBuffer = 0;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h
new file mode 100644
index 00000000000..2cd1ae6eae5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/BufferGL.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BUFFERGL_H_
+#define DAWNNATIVE_OPENGL_BUFFERGL_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
+ const BufferDescriptor* descriptor,
+ bool shouldLazyClear);
+
+ Buffer(Device* device, const BufferDescriptor* descriptor);
+
+ GLuint GetHandle() const;
+
+ bool EnsureDataInitialized();
+ bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
+ bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
+
+ private:
+ Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
+ ~Buffer() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ void InitializeToZero();
+
+ GLuint mBuffer = 0;
+ void* mMappedData = nullptr;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BUFFERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp
new file mode 100644
index 00000000000..c71d8f6d3eb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.cpp
@@ -0,0 +1,1472 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/CommandBufferGL.h"
+
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/VertexFormat.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/ComputePipelineGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/RenderPipelineGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+#include <cstring>
+
+namespace dawn::native::opengl {
+
+ namespace {
+
+ GLenum IndexFormatType(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return GL_UNSIGNED_SHORT;
+ case wgpu::IndexFormat::Uint32:
+ return GL_UNSIGNED_INT;
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ GLenum VertexFormatType(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ return GL_UNSIGNED_BYTE;
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ return GL_BYTE;
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ return GL_UNSIGNED_SHORT;
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
+ return GL_SHORT;
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float16x4:
+ return GL_HALF_FLOAT;
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Float32x4:
+ return GL_FLOAT;
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ return GL_UNSIGNED_INT;
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
+ return GL_INT;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
+ return GL_TRUE;
+ default:
+ return GL_FALSE;
+ }
+ }
+
+ bool VertexFormatIsInt(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
+ // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
+ // This means that we have to re-apply these buffers on a VertexState change.
+ class VertexStateBufferBindingTracker {
+ public:
+ void OnSetIndexBuffer(BufferBase* buffer) {
+ mIndexBufferDirty = true;
+ mIndexBuffer = ToBackend(buffer);
+ }
+
+ void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
+ mVertexBuffers[slot] = ToBackend(buffer);
+ mVertexBufferOffsets[slot] = offset;
+ mDirtyVertexBuffers.set(slot);
+ }
+
+ void OnSetPipeline(RenderPipelineBase* pipeline) {
+ if (mLastPipeline == pipeline) {
+ return;
+ }
+
+ mIndexBufferDirty = true;
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+
+ mLastPipeline = pipeline;
+ }
+
+ void Apply(const OpenGLFunctions& gl) {
+ if (mIndexBufferDirty && mIndexBuffer != nullptr) {
+ gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
+ mIndexBufferDirty = false;
+ }
+
+ for (VertexBufferSlot slot : IterateBitSet(
+ mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
+ for (VertexAttributeLocation location : IterateBitSet(
+ ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
+ const VertexAttributeInfo& attribute =
+ mLastPipeline->GetAttribute(location);
+
+ GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
+ GLuint buffer = mVertexBuffers[slot]->GetHandle();
+ uint64_t offset = mVertexBufferOffsets[slot];
+
+ const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
+ uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
+ GLenum formatType = VertexFormatType(attribute.format);
+
+ GLboolean normalized = VertexFormatIsNormalized(attribute.format);
+ gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
+ if (VertexFormatIsInt(attribute.format)) {
+ gl.VertexAttribIPointer(
+ attribIndex, components, formatType, vertexBuffer.arrayStride,
+ reinterpret_cast<void*>(
+ static_cast<intptr_t>(offset + attribute.offset)));
+ } else {
+ gl.VertexAttribPointer(attribIndex, components, formatType, normalized,
+ vertexBuffer.arrayStride,
+ reinterpret_cast<void*>(static_cast<intptr_t>(
+ offset + attribute.offset)));
+ }
+ }
+ }
+
+ mDirtyVertexBuffers.reset();
+ }
+
+ private:
+ bool mIndexBufferDirty = false;
+ Buffer* mIndexBuffer = nullptr;
+
+ ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+ ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
+ ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
+
+ RenderPipelineBase* mLastPipeline = nullptr;
+ };
+
+ class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
+ public:
+ void OnSetPipeline(RenderPipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
+
+ void OnSetPipeline(ComputePipeline* pipeline) {
+ BindGroupTrackerBase::OnSetPipeline(pipeline);
+ mPipeline = pipeline;
+ }
+
+ void Apply(const OpenGLFunctions& gl) {
+ BeforeApply();
+ for (BindGroupIndex index :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
+ mDynamicOffsets[index].data());
+ }
+ AfterApply();
+ }
+
+ private:
+ void ApplyBindGroup(const OpenGLFunctions& gl,
+ BindGroupIndex index,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount,
+ uint64_t* dynamicOffsets) {
+ const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
+ uint32_t currentDynamicOffsetIndex = 0;
+
+ for (BindingIndex bindingIndex{0};
+ bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+ GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+ GLuint index = indices[bindingIndex];
+ GLuint offset = binding.offset;
+
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicOffsetIndex];
+ ++currentDynamicOffsetIndex;
+ }
+
+ GLenum target;
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ target = GL_UNIFORM_BUFFER;
+ break;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ target = GL_SHADER_STORAGE_BUFFER;
+ break;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+
+ gl.BindBufferRange(target, index, buffer, offset, binding.size);
+ break;
+ }
+
+ case BindingInfoType::Sampler: {
+ Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+ GLuint samplerIndex = indices[bindingIndex];
+
+ for (PipelineGL::SamplerUnit unit :
+ mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
+ // Only use filtering for certain texture units, because int
+ // and uint texture are only complete without filtering
+ if (unit.shouldUseFiltering) {
+ gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
+ } else {
+ gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
+ }
+ }
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ TextureView* view =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ GLuint handle = view->GetHandle();
+ GLenum target = view->GetGLTarget();
+ GLuint viewIndex = indices[bindingIndex];
+
+ for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
+ gl.ActiveTexture(GL_TEXTURE0 + unit);
+ gl.BindTexture(target, handle);
+ if (ToBackend(view->GetTexture())->GetGLFormat().format ==
+ GL_DEPTH_STENCIL) {
+ Aspect aspect = view->GetAspects();
+ ASSERT(HasOneBit(aspect));
+ switch (aspect) {
+ case Aspect::None:
+ case Aspect::Color:
+ case Aspect::CombinedDepthStencil:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
+ case Aspect::Depth:
+ gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+ GL_DEPTH_COMPONENT);
+ break;
+ case Aspect::Stencil:
+ gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+ GL_STENCIL_INDEX);
+ break;
+ }
+ }
+ }
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ TextureView* view =
+ ToBackend(group->GetBindingAsTextureView(bindingIndex));
+ Texture* texture = ToBackend(view->GetTexture());
+ GLuint handle = texture->GetHandle();
+ GLuint imageIndex = indices[bindingIndex];
+
+ GLenum access;
+ switch (bindingInfo.storageTexture.access) {
+ case wgpu::StorageTextureAccess::WriteOnly:
+ access = GL_WRITE_ONLY;
+ break;
+ case wgpu::StorageTextureAccess::Undefined:
+ UNREACHABLE();
+ }
+
+ // OpenGL ES only supports either binding a layer or the entire
+ // texture in glBindImageTexture().
+ GLboolean isLayered;
+ if (view->GetLayerCount() == 1) {
+ isLayered = GL_FALSE;
+ } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
+ isLayered = GL_TRUE;
+ } else {
+ UNREACHABLE();
+ }
+
+ gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
+ isLayered, view->GetBaseArrayLayer(), access,
+ texture->GetGLFormat().internalFormat);
+ break;
+ }
+
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ PipelineGL* mPipeline = nullptr;
+ };
+
+ void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
+ const BeginRenderPassCmd* renderPass) {
+ ASSERT(renderPass != nullptr);
+
+ GLuint readFbo = 0;
+ GLuint writeFbo = 0;
+
+ for (ColorAttachmentIndex i :
+ IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+ if (readFbo == 0) {
+ ASSERT(writeFbo == 0);
+ gl.GenFramebuffers(1, &readFbo);
+ gl.GenFramebuffers(1, &writeFbo);
+ }
+
+ const TextureBase* colorTexture =
+ renderPass->colorAttachments[i].view->GetTexture();
+ ASSERT(colorTexture->IsMultisampledTexture());
+ ASSERT(colorTexture->GetArrayLayers() == 1);
+ ASSERT(renderPass->colorAttachments[i].view->GetBaseMipLevel() == 0);
+
+ GLuint colorHandle = ToBackend(colorTexture)->GetHandle();
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ ToBackend(colorTexture)->GetGLTarget(), colorHandle, 0);
+
+ const TextureBase* resolveTexture =
+ renderPass->colorAttachments[i].resolveTarget->GetTexture();
+ GLuint resolveTextureHandle = ToBackend(resolveTexture)->GetHandle();
+ GLuint resolveTargetMipmapLevel =
+ renderPass->colorAttachments[i].resolveTarget->GetBaseMipLevel();
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
+ if (resolveTexture->GetArrayLayers() == 1) {
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, resolveTextureHandle,
+ resolveTargetMipmapLevel);
+ } else {
+ GLuint resolveTargetArrayLayer =
+ renderPass->colorAttachments[i].resolveTarget->GetBaseArrayLayer();
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ resolveTextureHandle, resolveTargetMipmapLevel,
+ resolveTargetArrayLayer);
+ }
+
+ gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0,
+ renderPass->width, renderPass->height, GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+ }
+ }
+
+ gl.DeleteFramebuffers(1, &readFbo);
+ gl.DeleteFramebuffers(1, &writeFbo);
+ }
+
+ // OpenGL SPEC requires the source/destination region must be a region that is contained
+ // within srcImage/dstImage. Here the size of the image refers to the virtual size, while
+ // Dawn validates texture copy extent with the physical size, so we need to re-calculate the
+ // texture copy extent to ensure it should fit in the virtual size of the subresource.
+ Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ Extent3D validTextureCopyExtent = copySize;
+ const TextureBase* texture = textureCopy.texture.Get();
+ Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+ ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+ ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+ if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+ }
+ if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+ }
+
+ return validTextureCopyExtent;
+ }
+
+ void CopyTextureToTextureWithBlit(const OpenGLFunctions& gl,
+ const TextureCopy& src,
+ const TextureCopy& dst,
+ const Extent3D& copySize) {
+ Texture* srcTexture = ToBackend(src.texture.Get());
+ Texture* dstTexture = ToBackend(dst.texture.Get());
+
+ // Generate temporary framebuffers for the blits.
+ GLuint readFBO = 0, drawFBO = 0;
+ gl.GenFramebuffers(1, &readFBO);
+ gl.GenFramebuffers(1, &drawFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
+
+ // Reset state that may affect glBlitFramebuffer().
+ gl.Disable(GL_SCISSOR_TEST);
+ GLenum blitMask = 0;
+ if (src.aspect & Aspect::Color) {
+ blitMask |= GL_COLOR_BUFFER_BIT;
+ }
+ if (src.aspect & Aspect::Depth) {
+ blitMask |= GL_DEPTH_BUFFER_BIT;
+ }
+ if (src.aspect & Aspect::Stencil) {
+ blitMask |= GL_STENCIL_BUFFER_BIT;
+ }
+ // Iterate over all layers, doing a single blit for each.
+ for (uint32_t layer = 0; layer < copySize.depthOrArrayLayers; ++layer) {
+ // Bind all required aspects for this layer.
+ for (Aspect aspect : IterateEnumMask(src.aspect)) {
+ GLenum glAttachment;
+ switch (aspect) {
+ case Aspect::Color:
+ glAttachment = GL_COLOR_ATTACHMENT0;
+ break;
+ case Aspect::Depth:
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ break;
+ case Aspect::Stencil:
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ break;
+ case Aspect::CombinedDepthStencil:
+ case Aspect::None:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
+ }
+ if (srcTexture->GetArrayLayers() == 1 &&
+ srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment,
+ srcTexture->GetGLTarget(), srcTexture->GetHandle(),
+ src.mipLevel);
+ } else {
+ gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+ srcTexture->GetHandle(),
+ static_cast<GLint>(src.mipLevel),
+ static_cast<GLint>(src.origin.z + layer));
+ }
+ if (dstTexture->GetArrayLayers() == 1 &&
+ dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment,
+ dstTexture->GetGLTarget(), dstTexture->GetHandle(),
+ dst.mipLevel);
+ } else {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment,
+ dstTexture->GetHandle(),
+ static_cast<GLint>(dst.mipLevel),
+ static_cast<GLint>(dst.origin.z + layer));
+ }
+ }
+ gl.BlitFramebuffer(src.origin.x, src.origin.y, src.origin.x + copySize.width,
+ src.origin.y + copySize.height, dst.origin.x, dst.origin.y,
+ dst.origin.x + copySize.width, dst.origin.y + copySize.height,
+ blitMask, GL_NEAREST);
+ }
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &readFBO);
+ gl.DeleteFramebuffers(1, &drawFBO);
+ }
+ bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
+ return format == wgpu::TextureFormat::RGBA8Snorm ||
+ format == wgpu::TextureFormat::RG8Snorm ||
+ format == wgpu::TextureFormat::R8Snorm;
+ }
+ } // namespace
+
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+ : CommandBufferBase(encoder, descriptor) {
+ }
+
+ MaybeError CommandBuffer::Execute() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+ for (size_t i = 0; i < scope.textures.size(); i++) {
+ Texture* texture = ToBackend(scope.textures[i]);
+
+ // Clear subresources that are not render attachments. Render attachments will be
+ // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+ // subresource has not been initialized before the render pass.
+ scope.textureUsages[i].Iterate(
+ [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+ if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+ texture->EnsureSubresourceContentInitialized(range);
+ }
+ });
+ }
+
+ for (BufferBase* bufferBase : scope.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized();
+ }
+ };
+
+ size_t nextComputePassNumber = 0;
+ size_t nextRenderPassNumber = 0;
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::BeginComputePass: {
+ mCommands.NextCommand<BeginComputePassCmd>();
+ for (const SyncScopeResourceUsage& scope :
+ GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+ LazyClearSyncScope(scope);
+ }
+ DAWN_TRY(ExecuteComputePass());
+
+ nextComputePassNumber++;
+ break;
+ }
+
+ case Command::BeginRenderPass: {
+ auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+ LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+ LazyClearRenderPassAttachments(cmd);
+ DAWN_TRY(ExecuteRenderPass(cmd));
+
+ nextRenderPassNumber++;
+ break;
+ }
+
+ case Command::CopyBufferToBuffer: {
+ CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ if (copy->size == 0) {
+ // Skip no-op copies.
+ break;
+ }
+
+ ToBackend(copy->source)->EnsureDataInitialized();
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
+ ToBackend(copy->destination)->GetHandle());
+ gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
+ copy->sourceOffset, copy->destinationOffset, copy->size);
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ break;
+ }
+
+ case Command::CopyBufferToTexture: {
+ CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ Buffer* buffer = ToBackend(src.buffer.Get());
+
+ DAWN_INVALID_IF(
+ dst.aspect == Aspect::Stencil,
+ "Copies to stencil textures are unsupported on the OpenGL backend.");
+
+ ASSERT(dst.aspect == Aspect::Color);
+
+ buffer->EnsureDataInitialized();
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+ dst.mipLevel)) {
+ dst.texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+ }
+
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
+
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = src.bytesPerRow;
+ dataLayout.rowsPerImage = src.rowsPerImage;
+
+ DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
+ copy->copySize);
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ break;
+ }
+
+ case Command::CopyTextureToBuffer: {
+ CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+ auto& copySize = copy->copySize;
+ Texture* texture = ToBackend(src.texture.Get());
+ Buffer* buffer = ToBackend(dst.buffer.Get());
+ const Format& formatInfo = texture->GetFormat();
+ const GLFormat& format = texture->GetGLFormat();
+ GLenum target = texture->GetGLTarget();
+
+ // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
+ // avoid this codepath. OpenGL does not support readback from non-renderable
+ // texture formats.
+ if (formatInfo.isCompressed ||
+ (TextureFormatIsSnorm(formatInfo.format) &&
+ GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
+ UNREACHABLE();
+ }
+
+ buffer->EnsureDataInitializedAsDestination(copy);
+
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(src, copy->copySize);
+ texture->EnsureSubresourceContentInitialized(subresources);
+ // The only way to move data from a texture to a buffer in GL is via
+ // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
+ gl.BindTexture(target, texture->GetHandle());
+
+ GLuint readFBO = 0;
+ gl.GenFramebuffers(1, &readFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+
+ const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
+
+ GLenum glAttachment;
+ GLenum glFormat;
+ GLenum glType;
+ switch (src.aspect) {
+ case Aspect::Color:
+ glAttachment = GL_COLOR_ATTACHMENT0;
+ glFormat = format.format;
+ glType = format.type;
+ break;
+ case Aspect::Depth:
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ glFormat = GL_DEPTH_COMPONENT;
+ glType = GL_FLOAT;
+ break;
+ case Aspect::Stencil:
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ glFormat = GL_STENCIL_INDEX;
+ glType = GL_UNSIGNED_BYTE;
+ break;
+
+ case Aspect::CombinedDepthStencil:
+ case Aspect::None:
+ case Aspect::Plane0:
+ case Aspect::Plane1:
+ UNREACHABLE();
+ }
+
+ uint8_t* offset =
+ reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D: {
+ if (texture->GetArrayLayers() == 1) {
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
+ texture->GetHandle(), src.mipLevel);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat, glType, offset);
+ break;
+ }
+ // Implementation for 2D array is the same as 3D.
+ [[fallthrough]];
+ }
+
+ case wgpu::TextureDimension::e3D: {
+ const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
+ for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
+ gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+ texture->GetHandle(), src.mipLevel,
+ src.origin.z + z);
+ gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+ copySize.height, glFormat, glType, offset);
+
+ offset += bytesPerImage;
+ }
+ break;
+ }
+
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ }
+
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ gl.DeleteFramebuffers(1, &readFBO);
+ break;
+ }
+
+ case Command::CopyTextureToTexture: {
+ CopyTextureToTextureCmd* copy =
+ mCommands.NextCommand<CopyTextureToTextureCmd>();
+ if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+ copy->copySize.depthOrArrayLayers == 0) {
+ // Skip no-op copies.
+ continue;
+ }
+ auto& src = copy->source;
+ auto& dst = copy->destination;
+
+ // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
+ // is not equal to imageExtentDst. For example when copySize fits in the virtual
+ // size of the source image but does not fit in the one of the destination
+ // image.
+ Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
+ Texture* srcTexture = ToBackend(src.texture.Get());
+ Texture* dstTexture = ToBackend(dst.texture.Get());
+
+ SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+ srcTexture->EnsureSubresourceContentInitialized(srcRange);
+ if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
+ dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
+ } else {
+ dstTexture->EnsureSubresourceContentInitialized(dstRange);
+ }
+ if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
+ gl.CopyImageSubData(srcTexture->GetHandle(), srcTexture->GetGLTarget(),
+ src.mipLevel, src.origin.x, src.origin.y, src.origin.z,
+ dstTexture->GetHandle(), dstTexture->GetGLTarget(),
+ dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
+ copySize.width, copySize.height,
+ copy->copySize.depthOrArrayLayers);
+ } else {
+ CopyTextureToTextureWithBlit(gl, src, dst, copySize);
+ }
+ break;
+ }
+
+ case Command::ClearBuffer: {
+ ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+ if (cmd->size == 0) {
+ // Skip no-op fills.
+ break;
+ }
+ Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+
+ bool clearedToZero =
+ dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
+
+ if (!clearedToZero) {
+ const std::vector<uint8_t> clearValues(cmd->size, 0u);
+ gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size,
+ clearValues.data());
+ }
+
+ break;
+ }
+
+ case Command::ResolveQuerySet: {
+ // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
+ SkipCommand(&mCommands, type);
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+ }
+
+ case Command::InsertDebugMarker:
+ case Command::PopDebugGroup:
+ case Command::PushDebugGroup: {
+ // Due to lack of linux driver support for GL_EXT_debug_marker
+ // extension these functions are skipped.
+ SkipCommand(&mCommands, type);
+ break;
+ }
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ uint64_t offset = write->offset;
+ uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ dstBuffer->EnsureDataInitializedAsDestination(offset, size);
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return {};
+ }
+
+    // Replays the recorded compute-pass commands against the GL context until the
+    // EndComputePass command is consumed. WriteTimestamp is not implemented on this
+    // backend and surfaces as an error. Commands are pulled from mCommands in
+    // order; each case must consume exactly its own command payload.
+    MaybeError CommandBuffer::ExecuteComputePass() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        ComputePipeline* lastPipeline = nullptr;
+        // Defers bind-group GL binding until immediately before each dispatch.
+        BindGroupTracker bindGroupTracker = {};
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndComputePass: {
+                    mCommands.NextCommand<EndComputePassCmd>();
+                    return {};
+                }
+
+                case Command::Dispatch: {
+                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+                    bindGroupTracker.Apply(gl);
+
+                    gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
+                    // Conservative full barrier so that any later command observes
+                    // all writes made by this dispatch.
+                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                    break;
+                }
+
+                case Command::DispatchIndirect: {
+                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                    bindGroupTracker.Apply(gl);
+
+                    uint64_t indirectBufferOffset = dispatch->indirectOffset;
+                    Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
+
+                    // The dispatch arguments are read by GL from the bound
+                    // GL_DISPATCH_INDIRECT_BUFFER at the given offset.
+                    gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
+                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                    break;
+                }
+
+                case Command::SetComputePipeline: {
+                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                    lastPipeline = ToBackend(cmd->pipeline).Get();
+                    lastPipeline->ApplyNow();
+
+                    bindGroupTracker.OnSetPipeline(lastPipeline);
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        // Dynamic offsets are stored inline in the command stream
+                        // right after the SetBindGroup command.
+                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                    cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::InsertDebugMarker:
+                case Command::PopDebugGroup:
+                case Command::PushDebugGroup: {
+                    // Due to lack of linux driver support for GL_EXT_debug_marker
+                    // extension these functions are skipped.
+                    SkipCommand(&mCommands, type);
+                    break;
+                }
+
+                case Command::WriteTimestamp: {
+                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+                }
+
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // EndComputePass should have been called
+        UNREACHABLE();
+    }
+
+    // Replays one recorded render pass: builds a temporary GL framebuffer for the
+    // pass attachments, applies load-op clears, then replays draw/state commands
+    // (including render bundles) until EndRenderPass, where multisample resolves
+    // run and the temporary FBO is deleted.
+    MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        GLuint fbo = 0;
+
+        // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
+        {
+            // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
+            // Windows/Intel. It should break any feedback loop before the clears, even if there
+            // shouldn't be any negative effects from this. Investigate whether it's actually
+            // needed.
+            gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+            // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
+            // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
+            // be created and destroyed at draw time.
+            gl.GenFramebuffers(1, &fbo);
+            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
+
+            // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
+            // (GL_NONE).
+            ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
+
+            // Construct GL framebuffer
+
+            ColorAttachmentIndex attachmentCount(uint8_t(0));
+            for (ColorAttachmentIndex i :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                TextureViewBase* textureView = renderPass->colorAttachments[i].view.Get();
+                GLuint texture = ToBackend(textureView->GetTexture())->GetHandle();
+
+                GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
+
+                // Attach color buffers.
+                if (textureView->GetTexture()->GetArrayLayers() == 1) {
+                    GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
+                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture,
+                                            textureView->GetBaseMipLevel());
+                } else {
+                    // Array textures must be attached per-layer.
+                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, texture,
+                                               textureView->GetBaseMipLevel(),
+                                               textureView->GetBaseArrayLayer());
+                }
+                drawBuffers[i] = glAttachment;
+                // attachmentCount ends up as (highest used attachment index + 1),
+                // which is the count glDrawBuffers needs.
+                attachmentCount = i;
+                attachmentCount++;
+            }
+            gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                TextureViewBase* textureView = renderPass->depthStencilAttachment.view.Get();
+                GLuint texture = ToBackend(textureView->GetTexture())->GetHandle();
+                const Format& format = textureView->GetTexture()->GetFormat();
+
+                // Attach depth/stencil buffer.
+                GLenum glAttachment = 0;
+                if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
+                    glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+                } else if (format.aspects == Aspect::Depth) {
+                    glAttachment = GL_DEPTH_ATTACHMENT;
+                } else if (format.aspects == Aspect::Stencil) {
+                    glAttachment = GL_STENCIL_ATTACHMENT;
+                } else {
+                    UNREACHABLE();
+                }
+
+                if (textureView->GetTexture()->GetArrayLayers() == 1) {
+                    GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
+                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture,
+                                            textureView->GetBaseMipLevel());
+                } else {
+                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, texture,
+                                               textureView->GetBaseMipLevel(),
+                                               textureView->GetBaseArrayLayer());
+                }
+            }
+        }
+
+        ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+
+        // Set defaults for dynamic state before executing clears and commands.
+        PersistentPipelineState persistentPipelineState;
+        persistentPipelineState.SetDefaultState(gl);
+        gl.BlendColor(0, 0, 0, 0);
+        gl.Viewport(0, 0, renderPass->width, renderPass->height);
+        gl.DepthRangef(0.0, 1.0);
+        gl.Scissor(0, 0, renderPass->width, renderPass->height);
+
+        // Clear framebuffer attachments as needed
+        {
+            for (ColorAttachmentIndex index :
+                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                uint8_t i = static_cast<uint8_t>(index);
+                auto* attachmentInfo = &renderPass->colorAttachments[index];
+
+                // Load op - color
+                if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
+                    // Write masks affect glClearBuffer*; enable all channels so the
+                    // clear is not partially masked out.
+                    gl.ColorMask(true, true, true, true);
+
+                    // The clear entry point must match the attachment's component
+                    // type (float/uint/sint).
+                    wgpu::TextureComponentType baseType =
+                        attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
+                    switch (baseType) {
+                        case wgpu::TextureComponentType::Float: {
+                            const std::array<float, 4> appliedClearColor =
+                                ConvertToFloatColor(attachmentInfo->clearColor);
+                            gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Uint: {
+                            const std::array<uint32_t, 4> appliedClearColor =
+                                ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
+                            gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+                        case wgpu::TextureComponentType::Sint: {
+                            const std::array<int32_t, 4> appliedClearColor =
+                                ConvertToSignedIntegerColor(attachmentInfo->clearColor);
+                            gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
+                            break;
+                        }
+
+                        case wgpu::TextureComponentType::DepthComparison:
+                            UNREACHABLE();
+                    }
+                }
+
+                if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
+                    // TODO(natlee@microsoft.com): call glDiscard to do optimization
+                }
+            }
+
+            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                auto* attachmentInfo = &renderPass->depthStencilAttachment;
+                const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
+
+                // Load op - depth/stencil
+                bool doDepthClear = attachmentFormat.HasDepth() &&
+                                    (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
+                bool doStencilClear = attachmentFormat.HasStencil() &&
+                                      (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
+
+                // Depth/stencil write masks also gate glClearBuffer*; open them
+                // before clearing.
+                if (doDepthClear) {
+                    gl.DepthMask(GL_TRUE);
+                }
+                if (doStencilClear) {
+                    gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
+                }
+
+                if (doDepthClear && doStencilClear) {
+                    gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
+                                     attachmentInfo->clearStencil);
+                } else if (doDepthClear) {
+                    gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
+                } else if (doStencilClear) {
+                    const GLint clearStencil = attachmentInfo->clearStencil;
+                    gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
+                }
+            }
+        }
+
+        RenderPipeline* lastPipeline = nullptr;
+        uint64_t indexBufferBaseOffset = 0;
+        // NOTE(review): indexBufferFormat/indexFormatSize are only initialized by
+        // SetIndexBuffer; indexed draws before it read indeterminate values —
+        // presumably frontend validation guarantees SetIndexBuffer comes first.
+        GLenum indexBufferFormat;
+        uint32_t indexFormatSize;
+
+        VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
+        BindGroupTracker bindGroupTracker = {};
+
+        // Shared handler for commands that may appear either directly in the pass
+        // or inside an executed render bundle.
+        auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+            switch (type) {
+                case Command::Draw: {
+                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    if (draw->firstInstance > 0) {
+                        gl.DrawArraysInstancedBaseInstance(
+                            lastPipeline->GetGLPrimitiveTopology(), draw->firstVertex,
+                            draw->vertexCount, draw->instanceCount, draw->firstInstance);
+                    } else {
+                        // This branch is only needed on OpenGL < 4.2
+                        gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
+                                               draw->firstVertex, draw->vertexCount,
+                                               draw->instanceCount);
+                    }
+                    break;
+                }
+
+                case Command::DrawIndexed: {
+                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    if (draw->firstInstance > 0) {
+                        gl.DrawElementsInstancedBaseVertexBaseInstance(
+                            lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                            indexBufferFormat,
+                            reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                    indexBufferBaseOffset),
+                            draw->instanceCount, draw->baseVertex, draw->firstInstance);
+                    } else {
+                        // This branch is only needed on OpenGL < 4.2; ES < 3.2
+                        if (draw->baseVertex != 0) {
+                            gl.DrawElementsInstancedBaseVertex(
+                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                                indexBufferFormat,
+                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                        indexBufferBaseOffset),
+                                draw->instanceCount, draw->baseVertex);
+                        } else {
+                            // This branch is only needed on OpenGL < 3.2; ES < 3.2
+                            gl.DrawElementsInstanced(
+                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                                indexBufferFormat,
+                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                        indexBufferBaseOffset),
+                                draw->instanceCount);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::DrawIndirect: {
+                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    uint64_t indirectBufferOffset = draw->indirectOffset;
+                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+
+                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DrawArraysIndirect(
+                        lastPipeline->GetGLPrimitiveTopology(),
+                        reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
+                    break;
+                }
+
+                case Command::DrawIndexedIndirect: {
+                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                    vertexStateBufferBindingTracker.Apply(gl);
+                    bindGroupTracker.Apply(gl);
+
+                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+                    ASSERT(indirectBuffer != nullptr);
+
+                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                    gl.DrawElementsIndirect(
+                        lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
+                        reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
+                    break;
+                }
+
+                case Command::InsertDebugMarker:
+                case Command::PopDebugGroup:
+                case Command::PushDebugGroup: {
+                    // Due to lack of linux driver support for GL_EXT_debug_marker
+                    // extension these functions are skipped.
+                    SkipCommand(iter, type);
+                    break;
+                }
+
+                case Command::SetRenderPipeline: {
+                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                    lastPipeline = ToBackend(cmd->pipeline).Get();
+                    lastPipeline->ApplyNow(persistentPipelineState);
+
+                    vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
+                    bindGroupTracker.OnSetPipeline(lastPipeline);
+                    break;
+                }
+
+                case Command::SetBindGroup: {
+                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                    uint32_t* dynamicOffsets = nullptr;
+                    if (cmd->dynamicOffsetCount > 0) {
+                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                    }
+                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                    cmd->dynamicOffsetCount, dynamicOffsets);
+                    break;
+                }
+
+                case Command::SetIndexBuffer: {
+                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                    indexBufferBaseOffset = cmd->offset;
+                    indexBufferFormat = IndexFormatType(cmd->format);
+                    indexFormatSize = IndexFormatSize(cmd->format);
+                    vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
+                    break;
+                }
+
+                case Command::SetVertexBuffer: {
+                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+                    vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
+                                                                      cmd->offset);
+                    break;
+                }
+
+                default:
+                    UNREACHABLE();
+                    break;
+            }
+        };
+
+        Command type;
+        while (mCommands.NextCommandId(&type)) {
+            switch (type) {
+                case Command::EndRenderPass: {
+                    mCommands.NextCommand<EndRenderPassCmd>();
+
+                    // Resolve multisampled color attachments before tearing down
+                    // the pass framebuffer.
+                    if (renderPass->attachmentState->GetSampleCount() > 1) {
+                        ResolveMultisampledRenderTargets(gl, renderPass);
+                    }
+                    gl.DeleteFramebuffers(1, &fbo);
+                    return {};
+                }
+
+                case Command::SetStencilReference: {
+                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                    persistentPipelineState.SetStencilReference(gl, cmd->reference);
+                    break;
+                }
+
+                case Command::SetViewport: {
+                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                    if (gl.IsAtLeastGL(4, 1)) {
+                        gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
+                    } else {
+                        // Floating-point viewport coords are unsupported on OpenGL ES, but
+                        // truncation is ok because other APIs do not guarantee subpixel precision
+                        // either.
+                        gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
+                                    static_cast<int>(cmd->width), static_cast<int>(cmd->height));
+                    }
+                    gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
+                    break;
+                }
+
+                case Command::SetScissorRect: {
+                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                    gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
+                    break;
+                }
+
+                case Command::SetBlendConstant: {
+                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                    const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
+                    gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
+                    break;
+                }
+
+                case Command::ExecuteBundles: {
+                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                    // Replay each bundle's own command stream with the shared
+                    // handler so bundle state flows through the same trackers.
+                    for (uint32_t i = 0; i < cmd->count; ++i) {
+                        CommandIterator* iter = bundles[i]->GetCommands();
+                        iter->Reset();
+                        while (iter->NextCommandId(&type)) {
+                            DoRenderBundleCommand(iter, type);
+                        }
+                    }
+                    break;
+                }
+
+                case Command::BeginOcclusionQuery: {
+                    return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
+                }
+
+                case Command::EndOcclusionQuery: {
+                    return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
+                }
+
+                case Command::WriteTimestamp:
+                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+
+                default: {
+                    DoRenderBundleCommand(&mCommands, type);
+                    break;
+                }
+            }
+        }
+
+        // EndRenderPass should have been called
+        UNREACHABLE();
+    }
+
+    // Uploads texel data into a texture via glTexSubImage*/glCompressedTexSubImage*.
+    // Like those GL entry points, "data" is either a pointer to client memory or a
+    // byte offset when a pixel-unpack buffer (PBO) is bound. When bytesPerRow is a
+    // multiple of the block byte size the whole image is uploaded in one call using
+    // GL_UNPACK_ROW_LENGTH; otherwise the upload falls back to row-by-row (and
+    // layer-by-layer) copies.
+    void DoTexSubImage(const OpenGLFunctions& gl,
+                       const TextureCopy& destination,
+                       const void* data,
+                       const TextureDataLayout& dataLayout,
+                       const Extent3D& copySize) {
+        Texture* texture = ToBackend(destination.texture.Get());
+        ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+        const GLFormat& format = texture->GetGLFormat();
+        GLenum target = texture->GetGLTarget();
+        // Apply the layout's starting offset up front (valid for both pointers
+        // and PBO offsets).
+        data = static_cast<const uint8_t*>(data) + dataLayout.offset;
+        gl.ActiveTexture(GL_TEXTURE0);
+        gl.BindTexture(target, texture->GetHandle());
+        const TexelBlockInfo& blockInfo =
+            texture->GetFormat().GetAspectInfo(destination.aspect).block;
+
+        uint32_t x = destination.origin.x;
+        uint32_t y = destination.origin.y;
+        uint32_t z = destination.origin.z;
+        if (texture->GetFormat().isCompressed) {
+            size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
+            Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
+            // Clamp against the mip level's virtual size so partial last blocks
+            // are not written past the edge.
+            uint32_t width = std::min(copySize.width, virtSize.width - x);
+
+            // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
+            // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
+            // this limitation by copying the compressed texture data once per row.
+            // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
+            // Buffer Objects" for more details. For Desktop GL, we use row-by-row
+            // copies only for uploads where bytesPerRow is not a multiple of byteSize.
+            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
+                size_t imageSize =
+                    rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
+
+                uint32_t height = std::min(copySize.height, virtSize.height - y);
+
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
+
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                               format.internalFormat, imageSize, data);
+                } else {
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+                                   dataLayout.rowsPerImage * blockInfo.height);
+                    gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                               copySize.depthOrArrayLayers, format.internalFormat,
+                                               imageSize, data);
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+                }
+
+                // Restore default unpack state so later uploads are unaffected.
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
+                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+            } else {
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    const uint8_t* d = static_cast<const uint8_t*>(data);
+
+                    // One block-row per call; advance the source by bytesPerRow.
+                    for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+                        uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                        gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
+                                                   height, format.internalFormat, rowSize, d);
+                        d += dataLayout.bytesPerRow;
+                    }
+                } else {
+                    const uint8_t* slice = static_cast<const uint8_t*>(data);
+
+                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                        const uint8_t* d = slice;
+
+                        for (y = destination.origin.y; y < destination.origin.y + copySize.height;
+                             y += blockInfo.height) {
+                            uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                            gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
+                                                       height, 1, format.internalFormat, rowSize,
+                                                       d);
+                            d += dataLayout.bytesPerRow;
+                        }
+
+                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+                    }
+                }
+            }
+        } else {
+            uint32_t width = copySize.width;
+            uint32_t height = copySize.height;
+            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                     format.format, format.type, data);
+                } else {
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
+                                   dataLayout.rowsPerImage * blockInfo.height);
+                    gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                     copySize.depthOrArrayLayers, format.format, format.type, data);
+                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+                }
+                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+            } else {
+                // bytesPerRow is not expressible via GL_UNPACK_ROW_LENGTH;
+                // upload one row (height 1) at a time instead.
+                if (texture->GetArrayLayers() == 1 &&
+                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    const uint8_t* d = static_cast<const uint8_t*>(data);
+                    for (; y < destination.origin.y + height; ++y) {
+                        gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
+                                         format.format, format.type, d);
+                        d += dataLayout.bytesPerRow;
+                    }
+                } else {
+                    const uint8_t* slice = static_cast<const uint8_t*>(data);
+                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                        const uint8_t* d = slice;
+                        for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
+                            gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
+                                             format.format, format.type, d);
+                            d += dataLayout.bytesPerRow;
+                        }
+                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
+                    }
+                }
+            }
+        }
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h
new file mode 100644
index 00000000000..e0f3193c53e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/CommandBufferGL.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
+#define DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
+
+#include "dawn/native/CommandBuffer.h"
+
+namespace dawn::native {
+ struct BeginRenderPassCmd;
+} // namespace dawn::native
+
+namespace dawn::native::opengl {
+
+ class Device;
+ struct OpenGLFunctions;
+
+    // OpenGL-backend command buffer: stores recorded commands and replays them
+    // against the device's GL context when Execute() is called.
+    class CommandBuffer final : public CommandBufferBase {
+      public:
+        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+        // Replays all recorded commands; returns an error for commands the GL
+        // backend does not implement (e.g. timestamps, occlusion queries).
+        MaybeError Execute();
+
+      private:
+        // Helpers used by Execute() to replay one compute / render pass.
+        MaybeError ExecuteComputePass();
+        MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
+    };
+
+ // Like glTexSubImage*, the "data" argument is either a pointer to image data or
+ // an offset if a PBO is bound.
+ void DoTexSubImage(const OpenGLFunctions& gl,
+ const TextureCopy& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& copySize);
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp
new file mode 100644
index 00000000000..b53541156c8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.cpp
@@ -0,0 +1,45 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/ComputePipelineGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    // static
+    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+        Device* device,
+        const ComputePipelineDescriptor* descriptor) {
+        // Allocate the backend pipeline object only; the fallible GL program
+        // setup is deferred to Initialize().
+        ComputePipeline* pipeline = new ComputePipeline(device, descriptor);
+        return AcquireRef(pipeline);
+    }
+
+    // Out-of-line default destructor; the GL program is deleted in DestroyImpl().
+    ComputePipeline::~ComputePipeline() = default;
+
+    void ComputePipeline::DestroyImpl() {
+        // Run the frontend destruction first, then delete the GL program object.
+        ComputePipelineBase::DestroyImpl();
+        DeleteProgram(ToBackend(GetDevice())->gl);
+    }
+
+    // Performs the fallible pipeline setup (shared PipelineGL initialization
+    // with this pipeline's layout and shader stages).
+    MaybeError ComputePipeline::Initialize() {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+        DAWN_TRY(InitializeBase(gl, ToBackend(GetLayout()), GetAllStages()));
+        return {};
+    }
+
+    void ComputePipeline::ApplyNow() {
+        // Makes this pipeline's GL state current via the shared PipelineGL base.
+        PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h
new file mode 100644
index 00000000000..23be225d07a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ComputePipelineGL.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/native/opengl/PipelineGL.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+    // OpenGL-backend compute pipeline. Combines the frontend pipeline object with
+    // the shared PipelineGL program/bindings implementation.
+    class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
+      public:
+        // Allocates the pipeline without initializing it; Initialize() performs
+        // the fallible GL program setup afterwards.
+        static Ref<ComputePipeline> CreateUninitialized(
+            Device* device,
+            const ComputePipelineDescriptor* descriptor);
+
+        // Makes this pipeline's GL state current for subsequent dispatches.
+        void ApplyNow();
+
+        MaybeError Initialize() override;
+
+      private:
+        using ComputePipelineBase::ComputePipelineBase;
+        ~ComputePipeline() override;
+        void DestroyImpl() override;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp
new file mode 100644
index 00000000000..00222d1a3cd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.cpp
@@ -0,0 +1,315 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/StagingBuffer.h"
+#include "dawn/native/opengl/BindGroupGL.h"
+#include "dawn/native/opengl/BindGroupLayoutGL.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/ComputePipelineGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/QuerySetGL.h"
+#include "dawn/native/opengl/QueueGL.h"
+#include "dawn/native/opengl/RenderPipelineGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/ShaderModuleGL.h"
+#include "dawn/native/opengl/SwapChainGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+
+namespace dawn::native::opengl {
+
+ // static
+ ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions) {
+ Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
+ DAWN_TRY(device->Initialize());
+ return device;
+ }
+
+ Device::Device(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions)
+ : DeviceBase(adapter, descriptor), gl(functions) {
+ }
+
+ Device::~Device() {
+ Destroy();
+ }
+
+ MaybeError Device::Initialize() {
+ InitTogglesFromDriver();
+ mFormatTable = BuildGLFormatTable();
+
+ return DeviceBase::Initialize(new Queue(this));
+ }
+
+ void Device::InitTogglesFromDriver() {
+ bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
+
+ bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
+
+ // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
+ bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
+
+ bool supportsSnormRead =
+ gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
+
+ bool supportsDepthStencilRead =
+ gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
+
+ bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
+ gl.IsGLExtensionSupported("GL_OES_sample_variables");
+
+ // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
+ // procs without the extension suffix.
+ // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
+
+ // supportsBaseVertex |=
+ // (gl.IsAtLeastGLES(2, 0) &&
+ // (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
+ // gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
+
+ // supportsBaseInstance |=
+ // (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
+
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
+ SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
+ SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
+ SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
+ SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
+ SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
+ SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
+ // For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline.
+ SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
+ }
+
+ const GLFormat& Device::GetGLFormat(const Format& format) {
+ ASSERT(format.isSupported);
+ ASSERT(format.GetIndex() < mFormatTable.size());
+
+ const GLFormat& result = mFormatTable[format.GetIndex()];
+ ASSERT(result.isSupportedOnBackend);
+ return result;
+ }
+
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
+ return BindGroup::Create(this, descriptor);
+ }
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+ }
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return AcquireRef(new Buffer(this, descriptor));
+ }
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+ }
+ Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) {
+ return ComputePipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(this, descriptor));
+ }
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
+ }
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) {
+ return RenderPipeline::CreateUninitialized(this, descriptor);
+ }
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
+ }
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ return ShaderModule::Create(this, descriptor, parseResult);
+ }
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new SwapChain(this, descriptor));
+ }
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
+ }
+ ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+ return AcquireRef(new Texture(this, descriptor));
+ }
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+ }
+
+ void Device::SubmitFenceSync() {
+ GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ IncrementLastSubmittedCommandSerial();
+ mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
+ }
+
+ MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+ ::EGLImage image) {
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "Texture dimension (%s) is not %s.", descriptor->dimension,
+ wgpu::TextureDimension::e2D);
+
+ DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+ descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
+ "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
+
+ DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+ descriptor->sampleCount);
+
+ DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
+ wgpu::TextureUsage::StorageBinding),
+ "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
+ wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
+
+ return {};
+ }
+ TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+ ::EGLImage image) {
+ const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+ return nullptr;
+ }
+ if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
+ return nullptr;
+ }
+
+ GLuint tex;
+ gl.GenTextures(1, &tex);
+ gl.BindTexture(GL_TEXTURE_2D, tex);
+ gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+ GLint width, height, internalFormat;
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
+ gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
+
+ if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
+ textureDescriptor->size.height != static_cast<uint32_t>(height) ||
+ textureDescriptor->size.depthOrArrayLayers != 1) {
+ ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
+ "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
+ width, height, &textureDescriptor->size));
+ gl.DeleteTextures(1, &tex);
+ return nullptr;
+ }
+
+ // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
+ // in the passed-in TextureDescriptor.
+ return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
+ }
+
+ MaybeError Device::TickImpl() {
+ return {};
+ }
+
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+ ExecutionSerial fenceSerial{0};
+ while (!mFencesInFlight.empty()) {
+ auto [sync, tentativeSerial] = mFencesInFlight.front();
+
+ // Fence are added in order, so we can stop searching as soon
+ // as we see one that's not ready.
+
+ // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
+ if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
+ gl.Flush();
+ }
+ GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
+ if (result == GL_TIMEOUT_EXPIRED) {
+ return fenceSerial;
+ }
+ // Update fenceSerial since fence is ready.
+ fenceSerial = tentativeSerial;
+
+ gl.DeleteSync(sync);
+
+ mFencesInFlight.pop();
+
+ ASSERT(fenceSerial > GetCompletedCommandSerial());
+ }
+ return fenceSerial;
+ }
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
+ }
+
+ MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
+ }
+
+ MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
+ }
+
+ void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ gl.Finish();
+ DAWN_TRY(CheckPassedSerials());
+ ASSERT(mFencesInFlight.empty());
+
+ return {};
+ }
+
+ uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return 1;
+ }
+
+ uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return 1;
+ }
+
+ float Device::GetTimestampPeriodInNS() const {
+ return 1.0f;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h
new file mode 100644
index 00000000000..f6c673c0cb0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/DeviceGL.h
@@ -0,0 +1,131 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_DEVICEGL_H_
+#define DAWNNATIVE_OPENGL_DEVICEGL_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/QuerySet.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/GLFormat.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+#include <queue>
+
+// Remove windows.h macros after glad's include of windows.h
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include "dawn/common/windows_with_undefs.h"
+#endif
+
+typedef void* EGLImage;
+
+namespace dawn::native::opengl {
+
+ class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions);
+ ~Device() override;
+
+ MaybeError Initialize();
+
+ // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
+ const OpenGLFunctions gl;
+
+ const GLFormat& GetGLFormat(const Format& format);
+
+ void SubmitFenceSync();
+
+ MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+ ::EGLImage image);
+ TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+ ::EGLImage image);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ Device(AdapterBase* adapter,
+ const DeviceDescriptor* descriptor,
+ const OpenGLFunctions& functions);
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+
+ void InitTogglesFromDriver();
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
+
+ GLFormatTable mFormatTable;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_DEVICEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h b/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h
new file mode 100644
index 00000000000..daf2dc32e15
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/Forward.h
@@ -0,0 +1,66 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_FORWARD_H_
+#define DAWNNATIVE_OPENGL_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::opengl {
+
+ class Adapter;
+ class BindGroup;
+ class BindGroupLayout;
+ class Buffer;
+ class CommandBuffer;
+ class ComputePipeline;
+ class Device;
+ class PersistentPipelineState;
+ class PipelineLayout;
+ class QuerySet;
+ class Queue;
+ class RenderPipeline;
+ class Sampler;
+ class ShaderModule;
+ class SwapChain;
+ class Texture;
+ class TextureView;
+
+ struct OpenGLBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+ };
+
+ template <typename T>
+ auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
+ return ToBackendBase<OpenGLBackendTraits>(common);
+ }
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp
new file mode 100644
index 00000000000..6f7e1dee193
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.cpp
@@ -0,0 +1,120 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/GLFormat.h"
+
+namespace dawn::native::opengl {
+
+ GLFormatTable BuildGLFormatTable() {
+ GLFormatTable table;
+
+ using Type = GLFormat::ComponentType;
+
+ auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
+ GLenum format, GLenum type, Type componentType) {
+ size_t index = ComputeFormatIndex(dawnFormat);
+ ASSERT(index < table.size());
+
+ table[index].internalFormat = internalFormat;
+ table[index].format = format;
+ table[index].type = type;
+ table[index].componentType = componentType;
+ table[index].isSupportedOnBackend = true;
+ };
+
+ // It's dangerous to go alone, take this:
+ //
+ // [ANGLE's formatutils.cpp]
+ // [ANGLE's formatutilsgl.cpp]
+ //
+ // The format tables in these files are extremely complete and the best reference on GL
+ // format support, enums, etc.
+
+ // clang-format off
+
+ // 1 byte color formats
+ AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::R8Snorm, GL_R8_SNORM, GL_RED, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::R8Uint, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R8Sint, GL_R8I, GL_RED_INTEGER, GL_BYTE, Type::Int);
+
+ // 2 bytes color formats
+ AddFormat(wgpu::TextureFormat::R16Uint, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R16Sint, GL_R16I, GL_RED_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::R16Float, GL_R16F, GL_RED, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Unorm, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Snorm, GL_RG8_SNORM, GL_RG, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Uint, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG8Sint, GL_RG8I, GL_RG_INTEGER, GL_BYTE, Type::Int);
+
+ // 4 bytes color formats
+ AddFormat(wgpu::TextureFormat::R32Uint, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R32Sint, GL_R32I, GL_RED_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::R32Float, GL_R32F, GL_RED, GL_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG16Uint, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG16Sint, GL_RG16I, GL_RG_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RG16Float, GL_RG16F, GL_RG, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Unorm, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8UnormSrgb, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Snorm, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
+
+ // This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
+ AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG11B10Ufloat, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGB9E5Ufloat, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, Type::Float);
+
+ // 8 bytes color formats
+ AddFormat(wgpu::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG32Sint, GL_RG32I, GL_RG_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RG32Float, GL_RG32F, GL_RG, GL_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA16Uint, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA16Sint, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RGBA16Float, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, Type::Float);
+
+ // 16 bytes color formats
+ AddFormat(wgpu::TextureFormat::RGBA32Uint, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA32Sint, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RGBA32Float, GL_RGBA32F, GL_RGBA, GL_FLOAT, Type::Float);
+
+ // Depth stencil formats
+ AddFormat(wgpu::TextureFormat::Depth32Float, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth24Plus, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth24PlusStencil8, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth16Unorm, GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, Type::DepthStencil);
+
+ // Block compressed formats
+ AddFormat(wgpu::TextureFormat::BC1RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC2RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC3RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC4RSnorm, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC4RUnorm, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC5RGSnorm, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC5RGUnorm, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC6HRGBFloat, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC6HRGBUfloat, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+
+ // clang-format on
+
+ return table;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h
new file mode 100644
index 00000000000..d58f697378e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/GLFormat.h
@@ -0,0 +1,42 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_GLFORMAT_H_
+#define DAWNNATIVE_OPENGL_GLFORMAT_H_
+
+#include "dawn/native/Format.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ struct GLFormat {
+ GLenum internalFormat = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ bool isSupportedOnBackend = false;
+
+ // OpenGL has different functions depending on the format component type, for example
+ // glClearBufferfv is only valid on formats with the Float ComponentType
+ enum ComponentType { Float, Int, Uint, DepthStencil };
+ ComponentType componentType;
+ };
+
+ using GLFormatTable = std::array<GLFormat, kKnownFormatCount>;
+ GLFormatTable BuildGLFormatTable();
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_GLFORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
new file mode 100644
index 00000000000..b01e7e3b4f0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
@@ -0,0 +1,88 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/NativeSwapChainImplGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+ NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
+ PresentCallback present,
+ void* presentUserdata)
+ : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {
+ }
+
+ NativeSwapChainImpl::~NativeSwapChainImpl() {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.DeleteTextures(1, &mBackTexture);
+ gl.DeleteFramebuffers(1, &mBackFBO);
+ }
+
+ void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.GenTextures(1, &mBackTexture);
+ gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+ gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+
+ gl.GenFramebuffers(1, &mBackFBO);
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+ gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ mBackTexture, 0);
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ if (format != WGPUTextureFormat_RGBA8Unorm) {
+ return "unsupported format";
+ }
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+ mWidth = width;
+ mHeight = height;
+
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+ // Reallocate the texture
+ gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ nullptr);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ nextTexture->texture.u32 = mBackTexture;
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Present() {
+ const OpenGLFunctions& gl = mDevice->gl;
+ gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+ gl.Scissor(0, 0, mWidth, mHeight);
+ gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
+ GL_NEAREST);
+
+ mPresentCallback(mPresentUserdata);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h
new file mode 100644
index 00000000000..bd7bc9de7ea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/NativeSwapChainImplGL.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
+#define DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
+
+#include "dawn/native/OpenGLBackend.h"
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+    // Implements Dawn's function-pointer based swapchain interface on top of a
+    // GL texture ("back buffer") that Present() blits to the default framebuffer.
+    class NativeSwapChainImpl {
+      public:
+        using WSIContext = DawnWSIContextGL;
+
+        NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
+        ~NativeSwapChainImpl();
+
+        // Creates the back texture and the read-FBO used as the blit source.
+        void Init(DawnWSIContextGL* context);
+        // Validates |format| (RGBA8Unorm only) and (re)sizes the back texture.
+        DawnSwapChainError Configure(WGPUTextureFormat format,
+                                     WGPUTextureUsage,
+                                     uint32_t width,
+                                     uint32_t height);
+        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+        // Blits the back texture to the default framebuffer (Y-flipped) and
+        // invokes the present callback.
+        DawnSwapChainError Present();
+
+        wgpu::TextureFormat GetPreferredFormat() const;
+
+      private:
+        PresentCallback mPresentCallback;
+        void* mPresentUserdata;
+
+        uint32_t mWidth = 0;
+        uint32_t mHeight = 0;
+        GLuint mBackFBO = 0;      // read framebuffer wrapping mBackTexture
+        GLuint mBackTexture = 0;  // texture handed out by GetNextTexture()
+
+        Device* mDevice = nullptr;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OWNERS b/chromium/third_party/dawn/src/dawn/native/opengl/OWNERS
index d6d65109732..d6d65109732 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OWNERS
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OWNERS
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp
new file mode 100644
index 00000000000..739de6258db
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLBackend.cpp
@@ -0,0 +1,65 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// OpenGLBackend.cpp: contains the definition of symbols exported by OpenGLBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+#include "dawn/native/OpenGLBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/NativeSwapChainImplGL.h"
+
+namespace dawn::native::opengl {
+
+    // Discovery options tagged for the Desktop GL backend.
+    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {}
+
+    // Discovery options tagged for the OpenGL ES backend.
+    AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
+        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {}
+
+    // Wraps a heap-allocated NativeSwapChainImpl in the C WSI interface struct.
+    DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                          PresentCallback present,
+                                                          void* presentUserdata) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        NativeSwapChainImpl* swapChain =
+            new NativeSwapChainImpl(backendDevice, present, presentUserdata);
+
+        DawnSwapChainImplementation implementation = CreateSwapChainImplementation(swapChain);
+        implementation.textureUsage = WGPUTextureUsage_Present;
+        return implementation;
+    }
+
+    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+        const DawnSwapChainImplementation* swapChain) {
+        // userData was set to the NativeSwapChainImpl by CreateSwapChainImplementation.
+        auto* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+    }
+
+    // Tags the descriptor so the core code can dispatch on the external image kind.
+    ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
+        : ExternalImageDescriptor(ExternalImageType::EGLImage) {}
+
+    // Creates a Dawn texture backed by an existing EGLImage.
+    WGPUTexture WrapExternalEGLImage(WGPUDevice device,
+                                     const ExternalImageDescriptorEGLImage* descriptor) {
+        Device* backendDevice = ToBackend(FromAPI(device));
+        return ToAPI(
+            backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image));
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp
new file mode 100644
index 00000000000..45f8354a2fd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.cpp
@@ -0,0 +1,61 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+#include <cctype>
+
+namespace dawn::native::opengl {
+
+    // Detects the context's standard/version, then loads the matching set of
+    // GL entry points and records the supported extensions.
+    MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
+        DAWN_TRY(mVersion.Initialize(getProc));
+
+        const uint32_t major = mVersion.GetMajor();
+        const uint32_t minor = mVersion.GetMinor();
+        if (mVersion.IsES()) {
+            DAWN_TRY(LoadOpenGLESProcs(getProc, major, minor));
+        } else {
+            DAWN_TRY(LoadDesktopGLProcs(getProc, major, minor));
+        }
+
+        InitializeSupportedGLExtensions();
+        return {};
+    }
+
+    // Queries the extension list one entry at a time (GL_NUM_EXTENSIONS /
+    // glGetStringi) and caches the names for O(1) lookup.
+    void OpenGLFunctions::InitializeSupportedGLExtensions() {
+        int32_t extensionCount = 0;
+        GetIntegerv(GL_NUM_EXTENSIONS, &extensionCount);
+
+        for (int32_t index = 0; index < extensionCount; ++index) {
+            const char* extensionName =
+                reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, index));
+            mSupportedGLExtensionsSet.insert(extensionName);
+        }
+    }
+
+    // True iff |extension| appeared in the context's extension list.
+    bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
+        ASSERT(extension != nullptr);
+        return mSupportedGLExtensionsSet.find(extension) != mSupportedGLExtensionsSet.end();
+    }
+
+    // Returns the version information parsed during Initialize().
+    const OpenGLVersion& OpenGLFunctions::GetVersion() const {
+        return mVersion;
+    }
+
+    // True iff this is a Desktop GL context of version >= major.minor.
+    bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
+        return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
+    }
+
+    // True iff this is an OpenGL ES context of version >= major.minor.
+    bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
+        return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h
new file mode 100644
index 00000000000..3da6c860b6b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLFunctions.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
+#define DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
+
+#include <unordered_set>
+
+#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
+#include "dawn/native/opengl/OpenGLVersion.h"
+
+namespace dawn::native::opengl {
+
+    // Table of loaded GL entry points (from OpenGLFunctionsBase) plus version
+    // and extension queries for the current context.
+    struct OpenGLFunctions : OpenGLFunctionsBase {
+      public:
+        // Loads the entry points via |getProc|; must succeed before any other use.
+        MaybeError Initialize(GetProcAddress getProc);
+
+        const OpenGLVersion& GetVersion() const;
+        bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
+        bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
+
+        bool IsGLExtensionSupported(const char* extension) const;
+
+      private:
+        void InitializeSupportedGLExtensions();
+
+        OpenGLVersion mVersion;
+
+        // Extension names reported by the context, cached for O(1) lookup.
+        std::unordered_set<std::string> mSupportedGLExtensionsSet;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp
new file mode 100644
index 00000000000..60fffff2a65
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.cpp
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/OpenGLVersion.h"
+
+#include <cctype>
+#include <tuple>
+
+namespace dawn::native::opengl {
+
+    // Parses GL_VERSION to determine the standard (Desktop vs ES) and the
+    // major/minor version. Returns an internal error if glGetString cannot be
+    // loaded or the version string is missing/malformed.
+    MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
+        PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
+        if (getString == nullptr) {
+            return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
+        }
+
+        // glGetString may return nullptr on error; constructing std::string
+        // from nullptr is undefined behavior, so check first.
+        const char* versionCStr = reinterpret_cast<const char*>(getString(GL_VERSION));
+        if (versionCStr == nullptr) {
+            return DAWN_INTERNAL_ERROR("glGetString(GL_VERSION) returned nullptr");
+        }
+        std::string version = versionCStr;
+
+        // Bounds-checked digit test: never indexes past the end of |version|
+        // and never passes a (possibly negative) plain char to isdigit(),
+        // both of which would be undefined behavior.
+        auto isDigitAt = [&version](size_t i) {
+            return i < version.size() && isdigit(static_cast<unsigned char>(version[i]));
+        };
+
+        if (version.find("OpenGL ES") != std::string::npos) {
+            // ES spec states that the GL_VERSION string will be in the following format:
+            // "OpenGL ES N.M vendor-specific information"
+            if (!isDigitAt(10) || !isDigitAt(12)) {
+                return DAWN_INTERNAL_ERROR("Unexpected GL_VERSION format");
+            }
+            mStandard = Standard::ES;
+            mMajorVersion = version[10] - '0';
+            mMinorVersion = version[12] - '0';
+
+            // The minor version shouldn't get to two digits.
+            ASSERT(!isDigitAt(13));
+        } else {
+            // OpenGL spec states the GL_VERSION string will be in the following format:
+            // <version number><space><vendor-specific information>
+            // The version number is either of the form major number.minor number or major
+            // number.minor number.release number, where the numbers all have one or more
+            // digits
+            if (!isDigitAt(0) || !isDigitAt(2)) {
+                return DAWN_INTERNAL_ERROR("Unexpected GL_VERSION format");
+            }
+            mStandard = Standard::Desktop;
+            mMajorVersion = version[0] - '0';
+            mMinorVersion = version[2] - '0';
+
+            // The minor version shouldn't get to two digits.
+            ASSERT(!isDigitAt(3));
+        }
+
+        return {};
+    }
+
+    // True iff Initialize() detected a Desktop GL context.
+    bool OpenGLVersion::IsDesktop() const {
+        return mStandard == Standard::Desktop;
+    }
+
+    // True iff Initialize() detected an OpenGL ES context.
+    bool OpenGLVersion::IsES() const {
+        return mStandard == Standard::ES;
+    }
+
+    // Major version parsed from GL_VERSION; valid after Initialize() succeeds.
+    uint32_t OpenGLVersion::GetMajor() const {
+        return mMajorVersion;
+    }
+
+    // Minor version parsed from GL_VERSION; valid after Initialize() succeeds.
+    uint32_t OpenGLVersion::GetMinor() const {
+        return mMinorVersion;
+    }
+
+    // Lexicographic (major, minor) comparison against the context's version.
+    bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
+        if (mMajorVersion != majorVersion) {
+            return mMajorVersion > majorVersion;
+        }
+        return mMinorVersion >= minorVersion;
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h
new file mode 100644
index 00000000000..6b1e91bba44
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/OpenGLVersion.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_OPENGLVERSION_H_
+#define DAWNNATIVE_OPENGL_OPENGLVERSION_H_
+
+#include "dawn/native/opengl/OpenGLFunctionsBase_autogen.h"
+
+namespace dawn::native::opengl {
+
+    // Parses and stores the GL context's standard (Desktop vs ES) and version.
+    struct OpenGLVersion {
+      public:
+        MaybeError Initialize(GetProcAddress getProc);
+
+        bool IsDesktop() const;
+        bool IsES() const;
+        uint32_t GetMajor() const;
+        uint32_t GetMinor() const;
+        // Lexicographic (major, minor) comparison.
+        bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
+
+      private:
+        enum class Standard {
+            Desktop,
+            ES,
+        };
+        // Members below are only meaningful after a successful Initialize().
+        uint32_t mMajorVersion;
+        uint32_t mMinorVersion;
+        Standard mStandard;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_OPENGLVERSION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
new file mode 100644
index 00000000000..446ab1adf46
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
@@ -0,0 +1,58 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+
+#include "dawn/native/opengl/OpenGLFunctions.h"
+
+namespace dawn::native::opengl {
+
+    // Pushes the cached (default-initialized) stencil state to GL so the cache
+    // and the actual GL state agree.
+    void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
+        CallGLStencilFunc(gl);
+    }
+
+    // Updates the stencil compare functions and read mask, issuing GL calls
+    // only when at least one value actually changed.
+    void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                                         GLenum stencilBackCompareFunction,
+                                                         GLenum stencilFrontCompareFunction,
+                                                         uint32_t stencilReadMask) {
+        const bool changed = mStencilBackCompareFunction != stencilBackCompareFunction ||
+                             mStencilFrontCompareFunction != stencilFrontCompareFunction ||
+                             mStencilReadMask != stencilReadMask;
+        if (!changed) {
+            return;
+        }
+
+        mStencilBackCompareFunction = stencilBackCompareFunction;
+        mStencilFrontCompareFunction = stencilFrontCompareFunction;
+        mStencilReadMask = stencilReadMask;
+        CallGLStencilFunc(gl);
+    }
+
+    // Updates the stencil reference value, skipping the GL calls when unchanged.
+    void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
+                                                      uint32_t stencilReference) {
+        if (stencilReference != mStencilReference) {
+            mStencilReference = stencilReference;
+            CallGLStencilFunc(gl);
+        }
+    }
+
+    // Applies the cached stencil state: one glStencilFuncSeparate call for
+    // back faces and one for front faces.
+    void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
+        gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
+                               mStencilReadMask);
+        gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
+                               mStencilReadMask);
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h
new file mode 100644
index 00000000000..959e7f9ac64
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PersistentPipelineStateGL.h
@@ -0,0 +1,45 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
+#define DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ struct OpenGLFunctions;
+
+    // Caches the GL stencil-func state so redundant glStencilFuncSeparate calls
+    // can be skipped between pipeline/reference changes.
+    class PersistentPipelineState {
+      public:
+        void SetDefaultState(const OpenGLFunctions& gl);
+        void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                    GLenum stencilBackCompareFunction,
+                                    GLenum stencilFrontCompareFunction,
+                                    uint32_t stencilReadMask);
+        void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
+
+      private:
+        // Pushes the cached state with two glStencilFuncSeparate calls (back, front).
+        void CallGLStencilFunc(const OpenGLFunctions& gl);
+
+        GLenum mStencilBackCompareFunction = GL_ALWAYS;
+        GLenum mStencilFrontCompareFunction = GL_ALWAYS;
+        GLuint mStencilReadMask = 0xffffffff;
+        GLuint mStencilReference = 0;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp
new file mode 100644
index 00000000000..8890e6826c8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.cpp
@@ -0,0 +1,218 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PipelineGL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/OpenGLFunctions.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/native/opengl/SamplerGL.h"
+#include "dawn/native/opengl/ShaderModuleGL.h"
+
+#include <set>
+#include <sstream>
+
+namespace dawn::native::opengl {
+
+ namespace {
+
+        // Maps a Dawn shader stage to the corresponding GL shader object type.
+        GLenum GLShaderType(SingleShaderStage stage) {
+            if (stage == SingleShaderStage::Vertex) {
+                return GL_VERTEX_SHADER;
+            }
+            if (stage == SingleShaderStage::Fragment) {
+                return GL_FRAGMENT_SHADER;
+            }
+            if (stage == SingleShaderStage::Compute) {
+                return GL_COMPUTE_SHADER;
+            }
+            UNREACHABLE();
+        }
+
+ } // namespace
+
+    PipelineGL::PipelineGL() : mProgram(0) {}
+
+    PipelineGL::~PipelineGL() = default;
+
+    // Compiles one GLSL shader per active stage, links them into mProgram, and
+    // assigns texture units for the combined (sampler, texture) pairs produced
+    // by the GLSL translation. Returns a validation error on compile/link failure.
+    MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
+                                          const PipelineLayout* layout,
+                                          const PerStage<ProgrammableStage>& stages) {
+        // Compiles a single shader, returning its handle or a validation error.
+        auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
+                               const char* source) -> ResultOrError<GLuint> {
+            GLuint shader = gl.CreateShader(type);
+            gl.ShaderSource(shader, 1, &source, nullptr);
+            gl.CompileShader(shader);
+
+            GLint compileStatus = GL_FALSE;
+            gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
+            if (compileStatus == GL_FALSE) {
+                GLint infoLogLength = 0;
+                gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
+
+                // Always report the failure, even when the driver provides no
+                // info log; previously a failed compile with an empty log was
+                // silently treated as success and returned a broken shader.
+                std::string infoLog = "(no info log)";
+                if (infoLogLength > 1) {
+                    std::vector<char> buffer(infoLogLength);
+                    gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
+                    infoLog = buffer.data();
+                }
+                return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s",
+                                                    source, infoLog.c_str());
+            }
+            return shader;
+        };
+
+        mProgram = gl.CreateProgram();
+
+        // Compute the set of active stages.
+        wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
+        for (SingleShaderStage stage : IterateStages(kAllStages)) {
+            if (stages[stage].module != nullptr) {
+                activeStages |= StageBit(stage);
+            }
+        }
+
+        // Create an OpenGL shader for each stage and gather the list of combined samplers.
+        PerStage<CombinedSamplerInfo> combinedSamplers;
+        bool needsDummySampler = false;
+        std::vector<GLuint> glShaders;
+        for (SingleShaderStage stage : IterateStages(activeStages)) {
+            const ShaderModule* module = ToBackend(stages[stage].module.Get());
+            std::string glsl;
+            DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
+                                                          &combinedSamplers[stage], layout,
+                                                          &needsDummySampler));
+            GLuint shader;
+            DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
+            gl.AttachShader(mProgram, shader);
+            glShaders.push_back(shader);
+        }
+
+        // Some translated bindings need a placeholder non-filtering sampler.
+        if (needsDummySampler) {
+            SamplerDescriptor desc = {};
+            ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
+            ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
+            ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
+            mDummySampler =
+                ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
+        }
+
+        // Link all the shaders together.
+        gl.LinkProgram(mProgram);
+
+        GLint linkStatus = GL_FALSE;
+        gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
+        if (linkStatus == GL_FALSE) {
+            GLint infoLogLength = 0;
+            gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
+
+            // As above: a failed link must produce an error even without a log.
+            std::string infoLog = "(no info log)";
+            if (infoLogLength > 1) {
+                std::vector<char> buffer(infoLogLength);
+                gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
+                infoLog = buffer.data();
+            }
+            return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", infoLog.c_str());
+        }
+
+        // Compute links between stages for combined samplers, then bind them to texture units
+        gl.UseProgram(mProgram);
+        const auto& indices = layout->GetBindingIndexInfo();
+
+        // Deduplicate combined samplers that appear in multiple stages.
+        std::set<CombinedSampler> combinedSamplersSet;
+        for (SingleShaderStage stage : IterateStages(activeStages)) {
+            for (const CombinedSampler& combined : combinedSamplers[stage]) {
+                combinedSamplersSet.insert(combined);
+            }
+        }
+
+        mUnitsForSamplers.resize(layout->GetNumSamplers());
+        mUnitsForTextures.resize(layout->GetNumSampledTextures());
+
+        GLuint textureUnit = layout->GetTextureUnitsUsed();
+        for (const auto& combined : combinedSamplersSet) {
+            const std::string& name = combined.GetName();
+            GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+
+            // The combined sampler may have been optimized out of the program.
+            if (location == -1) {
+                continue;
+            }
+
+            gl.Uniform1i(location, textureUnit);
+
+            bool shouldUseFiltering;
+            {
+                const BindGroupLayoutBase* bgl =
+                    layout->GetBindGroupLayout(combined.textureLocation.group);
+                BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
+
+                GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+                mUnitsForTextures[textureIndex].push_back(textureUnit);
+
+                // Int/uint textures are only complete without filtering.
+                shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
+                                     wgpu::TextureSampleType::Float;
+            }
+            {
+                if (combined.useDummySampler) {
+                    mDummySamplerUnits.push_back(textureUnit);
+                } else {
+                    const BindGroupLayoutBase* bgl =
+                        layout->GetBindGroupLayout(combined.samplerLocation.group);
+                    BindingIndex bindingIndex =
+                        bgl->GetBindingIndex(combined.samplerLocation.binding);
+
+                    GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+                    mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
+                }
+            }
+
+            textureUnit++;
+        }
+
+        // The shader objects are no longer needed once the program is linked.
+        for (GLuint glShader : glShaders) {
+            gl.DetachShader(mProgram, glShader);
+            gl.DeleteShader(glShader);
+        }
+
+        return {};
+    }
+
+    // Destroys the GL program object created by InitializeBase().
+    void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
+        gl.DeleteProgram(mProgram);
+    }
+
+    // Texture units (with their filtering requirement) fed by the sampler at
+    // the given flattened sampler index.
+    const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
+        GLuint index) const {
+        ASSERT(index < mUnitsForSamplers.size());
+        return mUnitsForSamplers[index];
+    }
+
+    // Texture units fed by the sampled texture at the given flattened index.
+    const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
+        ASSERT(index < mUnitsForTextures.size());
+        return mUnitsForTextures[index];
+    }
+
+    // Raw GL program object handle; 0 before InitializeBase() runs.
+    GLuint PipelineGL::GetProgramHandle() const {
+        return mProgram;
+    }
+
+    // Makes this pipeline's program current and binds the non-filtering dummy
+    // sampler to every unit that requires one.
+    void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
+        gl.UseProgram(mProgram);
+
+        for (GLuint unit : mDummySamplerUnits) {
+            ASSERT(mDummySampler.Get() != nullptr);
+            gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle());
+        }
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h
new file mode 100644
index 00000000000..9bbfffa8b07
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineGL.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_PIPELINEGL_H_
+
+#include "dawn/native/Pipeline.h"
+
+#include "dawn/native/PerStage.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+#include <vector>
+
+namespace dawn::native {
+ struct ProgrammableStage;
+} // namespace dawn::native
+
+namespace dawn::native::opengl {
+
+ struct OpenGLFunctions;
+ class PipelineLayout;
+ class Sampler;
+
+    // Functionality shared between render and compute pipelines: GL program
+    // creation/linking and the mapping from (sampler, texture) bindings to GL
+    // texture units.
+    class PipelineGL {
+      public:
+        PipelineGL();
+        ~PipelineGL();
+
+        // For each unit a sampler is bound to we need to know if we should use filtering or not
+        // because int and uint texture are only complete without filtering.
+        struct SamplerUnit {
+            GLuint unit;
+            bool shouldUseFiltering;
+        };
+        const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
+        const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
+        GLuint GetProgramHandle() const;
+
+      protected:
+        // Binds the program and dummy samplers; call before draws/dispatches.
+        void ApplyNow(const OpenGLFunctions& gl);
+        // Compiles and links the per-stage GLSL modules into one GL program.
+        MaybeError InitializeBase(const OpenGLFunctions& gl,
+                                  const PipelineLayout* layout,
+                                  const PerStage<ProgrammableStage>& stages);
+        void DeleteProgram(const OpenGLFunctions& gl);
+
+      private:
+        GLuint mProgram;
+        // mUnitsForSamplers[i]: texture units fed by the i-th sampler binding.
+        std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
+        // mUnitsForTextures[i]: texture units fed by the i-th sampled texture.
+        std::vector<std::vector<GLuint>> mUnitsForTextures;
+        std::vector<GLuint> mDummySamplerUnits;
+        // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
+        // destruction complex as it requires the sampler to be destroyed before the sampler cache.
+        Ref<Sampler> mDummySampler;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_PIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp
new file mode 100644
index 00000000000..7dd54ab473e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.cpp
@@ -0,0 +1,95 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+    // Walks every binding of every bind group and assigns it a flat GL index
+    // within its binding type's namespace (UBO, SSBO, sampler, sampled texture,
+    // storage texture), in increasing (group, binding) order.
+    PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+        : PipelineLayoutBase(device, descriptor) {
+        GLuint nextUbo = 0;
+        GLuint nextSampler = 0;
+        GLuint nextSampledTexture = 0;
+        GLuint nextSsbo = 0;
+        GLuint nextStorageTexture = 0;
+
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+            mIndexInfo[group].resize(bgl->GetBindingCount());
+
+            for (BindingIndex binding{0}; binding < bgl->GetBindingCount(); ++binding) {
+                const BindingInfo& info = bgl->GetBindingInfo(binding);
+                switch (info.bindingType) {
+                    case BindingInfoType::Buffer:
+                        switch (info.buffer.type) {
+                            case wgpu::BufferBindingType::Uniform:
+                                mIndexInfo[group][binding] = nextUbo++;
+                                break;
+                            case wgpu::BufferBindingType::Storage:
+                            case kInternalStorageBufferBinding:
+                            case wgpu::BufferBindingType::ReadOnlyStorage:
+                                mIndexInfo[group][binding] = nextSsbo++;
+                                break;
+                            case wgpu::BufferBindingType::Undefined:
+                                UNREACHABLE();
+                        }
+                        break;
+
+                    case BindingInfoType::Sampler:
+                        mIndexInfo[group][binding] = nextSampler++;
+                        break;
+
+                    case BindingInfoType::Texture:
+                    case BindingInfoType::ExternalTexture:
+                        mIndexInfo[group][binding] = nextSampledTexture++;
+                        break;
+
+                    case BindingInfoType::StorageTexture:
+                        mIndexInfo[group][binding] = nextStorageTexture++;
+                        break;
+                }
+            }
+        }
+
+        mNumSamplers = nextSampler;
+        mNumSampledTextures = nextSampledTexture;
+    }
+
+    // Per-(group, binding) GL indices computed in the constructor.
+    const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
+        return mIndexInfo;
+    }
+
+    // First texture unit available for combined samplers; no units are
+    // reserved by the layout, so allocation starts at 0.
+    GLuint PipelineLayout::GetTextureUnitsUsed() const {
+        return 0;
+    }
+
+    // Total number of sampler bindings across all bind groups.
+    size_t PipelineLayout::GetNumSamplers() const {
+        return mNumSamplers;
+    }
+
+    // Total number of sampled-texture (and external-texture) bindings.
+    size_t PipelineLayout::GetNumSampledTextures() const {
+        return mNumSampledTextures;
+    }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h
new file mode 100644
index 00000000000..f743d6a20a6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/PipelineLayoutGL.h
@@ -0,0 +1,50 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
+#define DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/BindingInfo.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+    // OpenGL backend pipeline layout: flattens Dawn's (group, binding) pairs
+    // into per-type GL binding indices (UBO, SSBO, sampler, texture units).
+    class PipelineLayout final : public PipelineLayoutBase {
+      public:
+        PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+
+        // mIndexInfo[group][bindingIndex] is the GL index assigned to that
+        // binding within its binding type's namespace.
+        using BindingIndexInfo =
+            ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
+        const BindingIndexInfo& GetBindingIndexInfo() const;
+
+        GLuint GetTextureUnitsUsed() const;
+        size_t GetNumSamplers() const;
+        size_t GetNumSampledTextures() const;
+
+      private:
+        ~PipelineLayout() override = default;
+        BindingIndexInfo mIndexInfo;
+        size_t mNumSamplers;
+        size_t mNumSampledTextures;
+    };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp
new file mode 100644
index 00000000000..cdf98580cd6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.cpp
@@ -0,0 +1,27 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/QuerySetGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+
+namespace dawn::native::opengl {
+
+ QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+ : QuerySetBase(device, descriptor) {
+ }
+
+ QuerySet::~QuerySet() = default;
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h
new file mode 100644
index 00000000000..1bef7c50a1f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QuerySetGL.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_QUERYSETGL_H_
+#define DAWNNATIVE_OPENGL_QUERYSETGL_H_
+
+#include "dawn/native/QuerySet.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+
+ private:
+ ~QuerySet() override;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_QUERYSETGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp
new file mode 100644
index 00000000000..541d93be484
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.cpp
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/QueueGL.h"
+
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/TextureGL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::opengl {
+
+ Queue::Queue(Device* device) : QueueBase(device) {
+ }
+
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
+
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->Execute());
+ }
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+
+ device->SubmitFenceSync();
+ return {};
+ }
+
+ MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
+ return {};
+ }
+
+ MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
+ "Writes to stencil textures unsupported on the OpenGL backend.");
+
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect =
+ SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
+ if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
+ destination.mipLevel)) {
+ destination.texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
+ }
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
+ return {};
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h
new file mode 100644
index 00000000000..f83278df1b1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/QueueGL.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_QUEUEGL_H_
+#define DAWNNATIVE_OPENGL_QUEUEGL_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ class Queue final : public QueueBase {
+ public:
+ Queue(Device* device);
+
+ private:
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteBufferImpl(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) override;
+ MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) override;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_QUEUEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp
new file mode 100644
index 00000000000..5e4ddce6e72
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.cpp
@@ -0,0 +1,345 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/RenderPipelineGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/PersistentPipelineStateGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+ namespace {
+
+ GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+ switch (primitiveTopology) {
+ case wgpu::PrimitiveTopology::PointList:
+ return GL_POINTS;
+ case wgpu::PrimitiveTopology::LineList:
+ return GL_LINES;
+ case wgpu::PrimitiveTopology::LineStrip:
+ return GL_LINE_STRIP;
+ case wgpu::PrimitiveTopology::TriangleList:
+ return GL_TRIANGLES;
+ case wgpu::PrimitiveTopology::TriangleStrip:
+ return GL_TRIANGLE_STRIP;
+ }
+ UNREACHABLE();
+ }
+
+ void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
+ wgpu::FrontFace face,
+ wgpu::CullMode mode) {
+            // Note that we invert the winding direction in OpenGL because its Y axis points
+            // up, which differs from WebGPU and other backends (where the Y axis points down).
+ GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
+ gl.FrontFace(direction);
+
+ if (mode == wgpu::CullMode::None) {
+ gl.Disable(GL_CULL_FACE);
+ } else {
+ gl.Enable(GL_CULL_FACE);
+
+ GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
+ gl.CullFace(cullMode);
+ }
+ }
+
+ GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+ switch (factor) {
+ case wgpu::BlendFactor::Zero:
+ return GL_ZERO;
+ case wgpu::BlendFactor::One:
+ return GL_ONE;
+ case wgpu::BlendFactor::Src:
+ return GL_SRC_COLOR;
+ case wgpu::BlendFactor::OneMinusSrc:
+ return GL_ONE_MINUS_SRC_COLOR;
+ case wgpu::BlendFactor::SrcAlpha:
+ return GL_SRC_ALPHA;
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
+ return GL_ONE_MINUS_SRC_ALPHA;
+ case wgpu::BlendFactor::Dst:
+ return GL_DST_COLOR;
+ case wgpu::BlendFactor::OneMinusDst:
+ return GL_ONE_MINUS_DST_COLOR;
+ case wgpu::BlendFactor::DstAlpha:
+ return GL_DST_ALPHA;
+ case wgpu::BlendFactor::OneMinusDstAlpha:
+ return GL_ONE_MINUS_DST_ALPHA;
+ case wgpu::BlendFactor::SrcAlphaSaturated:
+ return GL_SRC_ALPHA_SATURATE;
+ case wgpu::BlendFactor::Constant:
+ return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
+ case wgpu::BlendFactor::OneMinusConstant:
+ return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
+ }
+ UNREACHABLE();
+ }
+
+ GLenum GLBlendMode(wgpu::BlendOperation operation) {
+ switch (operation) {
+ case wgpu::BlendOperation::Add:
+ return GL_FUNC_ADD;
+ case wgpu::BlendOperation::Subtract:
+ return GL_FUNC_SUBTRACT;
+ case wgpu::BlendOperation::ReverseSubtract:
+ return GL_FUNC_REVERSE_SUBTRACT;
+ case wgpu::BlendOperation::Min:
+ return GL_MIN;
+ case wgpu::BlendOperation::Max:
+ return GL_MAX;
+ }
+ UNREACHABLE();
+ }
+
+ void ApplyColorState(const OpenGLFunctions& gl,
+ ColorAttachmentIndex attachment,
+ const ColorTargetState* state) {
+ GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
+ if (state->blend != nullptr) {
+ gl.Enablei(GL_BLEND, colorBuffer);
+ gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
+ gl.BlendFuncSeparatei(colorBuffer,
+ GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
+ } else {
+ gl.Disablei(GL_BLEND, colorBuffer);
+ }
+ gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
+ }
+
+ void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
+ if (state->blend != nullptr) {
+ gl.Enable(GL_BLEND);
+ gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
+ gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
+ } else {
+ gl.Disable(GL_BLEND);
+ }
+ gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
+ }
+
+ bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
+ return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
+ lhs.dstFactor == rhs.dstFactor;
+ }
+
+ GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
+ switch (stencilOperation) {
+ case wgpu::StencilOperation::Keep:
+ return GL_KEEP;
+ case wgpu::StencilOperation::Zero:
+ return GL_ZERO;
+ case wgpu::StencilOperation::Replace:
+ return GL_REPLACE;
+ case wgpu::StencilOperation::Invert:
+ return GL_INVERT;
+ case wgpu::StencilOperation::IncrementClamp:
+ return GL_INCR;
+ case wgpu::StencilOperation::DecrementClamp:
+ return GL_DECR;
+ case wgpu::StencilOperation::IncrementWrap:
+ return GL_INCR_WRAP;
+ case wgpu::StencilOperation::DecrementWrap:
+ return GL_DECR_WRAP;
+ }
+ UNREACHABLE();
+ }
+
+ void ApplyDepthStencilState(const OpenGLFunctions& gl,
+ const DepthStencilState* descriptor,
+ PersistentPipelineState* persistentPipelineState) {
+ // Depth writes only occur if depth is enabled
+ if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+ !descriptor->depthWriteEnabled) {
+ gl.Disable(GL_DEPTH_TEST);
+ } else {
+ gl.Enable(GL_DEPTH_TEST);
+ }
+
+ if (descriptor->depthWriteEnabled) {
+ gl.DepthMask(GL_TRUE);
+ } else {
+ gl.DepthMask(GL_FALSE);
+ }
+
+ gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
+
+ if (StencilTestEnabled(descriptor)) {
+ gl.Enable(GL_STENCIL_TEST);
+ } else {
+ gl.Disable(GL_STENCIL_TEST);
+ }
+
+ GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
+ GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
+ persistentPipelineState->SetStencilFuncsAndMask(
+ gl, backCompareFunction, frontCompareFunction, descriptor->stencilReadMask);
+
+ gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
+ OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
+ OpenGLStencilOperation(descriptor->stencilBack.passOp));
+ gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
+ OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
+ OpenGLStencilOperation(descriptor->stencilFront.passOp));
+
+ gl.StencilMask(descriptor->stencilWriteMask);
+ }
+
+ } // anonymous namespace
+
+ // static
+ Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ return AcquireRef(new RenderPipeline(device, descriptor));
+ }
+
+ RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
+ : RenderPipelineBase(device, descriptor),
+ mVertexArrayObject(0),
+ mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
+ }
+
+ MaybeError RenderPipeline::Initialize() {
+ DAWN_TRY(
+ InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+ CreateVAOForVertexState();
+ return {};
+ }
+
+ RenderPipeline::~RenderPipeline() = default;
+
+ void RenderPipeline::DestroyImpl() {
+ RenderPipelineBase::DestroyImpl();
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.DeleteVertexArrays(1, &mVertexArrayObject);
+ gl.BindVertexArray(0);
+ DeleteProgram(gl);
+ }
+
+ GLenum RenderPipeline::GetGLPrimitiveTopology() const {
+ return mGlPrimitiveTopology;
+ }
+
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
+ RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
+ ASSERT(!IsError());
+ return mAttributesUsingVertexBuffer[slot];
+ }
+
+ void RenderPipeline::CreateVAOForVertexState() {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.GenVertexArrays(1, &mVertexArrayObject);
+ gl.BindVertexArray(mVertexArrayObject);
+
+ for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
+ const auto& attribute = GetAttribute(location);
+ GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
+ gl.EnableVertexAttribArray(glAttrib);
+
+ mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
+ const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
+
+ if (vertexBuffer.arrayStride == 0) {
+ // Emulate a stride of zero (constant vertex attribute) by
+ // setting the attribute instance divisor to a huge number.
+ gl.VertexAttribDivisor(glAttrib, 0xffffffff);
+ } else {
+ switch (vertexBuffer.stepMode) {
+ case wgpu::VertexStepMode::Vertex:
+ break;
+ case wgpu::VertexStepMode::Instance:
+ gl.VertexAttribDivisor(glAttrib, 1);
+ break;
+ }
+ }
+ }
+ }
+
+ void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ PipelineGL::ApplyNow(gl);
+
+ ASSERT(mVertexArrayObject);
+ gl.BindVertexArray(mVertexArrayObject);
+
+ ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
+
+ ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
+
+ gl.SampleMaski(0, GetSampleMask());
+ if (IsAlphaToCoverageEnabled()) {
+ gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ } else {
+ gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ }
+
+ if (IsDepthBiasEnabled()) {
+ gl.Enable(GL_POLYGON_OFFSET_FILL);
+ float depthBias = GetDepthBias();
+ float slopeScale = GetDepthBiasSlopeScale();
+ if (gl.PolygonOffsetClamp != nullptr) {
+ gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
+ } else {
+ gl.PolygonOffset(slopeScale, depthBias);
+ }
+ } else {
+ gl.Disable(GL_POLYGON_OFFSET_FILL);
+ }
+
+ if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
+ for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+ ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
+ }
+ } else {
+ const ColorTargetState* prevDescriptor = nullptr;
+ for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+ const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
+ if (!prevDescriptor) {
+ ApplyColorState(gl, descriptor);
+ prevDescriptor = descriptor;
+ } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
+                    // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
+                    // per color target. Add validation to prevent this, as it is not supported.
+ ASSERT(false);
+ } else if (descriptor->blend != nullptr) {
+ if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
+ !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
+ descriptor->writeMask != prevDescriptor->writeMask) {
+ // TODO(crbug.com/dawn/582)
+ ASSERT(false);
+ }
+ }
+ }
+ }
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h
new file mode 100644
index 00000000000..1ee3f81661f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/RenderPipelineGL.h
@@ -0,0 +1,62 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
+#define DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/native/opengl/PipelineGL.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+#include <vector>
+
+namespace dawn::native::opengl {
+
+ class Device;
+ class PersistentPipelineState;
+
+ class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
+ public:
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+
+ GLenum GetGLPrimitiveTopology() const;
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
+ VertexBufferSlot slot) const;
+
+ void ApplyNow(PersistentPipelineState& persistentPipelineState);
+
+ MaybeError Initialize() override;
+
+ private:
+ RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ ~RenderPipeline() override;
+ void DestroyImpl() override;
+
+ void CreateVAOForVertexState();
+
+ // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
+ GLuint mVertexArrayObject;
+ GLenum mGlPrimitiveTopology;
+
+ ityp::array<VertexBufferSlot,
+ ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
+ kMaxVertexBuffers>
+ mAttributesUsingVertexBuffer;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp
new file mode 100644
index 00000000000..77905305704
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.cpp
@@ -0,0 +1,130 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/SamplerGL.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+ namespace {
+ GLenum MagFilterMode(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Nearest:
+ return GL_NEAREST;
+ case wgpu::FilterMode::Linear:
+ return GL_LINEAR;
+ }
+ UNREACHABLE();
+ }
+
+ GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
+ switch (minFilter) {
+ case wgpu::FilterMode::Nearest:
+ switch (mipMapFilter) {
+ case wgpu::FilterMode::Nearest:
+ return GL_NEAREST_MIPMAP_NEAREST;
+ case wgpu::FilterMode::Linear:
+ return GL_NEAREST_MIPMAP_LINEAR;
+ }
+ case wgpu::FilterMode::Linear:
+ switch (mipMapFilter) {
+ case wgpu::FilterMode::Nearest:
+ return GL_LINEAR_MIPMAP_NEAREST;
+ case wgpu::FilterMode::Linear:
+ return GL_LINEAR_MIPMAP_LINEAR;
+ }
+ }
+ UNREACHABLE();
+ }
+
+ GLenum WrapMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return GL_REPEAT;
+ case wgpu::AddressMode::MirrorRepeat:
+ return GL_MIRRORED_REPEAT;
+ case wgpu::AddressMode::ClampToEdge:
+ return GL_CLAMP_TO_EDGE;
+ }
+ UNREACHABLE();
+ }
+
+ } // namespace
+
+ Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+ : SamplerBase(device, descriptor) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ gl.GenSamplers(1, &mFilteringHandle);
+ SetupGLSampler(mFilteringHandle, descriptor, false);
+
+ gl.GenSamplers(1, &mNonFilteringHandle);
+ SetupGLSampler(mNonFilteringHandle, descriptor, true);
+ }
+
+ Sampler::~Sampler() = default;
+
+ void Sampler::DestroyImpl() {
+ SamplerBase::DestroyImpl();
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ gl.DeleteSamplers(1, &mFilteringHandle);
+ gl.DeleteSamplers(1, &mNonFilteringHandle);
+ }
+
+ void Sampler::SetupGLSampler(GLuint sampler,
+ const SamplerDescriptor* descriptor,
+ bool forceNearest) {
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
+
+ if (forceNearest) {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+ } else {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER,
+ MagFilterMode(descriptor->magFilter));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
+ MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
+ }
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
+ gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
+
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
+
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
+ gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
+ ToOpenGLCompareFunction(descriptor->compare));
+ }
+
+ if (gl.IsAtLeastGL(4, 6) ||
+ gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
+ gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
+ }
+ }
+
+ GLuint Sampler::GetFilteringHandle() const {
+ return mFilteringHandle;
+ }
+
+ GLuint Sampler::GetNonFilteringHandle() const {
+ return mNonFilteringHandle;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h
new file mode 100644
index 00000000000..5d07ecb750e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SamplerGL.h
@@ -0,0 +1,48 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SAMPLERGL_H_
+#define DAWNNATIVE_OPENGL_SAMPLERGL_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ class Sampler final : public SamplerBase {
+ public:
+ Sampler(Device* device, const SamplerDescriptor* descriptor);
+
+ GLuint GetFilteringHandle() const;
+ GLuint GetNonFilteringHandle() const;
+
+ private:
+ ~Sampler() override;
+ void DestroyImpl() override;
+
+ void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
+
+ GLuint mFilteringHandle;
+
+ // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
+ // for everything, which is important to preserve texture completeness for u/int textures.
+ GLuint mNonFilteringHandle;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_SAMPLERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp
new file mode 100644
index 00000000000..6bda26bd081
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.cpp
@@ -0,0 +1,177 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/ShaderModuleGL.h"
+
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/PipelineLayoutGL.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+
+#include <sstream>
+
+namespace dawn::native::opengl {
+
+ std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
+ std::ostringstream o;
+ o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
+ << static_cast<uint32_t>(bindingNumber);
+ return o.str();
+ }
+
+ bool operator<(const BindingLocation& a, const BindingLocation& b) {
+ return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
+ }
+
+ bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
+ return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) <
+               std::tie(b.useDummySampler, b.samplerLocation, b.textureLocation);
+ }
+
+ std::string CombinedSampler::GetName() const {
+ std::ostringstream o;
+ o << "dawn_combined";
+ if (useDummySampler) {
+ o << "_dummy_sampler";
+ } else {
+ o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
+ << static_cast<uint32_t>(samplerLocation.binding);
+ }
+ o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
+ << static_cast<uint32_t>(textureLocation.binding);
+ return o.str();
+ }
+
+ // static
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult));
+ return module;
+ }
+
+ ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor) {
+ }
+
+ MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ DAWN_TRY(InitializeBase(parseResult));
+
+ return {};
+ }
+
+ ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
+ SingleShaderStage stage,
+ CombinedSamplerInfo* combinedSamplers,
+ const PipelineLayout* layout,
+ bool* needsDummySampler) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
+
+ AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ nullptr, nullptr));
+ const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
+
+ tint::writer::glsl::Options tintOptions;
+ using Version = tint::writer::glsl::Version;
+ tintOptions.version =
+ Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
+ version.GetMajor(), version.GetMinor());
+
+ using tint::transform::BindingPoint;
+ // When textures are accessed without a sampler (e.g., textureLoad()),
+ // GetSamplerTextureUses() will return this sentinel value.
+ BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
+
+ tint::inspector::Inspector inspector(&program);
+ // Find all the sampler/texture pairs for this entry point, and create
+ // CombinedSamplers for them. CombinedSampler records the binding points
+ // of the original texture and sampler, and generates a unique name. The
+ // corresponding uniforms will be retrieved by these generated names
+ // in PipelineGL. Any texture-only references will have
+ // "useDummySampler" set to true, and only the texture binding point
+ // will be used in naming them. In addition, Dawn will bind a
+ // non-filtering sampler for them (see PipelineGL).
+ auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
+ for (const auto& use : uses) {
+ combinedSamplers->emplace_back();
+
+ CombinedSampler* info = &combinedSamplers->back();
+ if (use.sampler_binding_point == placeholderBindingPoint) {
+ info->useDummySampler = true;
+ *needsDummySampler = true;
+ } else {
+ info->useDummySampler = false;
+ }
+ info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
+ info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
+ info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
+ info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
+ tintOptions.binding_map[use] = info->GetName();
+ }
+ if (*needsDummySampler) {
+ tintOptions.placeholder_binding_point = placeholderBindingPoint;
+ }
+
+ // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
+ // mapping from the original group/binding pair to a binding-only
+ // value. This mapping will be used by Tint to remap all global
+ // variables to the 1D space.
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayoutBase::BindingMap& bindingMap =
+ layout->GetBindGroupLayout(group)->GetBindingMap();
+ for (const auto& it : bindingMap) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
+ const BindingInfo& bindingInfo =
+ layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+ if (!(bindingInfo.visibility & StageBit(stage))) {
+ continue;
+ }
+
+ uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingNumber)};
+ BindingPoint dstBindingPoint{0, shaderIndex};
+ tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
+ }
+ tintOptions.allow_collisions = true;
+ }
+ auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
+        DAWN_INVALID_IF(!result.success, "An error occurred while generating GLSL: %s.",
+ result.error);
+ std::string glsl = std::move(result.glsl);
+
+ if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
+
+ GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
+
+ return glsl;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h
new file mode 100644
index 00000000000..44cd0d40e59
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/ShaderModuleGL.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
+#define DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+ class PipelineLayout;
+
+ std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
+
+ struct BindingLocation {
+ BindGroupIndex group;
+ BindingNumber binding;
+ };
+ bool operator<(const BindingLocation& a, const BindingLocation& b);
+
+ struct CombinedSampler {
+ BindingLocation samplerLocation;
+ BindingLocation textureLocation;
+ // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
+ // one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused.
+ bool useDummySampler;
+ std::string GetName() const;
+ };
+ bool operator<(const CombinedSampler& a, const CombinedSampler& b);
+
+ using CombinedSamplerInfo = std::vector<CombinedSampler>;
+
+ using BindingInfoArrayTable =
+ std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
+
+ class ShaderModule final : public ShaderModuleBase {
+ public:
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
+
+ ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
+ SingleShaderStage stage,
+ CombinedSamplerInfo* combinedSamplers,
+ const PipelineLayout* layout,
+ bool* needsDummySampler) const;
+
+ private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override = default;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult);
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp
new file mode 100644
index 00000000000..e59bb9ff8d5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.cpp
@@ -0,0 +1,51 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/SwapChainGL.h"
+
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/Forward.h"
+#include "dawn/native/opengl/TextureGL.h"
+
+#include <dawn/dawn_wsi.h>
+
+namespace dawn::native::opengl {
+
+ SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ im.Init(im.userData, nullptr);
+ }
+
+ SwapChain::~SwapChain() {
+ }
+
+ TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+ if (error) {
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
+ }
+ GLuint nativeTexture = next.texture.u32;
+ return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
+ TextureBase::TextureState::OwnedExternal);
+ }
+
+ MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
+ return {};
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h
new file mode 100644
index 00000000000..2c6c91a842d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/SwapChainGL.h
@@ -0,0 +1,38 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
+#define DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+
+ class SwapChain final : public OldSwapChainBase {
+ public:
+ SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+
+ protected:
+ ~SwapChain() override;
+ TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+ MaybeError OnBeforePresent(TextureViewBase* view) override;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp
new file mode 100644
index 00000000000..6f65f9e7e29
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.cpp
@@ -0,0 +1,580 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/TextureGL.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/opengl/BufferGL.h"
+#include "dawn/native/opengl/CommandBufferGL.h"
+#include "dawn/native/opengl/DeviceGL.h"
+#include "dawn/native/opengl/UtilsGL.h"
+
+namespace dawn::native::opengl {
+
+ namespace {
+
+ GLenum TargetForTexture(const TextureDescriptor* descriptor) {
+ switch (descriptor->dimension) {
+ case wgpu::TextureDimension::e2D:
+ if (descriptor->size.depthOrArrayLayers > 1) {
+ ASSERT(descriptor->sampleCount == 1);
+ return GL_TEXTURE_2D_ARRAY;
+ } else {
+ if (descriptor->sampleCount > 1) {
+ return GL_TEXTURE_2D_MULTISAMPLE;
+ } else {
+ return GL_TEXTURE_2D;
+ }
+ }
+ case wgpu::TextureDimension::e3D:
+ return GL_TEXTURE_3D;
+
+ case wgpu::TextureDimension::e1D:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
+ uint32_t arrayLayerCount,
+ uint32_t sampleCount) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e2D:
+ return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+ case wgpu::TextureViewDimension::e2DArray:
+ if (arrayLayerCount == 1) {
+ return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+ }
+ ASSERT(sampleCount == 1);
+ return GL_TEXTURE_2D_ARRAY;
+ case wgpu::TextureViewDimension::Cube:
+ return GL_TEXTURE_CUBE_MAP;
+ case wgpu::TextureViewDimension::CubeArray:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ case wgpu::TextureViewDimension::e3D:
+ return GL_TEXTURE_3D;
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ GLuint GenTexture(const OpenGLFunctions& gl) {
+ GLuint handle = 0;
+ gl.GenTextures(1, &handle);
+ return handle;
+ }
+
+ bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+ constexpr wgpu::TextureUsage kUsageNeedingTextureView =
+ wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+ return usage & kUsageNeedingTextureView;
+ }
+
+ bool RequiresCreatingNewTextureView(const TextureBase* texture,
+ const TextureViewDescriptor* textureViewDescriptor) {
+ if (texture->GetFormat().format != textureViewDescriptor->format) {
+ return true;
+ }
+
+ if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount) {
+ return true;
+ }
+
+ if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+ return true;
+ }
+
+ if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
+ (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
+ textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+ // We need a separate view for one of the depth or stencil planes
+                // because each glTextureView needs its own handle to set
+ // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
+ // extra handle since it is likely sampled less often.
+ return true;
+ }
+
+ switch (textureViewDescriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+ }
+
+ } // namespace
+
+ // Texture
+
+ Texture::Texture(Device* device, const TextureDescriptor* descriptor)
+ : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+ uint32_t width = GetWidth();
+ uint32_t height = GetHeight();
+ uint32_t levels = GetNumMipLevels();
+ uint32_t arrayLayers = GetArrayLayers();
+ uint32_t sampleCount = GetSampleCount();
+
+ const GLFormat& glFormat = GetGLFormat();
+
+ gl.BindTexture(mTarget, mHandle);
+
+ // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to be
+ // GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
+ // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ if (arrayLayers > 1) {
+ ASSERT(!IsMultisampledTexture());
+ gl.TexStorage3D(mTarget, levels, glFormat.internalFormat, width, height,
+ arrayLayers);
+ } else {
+ if (IsMultisampledTexture()) {
+ gl.TexStorage2DMultisample(mTarget, sampleCount, glFormat.internalFormat,
+ width, height, true);
+ } else {
+ gl.TexStorage2D(mTarget, levels, glFormat.internalFormat, width, height);
+ }
+ }
+ break;
+ case wgpu::TextureDimension::e3D:
+ ASSERT(!IsMultisampledTexture());
+ ASSERT(arrayLayers == 1);
+ gl.TexStorage3D(mTarget, levels, glFormat.internalFormat, width, height,
+ GetDepth());
+ break;
+
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ }
+
+ // The texture is not complete if it uses mipmapping and not all levels up to
+ // MAX_LEVEL have been defined.
+ gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
+
+ if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ GetDevice()->ConsumedError(
+ ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
+ }
+ }
+
+ Texture::Texture(Device* device,
+ const TextureDescriptor* descriptor,
+ GLuint handle,
+ TextureState state)
+ : TextureBase(device, descriptor, state), mHandle(handle) {
+ mTarget = TargetForTexture(descriptor);
+ }
+
+ Texture::~Texture() {
+ }
+
+ void Texture::DestroyImpl() {
+ TextureBase::DestroyImpl();
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ mHandle = 0;
+ }
+ }
+
+ GLuint Texture::GetHandle() const {
+ return mHandle;
+ }
+
+ GLenum Texture::GetGLTarget() const {
+ return mTarget;
+ }
+
+ const GLFormat& Texture::GetGLFormat() const {
+ return ToBackend(GetDevice())->GetGLFormat(GetFormat());
+ }
+
+ MaybeError Texture::ClearTexture(const SubresourceRange& range,
+ TextureBase::ClearValue clearValue) {
+ // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
+ if (GetFormat().isCompressed) {
+ return {};
+ }
+
+ Device* device = ToBackend(GetDevice());
+ const OpenGLFunctions& gl = device->gl;
+
+ uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
+ if (GetFormat().isRenderable) {
+ if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
+ GLfloat depth = fClearColor;
+ GLint stencil = clearColor;
+ if (range.aspects & Aspect::Depth) {
+ gl.DepthMask(GL_TRUE);
+ }
+ if (range.aspects & Aspect::Stencil) {
+ gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
+ }
+
+ auto DoClear = [&](Aspect aspects) {
+ if (aspects == (Aspect::Depth | Aspect::Stencil)) {
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+ } else if (aspects == Aspect::Depth) {
+ gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+ } else if (aspects == Aspect::Stencil) {
+ gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+ } else {
+ UNREACHABLE();
+ }
+ };
+
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+
+ GLenum attachment;
+ if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
+ attachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ } else if (range.aspects == Aspect::Depth) {
+ attachment = GL_DEPTH_ATTACHMENT;
+ } else if (range.aspects == Aspect::Stencil) {
+ attachment = GL_STENCIL_ATTACHMENT;
+ } else {
+ UNREACHABLE();
+ }
+
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ if (GetArrayLayers() == 1) {
+ Aspect aspectsToClear = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, 0,
+ aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspectsToClear |= aspect;
+ }
+
+ if (aspectsToClear == Aspect::None) {
+ continue;
+ }
+
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+ GetGLTarget(), GetHandle(),
+ static_cast<GLint>(level));
+ DoClear(aspectsToClear);
+ } else {
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ Aspect aspectsToClear = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer,
+ aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspectsToClear |= aspect;
+ }
+
+ if (aspectsToClear == Aspect::None) {
+ continue;
+ }
+
+ gl.FramebufferTextureLayer(
+ GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+ static_cast<GLint>(level), static_cast<GLint>(layer));
+ DoClear(aspectsToClear);
+ }
+ }
+ break;
+
+ case wgpu::TextureDimension::e1D:
+ case wgpu::TextureDimension::e3D:
+ UNREACHABLE();
+ }
+ }
+
+ gl.DeleteFramebuffers(1, &framebuffer);
+ } else {
+ ASSERT(range.aspects == Aspect::Color);
+
+ // For gl.ClearBufferiv/uiv calls
+ constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
+ constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
+ std::array<GLuint, 4> clearColorData;
+ clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
+
+ // For gl.ClearBufferfv calls
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
+ constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
+ std::array<GLfloat, 4> fClearColorData;
+ fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
+
+ static constexpr uint32_t MAX_TEXEL_SIZE = 16;
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+ ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
+
+ // For gl.ClearTexSubImage calls
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+
+ wgpu::TextureComponentType baseType =
+ GetFormat().GetAspectInfo(Aspect::Color).baseType;
+
+ const GLFormat& glFormat = GetGLFormat();
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ Extent3D mipSize = GetMipLevelPhysicalSize(level);
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ if (gl.IsAtLeastGL(4, 4)) {
+ gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
+ static_cast<GLint>(layer), mipSize.width,
+ mipSize.height, mipSize.depthOrArrayLayers,
+ glFormat.format, glFormat.type,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataBytes0.data()
+ : kClearColorDataBytes255.data());
+ continue;
+ }
+
+ GLuint framebuffer = 0;
+ gl.GenFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+
+ GLenum attachment = GL_COLOR_ATTACHMENT0;
+ gl.DrawBuffers(1, &attachment);
+
+ gl.Disable(GL_SCISSOR_TEST);
+ gl.ColorMask(true, true, true, true);
+
+ auto DoClear = [&]() {
+ switch (baseType) {
+ case wgpu::TextureComponentType::Float: {
+ gl.ClearBufferfv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataFloat0.data()
+ : kClearColorDataFloat1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Uint: {
+ gl.ClearBufferuiv(GL_COLOR, 0,
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data());
+ break;
+ }
+ case wgpu::TextureComponentType::Sint: {
+ gl.ClearBufferiv(GL_COLOR, 0,
+ reinterpret_cast<const GLint*>(
+ clearValue == TextureBase::ClearValue::Zero
+ ? kClearColorDataUint0.data()
+ : kClearColorDataUint1.data()));
+ break;
+ }
+
+ case wgpu::TextureComponentType::DepthComparison:
+ UNREACHABLE();
+ }
+ };
+
+ if (GetArrayLayers() == 1) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ UNREACHABLE();
+ case wgpu::TextureDimension::e2D:
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+ GetGLTarget(), GetHandle(), level);
+ DoClear();
+ break;
+ case wgpu::TextureDimension::e3D:
+ uint32_t depth =
+ GetMipLevelVirtualSize(level).depthOrArrayLayers;
+ for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+ GetHandle(), level, z);
+ DoClear();
+ }
+ break;
+ }
+
+ } else {
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+ level, layer);
+ DoClear();
+ }
+
+ gl.Enable(GL_SCISSOR_TEST);
+ gl.DeleteFramebuffers(1, &framebuffer);
+ gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+ }
+ }
+ }
+ } else {
+ ASSERT(range.aspects == Aspect::Color);
+
+ // create temp buffer with clear color to copy to the texture image
+ const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+ ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
+
+ Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+ uint32_t bytesPerRow =
+ Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
+
+ // Make sure that we are not rounding
+ ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+ ASSERT(largestMipSize.height % blockInfo.height == 0);
+
+ uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
+ (largestMipSize.height / blockInfo.height) *
+ largestMipSize.depthOrArrayLayers;
+ if (bufferSize64 > std::numeric_limits<size_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+ size_t bufferSize = static_cast<size_t>(bufferSize64);
+
+ dawn::native::BufferDescriptor descriptor = {};
+ descriptor.mappedAtCreation = true;
+ descriptor.usage = wgpu::BufferUsage::CopySrc;
+ descriptor.size = bufferSize;
+
+ // We don't count the lazy clear of srcBuffer because it is an internal buffer.
+ // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
+ Ref<Buffer> srcBuffer;
+ DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
+
+ // Fill the buffer with clear color
+ memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
+ srcBuffer->Unmap();
+
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+ for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+ ++level) {
+ TextureCopy textureCopy;
+ textureCopy.texture = this;
+ textureCopy.mipLevel = level;
+ textureCopy.origin = {};
+ textureCopy.aspect = Aspect::Color;
+
+ TextureDataLayout dataLayout;
+ dataLayout.offset = 0;
+ dataLayout.bytesPerRow = bytesPerRow;
+ dataLayout.rowsPerImage = largestMipSize.height;
+
+ Extent3D mipSize = GetMipLevelPhysicalSize(level);
+
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ textureCopy.origin.z = layer;
+ DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
+ }
+ }
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, range);
+ device->IncrementLazyClearCountForTesting();
+ }
+ return {};
+ }
+
+ void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+ if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
+ }
+ if (!IsSubresourceContentInitialized(range)) {
+ GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+ }
+ }
+
+ // TextureView
+
+ TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+ : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
+ mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
+ texture->GetSampleCount());
+
+ // Texture could be destroyed by the time we make a view.
+ if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+ return;
+ }
+
+ if (!UsageNeedsTextureView(texture->GetUsage())) {
+ mHandle = 0;
+ } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+ mHandle = ToBackend(texture)->GetHandle();
+ } else {
+ // glTextureView() is supported on OpenGL version >= 4.3
+ // TODO(crbug.com/dawn/593): support texture view on OpenGL version <= 4.2 and ES
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ mHandle = GenTexture(gl);
+ const Texture* textureGL = ToBackend(texture);
+ const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(GetFormat());
+ gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), glFormat.internalFormat,
+ descriptor->baseMipLevel, descriptor->mipLevelCount,
+ descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+ mOwnsHandle = true;
+ }
+ }
+
+ TextureView::~TextureView() {
+ }
+
+ void TextureView::DestroyImpl() {
+ TextureViewBase::DestroyImpl();
+ if (mOwnsHandle) {
+ ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ }
+ }
+
+ GLuint TextureView::GetHandle() const {
+ ASSERT(mHandle != 0);
+ return mHandle;
+ }
+
+ GLenum TextureView::GetGLTarget() const {
+ return mTarget;
+ }
+
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h
new file mode 100644
index 00000000000..74b34b9b3bd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/TextureGL.h
@@ -0,0 +1,69 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_TEXTUREGL_H_
+#define DAWNNATIVE_OPENGL_TEXTUREGL_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ class Device;
+ struct GLFormat;
+
+ class Texture final : public TextureBase {
+ public:
+ Texture(Device* device, const TextureDescriptor* descriptor);
+ Texture(Device* device,
+ const TextureDescriptor* descriptor,
+ GLuint handle,
+ TextureState state);
+
+ GLuint GetHandle() const;
+ GLenum GetGLTarget() const;
+ const GLFormat& GetGLFormat() const;
+
+ void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+
+ private:
+ ~Texture() override;
+
+ void DestroyImpl() override;
+ MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
+
+ GLuint mHandle;
+ GLenum mTarget;
+ };
+
+ class TextureView final : public TextureViewBase {
+ public:
+ TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+ GLuint GetHandle() const;
+ GLenum GetGLTarget() const;
+
+ private:
+ ~TextureView() override;
+ void DestroyImpl() override;
+
+ GLuint mHandle;
+ GLenum mTarget;
+ bool mOwnsHandle;
+ };
+
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_TEXTUREGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp
new file mode 100644
index 00000000000..ec94704398f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.cpp
@@ -0,0 +1,55 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/UtilsGL.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::native::opengl {
+
+ GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
+ switch (compareFunction) {
+ case wgpu::CompareFunction::Never:
+ return GL_NEVER;
+ case wgpu::CompareFunction::Less:
+ return GL_LESS;
+ case wgpu::CompareFunction::LessEqual:
+ return GL_LEQUAL;
+ case wgpu::CompareFunction::Greater:
+ return GL_GREATER;
+ case wgpu::CompareFunction::GreaterEqual:
+ return GL_GEQUAL;
+ case wgpu::CompareFunction::NotEqual:
+ return GL_NOTEQUAL;
+ case wgpu::CompareFunction::Equal:
+ return GL_EQUAL;
+ case wgpu::CompareFunction::Always:
+ return GL_ALWAYS;
+
+ case wgpu::CompareFunction::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
+ switch (depthStencilFormat) {
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ return 0xFF;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+} // namespace dawn::native::opengl
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h
new file mode 100644
index 00000000000..90869800efc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/UtilsGL.h
@@ -0,0 +1,27 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_UTILSGL_H_
+#define DAWNNATIVE_OPENGL_UTILSGL_H_
+
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/opengl/opengl_platform.h"
+
+namespace dawn::native::opengl {
+
+ GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
+ GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
+} // namespace dawn::native::opengl
+
+#endif // DAWNNATIVE_OPENGL_UTILSGL_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/opengl/opengl_platform.h b/chromium/third_party/dawn/src/dawn/native/opengl/opengl_platform.h
new file mode 100644
index 00000000000..04d91261f03
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/opengl_platform.h
@@ -0,0 +1,15 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/opengl/opengl_platform_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/supported_extensions.json b/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json
index 8e006337b9f..8e006337b9f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/supported_extensions.json
+++ b/chromium/third_party/dawn/src/dawn/native/opengl/supported_extensions.json
diff --git a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp
new file mode 100644
index 00000000000..a7ab910e514
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.cpp
@@ -0,0 +1,192 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/utils/WGPUHelpers.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/native/BindGroup.h"
+#include "dawn/native/BindGroupLayout.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/PipelineLayout.h"
+#include "dawn/native/Queue.h"
+#include "dawn/native/Sampler.h"
+#include "dawn/native/ShaderModule.h"
+
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <mutex>
+#include <sstream>
+
+namespace dawn::native::utils {
+
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device,
+ const char* source) {
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = source;
+ ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &wgslDesc;
+ return device->CreateShaderModule(&descriptor);
+ }
+
+ ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ const void* data,
+ uint64_t size) {
+ BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage;
+ descriptor.mappedAtCreation = true;
+ Ref<BufferBase> buffer;
+ DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
+ memcpy(buffer->GetMappedRange(0, size), data, size);
+ buffer->Unmap();
+ return buffer;
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& bindGroupLayout) {
+ PipelineLayoutDescriptor descriptor;
+ descriptor.bindGroupLayoutCount = 1;
+ BindGroupLayoutBase* bgl = bindGroupLayout.Get();
+ descriptor.bindGroupLayouts = &bgl;
+ return device->CreatePipelineLayout(&descriptor);
+ }
+
+ ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+ DeviceBase* device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+ bool allowInternalBinding) {
+ std::vector<BindGroupLayoutEntry> entries;
+ for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+ entries.push_back(entry);
+ }
+
+ BindGroupLayoutDescriptor descriptor;
+ descriptor.entryCount = static_cast<uint32_t>(entries.size());
+ descriptor.entries = entries.data();
+ return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset,
+ uint64_t bufferMinBindingSize) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ buffer.type = bufferType;
+ buffer.hasDynamicOffset = bufferHasDynamicOffset;
+ buffer.minBindingSize = bufferMinBindingSize;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ sampler.type = samplerType;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension textureViewDimension,
+ bool textureMultisampled) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ texture.sampleType = textureSampleType;
+ texture.viewDimension = textureViewDimension;
+ texture.multisampled = textureMultisampled;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension textureViewDimension) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ storageTexture.access = storageTextureAccess;
+ storageTexture.format = format;
+ storageTexture.viewDimension = textureViewDimension;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ const BindGroupLayoutEntry& entry)
+ : BindGroupLayoutEntry(entry) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const Ref<SamplerBase>& sampler)
+ : binding(binding), sampler(sampler) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(
+ uint32_t binding,
+ const Ref<TextureViewBase>& textureView)
+ : binding(binding), textureView(textureView) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size)
+ : binding(binding), buffer(buffer), offset(offset), size(size) {
+ }
+
+ BindingInitializationHelper::~BindingInitializationHelper() = default;
+
+ BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+ BindGroupEntry result;
+
+ result.binding = binding;
+ result.sampler = sampler.Get();
+ result.textureView = textureView.Get();
+ result.buffer = buffer.Get();
+ result.offset = offset;
+ result.size = size;
+
+ return result;
+ }
+
+ ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+ std::vector<BindGroupEntry> entries;
+ for (const BindingInitializationHelper& helper : entriesInitializer) {
+ entries.push_back(helper.GetAsBinding());
+ }
+
+ BindGroupDescriptor descriptor;
+ descriptor.layout = layout.Get();
+ descriptor.entryCount = entries.size();
+ descriptor.entries = entries.data();
+
+ return device->CreateBindGroup(&descriptor);
+ }
+
+ const char* GetLabelForTrace(const char* label) {
+ return (label == nullptr || strlen(label) == 0) ? "None" : label;
+ }
+
+} // namespace dawn::native::utils
diff --git a/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h
new file mode 100644
index 00000000000..6e1fad2eba7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/utils/WGPUHelpers.h
@@ -0,0 +1,123 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_UTILS_WGPUHELPERS_H_
+#define DAWNNATIVE_UTILS_WGPUHELPERS_H_
+
+#include <dawn/native/dawn_platform.h>
+
+#include <array>
+#include <initializer_list>
+#include <vector>
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::utils {
+
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
+
+ ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ const void* data,
+ uint64_t size);
+
+ template <typename T>
+ ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+ wgpu::BufferUsage usage,
+ std::initializer_list<T> data) {
+ return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& bindGroupLayout);
+
+ // Helpers to make creating bind group layouts look nicer:
+ //
+ // utils::MakeBindGroupLayout(device, {
+ // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+ // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+ // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+ // });
+
+ struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset = false,
+ uint64_t bufferMinBindingSize = 0);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+ bool textureMultisampled = false);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+
+ BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
+ };
+
+ ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+ DeviceBase* device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+ bool allowInternalBinding = false);
+
+ // Helpers to make creating bind groups look nicer:
+ //
+ // utils::MakeBindGroup(device, layout, {
+ // {0, mySampler},
+ // {1, myBuffer, offset, size},
+ // {3, myTextureView}
+ // });
+
+ // Structure with one constructor per-type of bindings, so that the initializer_list accepts
+ // bindings with the right type and no extra information.
+ struct BindingInitializationHelper {
+ BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
+ BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
+ BindingInitializationHelper(uint32_t binding,
+ const Ref<BufferBase>& buffer,
+ uint64_t offset = 0,
+ uint64_t size = wgpu::kWholeSize);
+ ~BindingInitializationHelper();
+
+ BindGroupEntry GetAsBinding() const;
+
+ uint32_t binding;
+ Ref<SamplerBase> sampler;
+ Ref<TextureViewBase> textureView;
+ Ref<BufferBase> buffer;
+ uint64_t offset = 0;
+ uint64_t size = 0;
+ };
+
+ ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+ DeviceBase* device,
+ const Ref<BindGroupLayoutBase>& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer);
+
+ const char* GetLabelForTrace(const char* label);
+
+} // namespace dawn::native::utils
+
+#endif // DAWNNATIVE_UTILS_WGPUHELPERS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp
new file mode 100644
index 00000000000..6957a39406c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.cpp
@@ -0,0 +1,353 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+
+#include "dawn/native/Limits.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+
+#include "dawn/common/GPUInfo.h"
+
+namespace dawn::native::vulkan {
+
+ Adapter::Adapter(InstanceBase* instance,
+ VulkanInstance* vulkanInstance,
+ VkPhysicalDevice physicalDevice)
+ : AdapterBase(instance, wgpu::BackendType::Vulkan),
+ mPhysicalDevice(physicalDevice),
+ mVulkanInstance(vulkanInstance) {
+ }
+
+ const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
+ return mDeviceInfo;
+ }
+
+ VkPhysicalDevice Adapter::GetPhysicalDevice() const {
+ return mPhysicalDevice;
+ }
+
+ VulkanInstance* Adapter::GetVulkanInstance() const {
+ return mVulkanInstance.Get();
+ }
+
+ bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
+ ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
+ format == VK_FORMAT_D32_SFLOAT_S8_UINT);
+
+ VkFormatProperties properties;
+ mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
+ &properties);
+ return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ MaybeError Adapter::InitializeImpl() {
+ DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+ if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
+ mDriverDescription = mDeviceInfo.driverProperties.driverName;
+ if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
+ mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
+ }
+ } else {
+ mDriverDescription =
+ "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
+ }
+
+ mDeviceId = mDeviceInfo.properties.deviceID;
+ mVendorId = mDeviceInfo.properties.vendorID;
+ mName = mDeviceInfo.properties.deviceName;
+
+ switch (mDeviceInfo.properties.deviceType) {
+ case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+ mAdapterType = wgpu::AdapterType::DiscreteGPU;
+ break;
+ case VK_PHYSICAL_DEVICE_TYPE_CPU:
+ mAdapterType = wgpu::AdapterType::CPU;
+ break;
+ default:
+ mAdapterType = wgpu::AdapterType::Unknown;
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+ // Needed for viewport Y-flip.
+ if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
+ return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
+ }
+
+ // Needed for security
+ if (!mDeviceInfo.features.robustBufferAccess) {
+ return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
+ }
+
+ if (!mDeviceInfo.features.textureCompressionBC &&
+ !(mDeviceInfo.features.textureCompressionETC2 &&
+ mDeviceInfo.features.textureCompressionASTC_LDR)) {
+ return DAWN_INTERNAL_ERROR(
+ "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
+ "textureCompressionASTC required.");
+ }
+
+ // Needed for the respective WebGPU features.
+ if (!mDeviceInfo.features.depthBiasClamp) {
+ return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
+ }
+ if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
+ return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
+ }
+ if (!mDeviceInfo.features.fullDrawIndexUint32) {
+ return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
+ }
+ if (!mDeviceInfo.features.imageCubeArray) {
+ return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
+ }
+ if (!mDeviceInfo.features.independentBlend) {
+ return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
+ }
+ if (!mDeviceInfo.features.sampleRateShading) {
+ return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
+ }
+
+ // Initialize supported extensions
+ if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ }
+
+ if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+ }
+
+ if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+ }
+
+ if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ }
+
+ if (mDeviceInfo.features.depthClamp == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+ }
+
+ if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+ }
+
+ if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
+ mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+ }
+
+ if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
+ mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+ }
+
+#if defined(DAWN_USE_SYNC_FDS)
+ // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
+ // features.
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+#endif
+
+ return {};
+ }
+
+ MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+ GetDefaultLimits(&limits->v1);
+ CombinedLimits baseLimits = *limits;
+
+ const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
+
+#define CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, compareOp, msgSegment) \
+ do { \
+ if (vkLimits.vulkanName compareOp baseLimits.v1.webgpuName) { \
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for " #webgpuName \
+ "." \
+ " VkPhysicalDeviceLimits::" #vulkanName \
+ " must be at " msgSegment " " + \
+ std::to_string(baseLimits.v1.webgpuName)); \
+ } \
+ limits->v1.webgpuName = vkLimits.vulkanName; \
+ } while (false)
+
+#define CHECK_AND_SET_V1_MAX_LIMIT(vulkanName, webgpuName) \
+ CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, <, "least")
+#define CHECK_AND_SET_V1_MIN_LIMIT(vulkanName, webgpuName) \
+ CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, >, "most")
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
+ CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
+ limits->v1.maxTextureDimension2D = std::min({
+ static_cast<uint32_t>(vkLimits.maxImageDimension2D),
+ static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
+ static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
+ static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
+ static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
+ static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
+ static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
+ });
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
+ maxDynamicUniformBuffersPerPipelineLayout);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
+ maxDynamicStorageBuffersPerPipelineLayout);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
+ maxSampledTexturesPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
+ maxStorageBuffersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
+ maxStorageTexturesPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
+ maxUniformBuffersPerShaderStage);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
+
+ CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment,
+ minUniformBufferOffsetAlignment);
+ CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment,
+ minStorageBufferOffsetAlignment);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
+
+ if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
+ vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
+ }
+ limits->v1.maxVertexBufferArrayStride = std::min(
+ vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
+
+ if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
+ vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
+ return DAWN_INTERNAL_ERROR(
+ "Insufficient Vulkan limits for maxInterStageShaderComponents");
+ }
+ limits->v1.maxInterStageShaderComponents =
+ std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations,
+ maxComputeInvocationsPerWorkgroup);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
+
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
+ CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
+ limits->v1.maxComputeWorkgroupsPerDimension = std::min({
+ vkLimits.maxComputeWorkGroupCount[0],
+ vkLimits.maxComputeWorkGroupCount[1],
+ vkLimits.maxComputeWorkGroupCount[2],
+ });
+
+ if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
+ return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
+ }
+ if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+ vkLimits.framebufferColorSampleCounts)) {
+ return DAWN_INTERNAL_ERROR(
+ "Insufficient Vulkan limits for framebufferColorSampleCounts");
+ }
+ if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+ vkLimits.framebufferDepthSampleCounts)) {
+ return DAWN_INTERNAL_ERROR(
+ "Insufficient Vulkan limits for framebufferDepthSampleCounts");
+ }
+
+ // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPUs drivers seem
+ // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
+ // storage buffers.
+ uint32_t vendorId = mDeviceInfo.properties.vendorID;
+ if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
+ !gpu_info::IsNvidia(vendorId)) {
+ if (vkLimits.maxFragmentCombinedOutputResources <
+ kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
+ baseLimits.v1.maxStorageBuffersPerShaderStage) {
+ return DAWN_INTERNAL_ERROR(
+ "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
+ }
+
+ uint32_t maxFragmentCombinedOutputResources =
+ kMaxColorAttachments + limits->v1.maxStorageTexturesPerShaderStage +
+ limits->v1.maxStorageBuffersPerShaderStage;
+
+ if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
+ // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
+ // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
+ // to fit within the Vulkan limit.
+ uint32_t countOverLimit = maxFragmentCombinedOutputResources -
+ vkLimits.maxFragmentCombinedOutputResources;
+
+ uint32_t maxStorageTexturesOverBase =
+ limits->v1.maxStorageTexturesPerShaderStage -
+ baseLimits.v1.maxStorageTexturesPerShaderStage;
+ uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
+ baseLimits.v1.maxStorageBuffersPerShaderStage;
+
+ // Reduce the number of resources by half the overage count, but clamp to
+ // to ensure we don't go below the base limits.
+ uint32_t numFewerStorageTextures =
+ std::min(countOverLimit / 2, maxStorageTexturesOverBase);
+ uint32_t numFewerStorageBuffers =
+ std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
+
+ if (numFewerStorageTextures == maxStorageTexturesOverBase) {
+ // If |numFewerStorageTextures| was clamped, subtract the remaining
+ // from the storage buffers.
+ numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
+ ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
+ } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
+ // If |numFewerStorageBuffers| was clamped, subtract the remaining
+ // from the storage textures.
+ numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
+ ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
+ }
+ limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
+ limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
+ }
+ }
+
+ return {};
+ }
+
+ bool Adapter::SupportsExternalImages() const {
+ // Via dawn::native::vulkan::WrapVulkanImage
+ return external_memory::Service::CheckSupport(mDeviceInfo) &&
+ external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
+ mVulkanInstance->GetFunctions());
+ }
+
+ ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+ return Device::Create(this, descriptor);
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h
new file mode 100644
index 00000000000..2f3948ecce7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/AdapterVk.h
@@ -0,0 +1,59 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_ADAPTERVK_H_
+#define DAWNNATIVE_VULKAN_ADAPTERVK_H_
+
+#include "dawn/native/Adapter.h"
+
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+ class VulkanInstance;
+
+ class Adapter : public AdapterBase {
+ public:
+ Adapter(InstanceBase* instance,
+ VulkanInstance* vulkanInstance,
+ VkPhysicalDevice physicalDevice);
+ ~Adapter() override = default;
+
+ // AdapterBase Implementation
+ bool SupportsExternalImages() const override;
+
+ const VulkanDeviceInfo& GetDeviceInfo() const;
+ VkPhysicalDevice GetPhysicalDevice() const;
+ VulkanInstance* GetVulkanInstance() const;
+
+ bool IsDepthStencilFormatSupported(VkFormat format);
+
+ private:
+ MaybeError InitializeImpl() override;
+ MaybeError InitializeSupportedFeaturesImpl() override;
+ MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+
+ ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
+ const DeviceDescriptor* descriptor) override;
+
+ VkPhysicalDevice mPhysicalDevice;
+ Ref<VulkanInstance> mVulkanInstance;
+ VulkanDeviceInfo mDeviceInfo = {};
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_ADAPTERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
new file mode 100644
index 00000000000..b7da2473904
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
@@ -0,0 +1,444 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BackendVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+// TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+# if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
+# elif defined(DAWN_PLATFORM_WINDOWS)
+constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
+# elif defined(DAWN_PLATFORM_MACOS)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
+# else
+# error "Unimplemented Swiftshader Vulkan backend platform"
+# endif
+#endif
+
+#if defined(DAWN_PLATFORM_LINUX)
+# if defined(DAWN_PLATFORM_ANDROID)
+constexpr char kVulkanLibName[] = "libvulkan.so";
+# else
+constexpr char kVulkanLibName[] = "libvulkan.so.1";
+# endif
+#elif defined(DAWN_PLATFORM_WINDOWS)
+constexpr char kVulkanLibName[] = "vulkan-1.dll";
+#elif defined(DAWN_PLATFORM_MACOS)
+constexpr char kVulkanLibName[] = "libvulkan.dylib";
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+constexpr char kVulkanLibName[] = "libvulkan.so";
+#else
+# error "Unimplemented Vulkan backend platform"
+#endif
+
+struct SkippedMessage {
+ const char* messageId;
+ const char* messageContents;
+};
+
+// Array of Validation error/warning messages that will be ignored, should include bugID
+constexpr SkippedMessage kSkippedMessages[] = {
+ // These errors are generated when simultaneously using a read-only depth/stencil attachment as
+ // a texture binding. This is valid Vulkan.
+ // The substring matching matches both
+ // VK_PIPELINE_STAGE_2_NONE and VK_PIPELINE_STAGE_2_NONE_KHR.
+ //
+ // When storeOp=NONE is not present, Dawn uses storeOp=STORE, but Vulkan validation layer
+ // considers the image read-only and produces a hazard. Dawn can't rely on storeOp=NONE and
+ // so this is not expected to be worked around.
+ // See http://crbug.com/dawn/1225 for more details.
+ {"SYNC-HAZARD-WRITE_AFTER_READ",
+ "depth aspect during store with storeOp VK_ATTACHMENT_STORE_OP_STORE. Access info (usage: "
+ "SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, prior_usage: "
+ "SYNC_FRAGMENT_SHADER_SHADER_STORAGE_READ, read_barriers: VK_PIPELINE_STAGE_2_NONE"},
+
+ {"SYNC-HAZARD-WRITE_AFTER_READ",
+ "stencil aspect during store with stencilStoreOp VK_ATTACHMENT_STORE_OP_STORE. Access info "
+ "(usage: SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE, prior_usage: "
+ "SYNC_FRAGMENT_SHADER_SHADER_STORAGE_READ, read_barriers: VK_PIPELINE_STAGE_2_NONE"},
+};
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+ static constexpr ICD kICDs[] = {
+ ICD::None,
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+ ICD::SwiftShader,
+#endif // defined(DAWN_ENABLE_SWIFTSHADER)
+ };
+
+ // Suppress validation errors that are known. Returns false in that case.
+ bool ShouldReportDebugMessage(const char* messageId, const char* message) {
+ for (const SkippedMessage& msg : kSkippedMessages) {
+ if (strstr(messageId, msg.messageId) != nullptr &&
+ strstr(message, msg.messageContents) != nullptr) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ VKAPI_ATTR VkBool32 VKAPI_CALL
+ OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* /* pUserData */) {
+ if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
+ dawn::WarningLog() << pCallbackData->pMessage;
+ ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
+ }
+ return VK_FALSE;
+ }
+
+ // A debug callback specifically for instance creation so that we don't fire an ASSERT when
+ // the instance fails creation in an expected manner (for example the system not having
+ // Vulkan drivers).
+ VKAPI_ATTR VkBool32 VKAPI_CALL OnInstanceCreationDebugUtilsCallback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+ const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+ void* /* pUserData */) {
+ dawn::WarningLog() << pCallbackData->pMessage;
+ return VK_FALSE;
+ }
+
+ } // anonymous namespace
+
+ VulkanInstance::VulkanInstance() = default;
+
+ VulkanInstance::~VulkanInstance() {
+ if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
+ mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
+ mDebugUtilsMessenger = VK_NULL_HANDLE;
+ }
+
+ // VkPhysicalDevices are destroyed when the VkInstance is destroyed
+ if (mInstance != VK_NULL_HANDLE) {
+ mFunctions.DestroyInstance(mInstance, nullptr);
+ mInstance = VK_NULL_HANDLE;
+ }
+ }
+
+ const VulkanFunctions& VulkanInstance::GetFunctions() const {
+ return mFunctions;
+ }
+
+ VkInstance VulkanInstance::GetVkInstance() const {
+ return mInstance;
+ }
+
+ const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
+ return mGlobalInfo;
+ }
+
+ const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
+ return mPhysicalDevices;
+ }
+
+ // static
+ ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance,
+ ICD icd) {
+ Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
+ DAWN_TRY(vulkanInstance->Initialize(instance, icd));
+ return std::move(vulkanInstance);
+ }
+
+ MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
+ // These environment variables need only be set while loading procs and gathering device
+ // info.
+ ScopedEnvironmentVar vkICDFilenames;
+ ScopedEnvironmentVar vkLayerPath;
+
+ const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
+
+ auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
+ std::string list;
+ bool first = true;
+ for (const std::string& path : searchPaths) {
+ if (!first) {
+ list += ", ";
+ }
+ first = false;
+ list += (path + name);
+ }
+ return list;
+ };
+
+ auto LoadVulkan = [&](const char* libName) -> MaybeError {
+ for (const std::string& path : searchPaths) {
+ std::string resolvedPath = path + libName;
+ if (mVulkanLib.Open(resolvedPath)) {
+ return {};
+ }
+ }
+ return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
+ CommaSeparatedResolvedSearchPaths(libName));
+ };
+
+ switch (icd) {
+ case ICD::None: {
+ DAWN_TRY(LoadVulkan(kVulkanLibName));
+ // Succesfully loaded driver; break.
+ break;
+ }
+ case ICD::SwiftShader: {
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+ DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
+ break;
+#endif // defined(DAWN_ENABLE_SWIFTSHADER)
+ // ICD::SwiftShader should not be passed if SwiftShader is not enabled.
+ UNREACHABLE();
+ }
+ }
+
+ if (instance->IsBackendValidationEnabled()) {
+#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
+ auto execDir = GetExecutableDirectory();
+ std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
+ if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
+ return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
+ }
+#else
+ dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
+ "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
+#endif
+ }
+
+ DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
+
+ DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
+
+ VulkanGlobalKnobs usedGlobalKnobs = {};
+ DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
+ *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
+
+ DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
+
+ if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
+ DAWN_TRY(RegisterDebugUtils());
+ }
+
+ DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
+
+ return {};
+ }
+
+ ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(
+ const InstanceBase* instance) {
+ VulkanGlobalKnobs usedKnobs = {};
+ std::vector<const char*> layerNames;
+ InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+
+ auto UseLayerIfAvailable = [&](VulkanLayer layer) {
+ if (mGlobalInfo.layers[layer]) {
+ layerNames.push_back(GetVulkanLayerInfo(layer).name);
+ usedKnobs.layers.set(layer, true);
+ extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
+ }
+ };
+
+ // vktrace works by instering a layer, but we hide it behind a macro because the vktrace
+ // layer crashes when used without vktrace server started. See this vktrace issue:
+ // https://github.com/LunarG/VulkanTools/issues/254
+ // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
+ // by other layers.
+#if defined(DAWN_USE_VKTRACE)
+ UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
+#endif
+ // RenderDoc installs a layer at the system level for its capture but we don't want to use
+ // it unless we are debugging in RenderDoc so we hide it behind a macro.
+#if defined(DAWN_USE_RENDERDOC)
+ UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
+#endif
+
+ if (instance->IsBackendValidationEnabled()) {
+ UseLayerIfAvailable(VulkanLayer::Validation);
+ }
+
+ // Always use the Fuchsia swapchain layer if available.
+ UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
+
+ // Available and known instance extensions default to being requested, but some special
+ // cases are removed.
+ usedKnobs.extensions = extensionsToRequest;
+
+ std::vector<const char*> extensionNames;
+ for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
+ const InstanceExtInfo& info = GetInstanceExtInfo(ext);
+
+ if (info.versionPromoted > mGlobalInfo.apiVersion) {
+ extensionNames.push_back(info.name);
+ }
+ }
+
+ VkApplicationInfo appInfo;
+ appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ appInfo.pNext = nullptr;
+ appInfo.pApplicationName = nullptr;
+ appInfo.applicationVersion = 0;
+ appInfo.pEngineName = nullptr;
+ appInfo.engineVersion = 0;
+ // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
+ // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
+ // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
+ // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
+ // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
+ // treat 1.2 as the highest API version dawn targets.
+ if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
+ appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+ } else {
+ appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
+ }
+
+ VkInstanceCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.pApplicationInfo = &appInfo;
+ createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
+ createInfo.ppEnabledLayerNames = layerNames.data();
+ createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+ createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+ PNextChainBuilder createInfoChain(&createInfo);
+
+ // Register the debug callback for instance creation so we receive message for any errors
+ // (validation or other).
+ VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
+ if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
+ utilsMessengerCreateInfo.flags = 0;
+ utilsMessengerCreateInfo.messageSeverity =
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
+ utilsMessengerCreateInfo.pUserData = nullptr;
+
+ createInfoChain.Add(&utilsMessengerCreateInfo,
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
+ }
+
+ // Try to turn on synchronization validation if the instance was created with backend
+ // validation enabled.
+ VkValidationFeaturesEXT validationFeatures;
+ VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
+ VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
+ if (instance->IsBackendValidationEnabled() &&
+ usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
+ validationFeatures.enabledValidationFeatureCount = 1;
+ validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
+ validationFeatures.disabledValidationFeatureCount = 0;
+ validationFeatures.pDisabledValidationFeatures = nullptr;
+
+ createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
+ }
+
+ DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
+ "vkCreateInstance"));
+
+ return usedKnobs;
+ }
+
+ MaybeError VulkanInstance::RegisterDebugUtils() {
+ VkDebugUtilsMessengerCreateInfoEXT createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ createInfo.pfnUserCallback = OnDebugUtilsCallback;
+ createInfo.pUserData = nullptr;
+
+ return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(
+ mInstance, &createInfo, nullptr, &*mDebugUtilsMessenger),
+ "vkCreateDebugUtilsMessengerEXT");
+ }
+
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::Vulkan) {
+ }
+
+ Backend::~Backend() = default;
+
+ std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+ AdapterDiscoveryOptions options;
+ auto result = DiscoverAdapters(&options);
+ if (result.IsError()) {
+ GetInstance()->ConsumedError(result.AcquireError());
+ return {};
+ }
+ return result.AcquireSuccess();
+ }
+
+ ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
+
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+ std::vector<Ref<AdapterBase>> adapters;
+
+ InstanceBase* instance = GetInstance();
+ for (ICD icd : kICDs) {
+#if defined(DAWN_PLATFORM_MACOS)
+ // On Mac, we don't expect non-Swiftshader Vulkan to be available.
+ if (icd == ICD::None) {
+ continue;
+ }
+#endif // defined(DAWN_PLATFORM_MACOS)
+ if (options->forceSwiftShader && icd != ICD::SwiftShader) {
+ continue;
+ }
+ if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
+ DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
+ return {};
+ }())) {
+ // Instance failed to initialize.
+ continue;
+ }
+ const std::vector<VkPhysicalDevice>& physicalDevices =
+ mVulkanInstances[icd]->GetPhysicalDevices();
+ for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
+ Ref<Adapter> adapter = AcquireRef(
+ new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
+ if (instance->ConsumedError(adapter->Initialize())) {
+ continue;
+ }
+ adapters.push_back(std::move(adapter));
+ }
+ }
+ return adapters;
+ }
+
+ BackendConnection* Connect(InstanceBase* instance) {
+ return new Backend(instance);
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h
new file mode 100644
index 00000000000..2902dbb43bc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BackendVk.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BACKENDVK_H_
+#define DAWNNATIVE_VULKAN_BACKENDVK_H_
+
+#include "dawn/native/BackendConnection.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/common/RefCounted.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+ enum class ICD {
+ None,
+ SwiftShader,
+ };
+
+ // VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
+ // on that instance, Vulkan functions loaded from the library, and global information
+ // gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
+ // and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
+ // on an instance are no longer in use, the instance is deleted. This can be particuarly useful
+ // when we create multiple instances to selectively discover ICDs (like only
+ // SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
+ // can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
+ class VulkanInstance : public RefCounted {
+ public:
+ static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
+ ~VulkanInstance();
+
+ const VulkanFunctions& GetFunctions() const;
+ VkInstance GetVkInstance() const;
+ const VulkanGlobalInfo& GetGlobalInfo() const;
+ const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
+
+ private:
+ VulkanInstance();
+
+ MaybeError Initialize(const InstanceBase* instance, ICD icd);
+ ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
+
+ MaybeError RegisterDebugUtils();
+
+ DynamicLib mVulkanLib;
+ VulkanGlobalInfo mGlobalInfo = {};
+ VkInstance mInstance = VK_NULL_HANDLE;
+ VulkanFunctions mFunctions;
+
+ VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
+
+ std::vector<VkPhysicalDevice> mPhysicalDevices;
+ };
+
+ class Backend : public BackendConnection {
+ public:
+ Backend(InstanceBase* instance);
+ ~Backend() override;
+
+ MaybeError Initialize();
+
+ std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
+
+ private:
+ ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_BACKENDVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
new file mode 100644
index 00000000000..20a8e1bcfd0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
@@ -0,0 +1,199 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_vector.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <map>
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+ VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
+ VkShaderStageFlags flags = 0;
+
+ if (stages & wgpu::ShaderStage::Vertex) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+ if (stages & wgpu::ShaderStage::Fragment) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ if (stages & wgpu::ShaderStage::Compute) {
+ flags |= VK_SHADER_STAGE_COMPUTE_BIT;
+ }
+
+ return flags;
+ }
+
+ } // anonymous namespace
+
+ VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer:
+ switch (bindingInfo.buffer.type) {
+ case wgpu::BufferBindingType::Uniform:
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+ }
+ return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ case wgpu::BufferBindingType::Storage:
+ case kInternalStorageBufferBinding:
+ case wgpu::BufferBindingType::ReadOnlyStorage:
+ if (bindingInfo.buffer.hasDynamicOffset) {
+ return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+ }
+ return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ case wgpu::BufferBindingType::Undefined:
+ UNREACHABLE();
+ }
+ case BindingInfoType::Sampler:
+ return VK_DESCRIPTOR_TYPE_SAMPLER;
+ case BindingInfoType::Texture:
+ case BindingInfoType::ExternalTexture:
+ return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+ case BindingInfoType::StorageTexture:
+ return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ }
+ UNREACHABLE();
+ }
+
+ // static
+ ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ Ref<BindGroupLayout> bgl =
+ AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+ DAWN_TRY(bgl->Initialize());
+ return bgl;
+ }
+
+ MaybeError BindGroupLayout::Initialize() {
+ // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
+ // one entry per binding set. This might be optimized by computing continuous ranges of
+ // bindings of the same type.
+ ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
+ bindings.reserve(GetBindingCount());
+
+ for (const auto& [_, bindingIndex] : GetBindingMap()) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ VkDescriptorSetLayoutBinding vkBinding;
+ vkBinding.binding = static_cast<uint32_t>(bindingIndex);
+ // TODO(dawn:728) In the future, special handling will be needed for external textures
+ // here because they encompass multiple views.
+ vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
+ vkBinding.descriptorCount = 1;
+ vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+ vkBinding.pImmutableSamplers = nullptr;
+
+ bindings.emplace_back(vkBinding);
+ }
+
+ VkDescriptorSetLayoutCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
+ createInfo.pBindings = bindings.data();
+
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
+ device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreateDescriptorSetLayout"));
+
+ // Compute the size of descriptor pools used for this layout.
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
+
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ // TODO(dawn:728) In the future, special handling will be needed for external textures
+ // here because they encompass multiple views.
+ VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
+
+ // map::operator[] will return 0 if the key doesn't exist.
+ descriptorCountPerType[vulkanType]++;
+ }
+
+ // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
+ // counts.
+ mDescriptorSetAllocator =
+ DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
+ BindGroupLayout::~BindGroupLayout() = default;
+
+ void BindGroupLayout::DestroyImpl() {
+ BindGroupLayoutBase::DestroyImpl();
+
+ Device* device = ToBackend(GetDevice());
+
+ // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
+ // so we can destroy mHandle immediately instead of using the FencedDeleter.
+ // (Swiftshader implements this wrong b/154522740).
+ // In practice, the GPU is done with all descriptor sets because bind group deallocation
+ // refs the bind group layout so that once the bind group is finished being used, we can
+ // recycle its descriptor set.
+ if (mHandle != VK_NULL_HANDLE) {
+ device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
+ mHandle = VK_NULL_HANDLE;
+ }
+ mDescriptorSetAllocator = nullptr;
+ }
+
+ VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
+ return mHandle;
+ }
+
+ ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+ Device* device,
+ const BindGroupDescriptor* descriptor) {
+ DescriptorSetAllocation descriptorSetAllocation;
+ DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
+
+ return AcquireRef(
+ mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+ DescriptorSetAllocation* descriptorSetAllocation) {
+ mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+ void BindGroupLayout::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_BindGroupLayout", GetLabel());
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h
new file mode 100644
index 00000000000..558ff7fc29b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupLayoutVk.h
@@ -0,0 +1,80 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
+#define DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
+
+#include "dawn/native/BindGroupLayout.h"
+
+#include "dawn/common/SlabAllocator.h"
+#include "dawn/common/vulkan_platform.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+ class BindGroup;
+ struct DescriptorSetAllocation;
+ class DescriptorSetAllocator;
+ class Device;
+
+ VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
+
+ // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
+ // it's hard to have something where we can mix different types of descriptor sets because
+ // we don't know if their vector of number of descriptors will be similar.
+ //
+ // That's why that in addition to containing the VkDescriptorSetLayout to create
+ // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
+ // sets.
+ //
+ // The allocations is done with one pool per descriptor set, which is inefficient, but at least
+ // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
+ // is important because creating them can incur GPU memory allocation which is usually an
+ // expensive syscall.
+ class BindGroupLayout final : public BindGroupLayoutBase {
+ public:
+ static ResultOrError<Ref<BindGroupLayout>> Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
+
+ VkDescriptorSetLayout GetHandle() const;
+
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup,
+ DescriptorSetAllocation* descriptorSetAllocation);
+
+ private:
+ ~BindGroupLayout() override;
+ MaybeError Initialize();
+ void DestroyImpl() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp
new file mode 100644
index 00000000000..00e70cfc163
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.cpp
@@ -0,0 +1,165 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BindGroupVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/ityp_stack_vec.h"
+#include "dawn/native/ExternalTexture.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/SamplerVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ // static
+ ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+ BindGroup::BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation)
+ : BindGroupBase(this, device, descriptor),
+ mDescriptorSetAllocation(descriptorSetAllocation) {
+ // Now do a write of a single descriptor set with all possible chained data allocated on the
+ // stack.
+ const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
+ ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
+ bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
+ writeBufferInfo(bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
+ writeImageInfo(bindingCount);
+
+ uint32_t numWrites = 0;
+ for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
+ const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
+
+ auto& write = writes[numWrites];
+ write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write.pNext = nullptr;
+ write.dstSet = GetHandle();
+ write.dstBinding = static_cast<uint32_t>(bindingIndex);
+ write.dstArrayElement = 0;
+ write.descriptorCount = 1;
+ write.descriptorType = VulkanDescriptorType(bindingInfo);
+
+ switch (bindingInfo.bindingType) {
+ case BindingInfoType::Buffer: {
+ BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+ VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Buffer was destroyed. Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
+ }
+ writeBufferInfo[numWrites].buffer = handle;
+ writeBufferInfo[numWrites].offset = binding.offset;
+ writeBufferInfo[numWrites].range = binding.size;
+ write.pBufferInfo = &writeBufferInfo[numWrites];
+ break;
+ }
+
+ case BindingInfoType::Sampler: {
+ Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
+ writeImageInfo[numWrites].sampler = sampler->GetHandle();
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
+
+ case BindingInfoType::Texture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ VkImageView handle = view->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Texture was destroyed before the TextureView was created.
+ // Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
+ }
+ writeImageInfo[numWrites].imageView = handle;
+
+ // The layout may be GENERAL here because of interactions between the Sampled
+ // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
+ writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
+ ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
+
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
+
+ case BindingInfoType::StorageTexture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+ VkImageView handle = view->GetHandle();
+ if (handle == VK_NULL_HANDLE) {
+ // The Texture was destroyed before the TextureView was created.
+ // Skip this descriptor write since it would be
+ // a Vulkan Validation Layers error. This bind group won't be used as it
+ // is an error to submit a command buffer that references destroyed
+ // resources.
+ continue;
+ }
+ writeImageInfo[numWrites].imageView = handle;
+ writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
+
+ case BindingInfoType::ExternalTexture:
+ UNREACHABLE();
+ break;
+ }
+
+ numWrites++;
+ }
+
+ // TODO(crbug.com/dawn/855): Batch these updates
+ device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
+ nullptr);
+
+ SetLabelImpl();
+ }
+
+ BindGroup::~BindGroup() = default;
+
+ void BindGroup::DestroyImpl() {
+ BindGroupBase::DestroyImpl();
+ ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
+ }
+
+ VkDescriptorSet BindGroup::GetHandle() const {
+ return mDescriptorSetAllocation.set;
+ }
+
+ void BindGroup::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET,
+ reinterpret_cast<uint64_t&>(mDescriptorSetAllocation.set), "Dawn_BindGroup",
+ GetLabel());
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h
new file mode 100644
index 00000000000..100ea852917
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BindGroupVk.h
@@ -0,0 +1,55 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BINDGROUPVK_H_
+#define DAWNNATIVE_VULKAN_BINDGROUPVK_H_
+
+#include "dawn/native/BindGroup.h"
+
+#include "dawn/common/PlacementAllocated.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DescriptorSetAllocation.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class BindGroup final : public BindGroupBase, public PlacementAllocated {
+ public:
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
+
+ BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation);
+
+ VkDescriptorSet GetHandle() const;
+
+ private:
+ ~BindGroup() override;
+
+ void DestroyImpl() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ // The descriptor set in this allocation outlives the BindGroup because it is owned by
+ // the BindGroupLayout which is referenced by the BindGroup.
+ DescriptorSetAllocation mDescriptorSetAllocation;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_BINDGROUPVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp
new file mode 100644
index 00000000000..c7e9fb02d3b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.cpp
@@ -0,0 +1,413 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/BufferVk.h"
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <cstring>
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+ VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
+ VkBufferUsageFlags flags = 0;
+
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Vertex) {
+ flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Uniform) {
+ flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ }
+ if (usage &
+ (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+ flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ }
+
+ return flags;
+ }
+
+ VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
+ VkPipelineStageFlags flags = 0;
+
+ if (usage & kMappableBufferUsages) {
+ flags |= VK_PIPELINE_STAGE_HOST_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
+ flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
+ flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
+ kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+ flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+ }
+
+ return flags;
+ }
+
+ VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
+ VkAccessFlags flags = 0;
+
+ if (usage & wgpu::BufferUsage::MapRead) {
+ flags |= VK_ACCESS_HOST_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::MapWrite) {
+ flags |= VK_ACCESS_HOST_WRITE_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopySrc) {
+ flags |= VK_ACCESS_TRANSFER_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::CopyDst) {
+ flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Index) {
+ flags |= VK_ACCESS_INDEX_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Vertex) {
+ flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Uniform) {
+ flags |= VK_ACCESS_UNIFORM_READ_BIT;
+ }
+ if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+ flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+ }
+ if (usage & kReadOnlyStorageBuffer) {
+ flags |= VK_ACCESS_SHADER_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::Indirect) {
+ flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+ }
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+ }
+
+ return flags;
+ }
+
+ } // namespace
+
+ // static
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return std::move(buffer);
+ }
+
+ MaybeError Buffer::Initialize(bool mappedAtCreation) {
+ // vkCmdFillBuffer requires the size to be a multiple of 4.
+ constexpr size_t kAlignment = 4u;
+
+ uint32_t extraBytes = 0u;
+ if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
+ // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
+ // is equal to the whole buffer size. Allocate at least one more byte so it
+ // is valid to setVertex/IndexBuffer with a zero-sized range at the end
+ // of the buffer with (offset=buffer.size, size=0).
+ extraBytes = 1u;
+ }
+
+ uint64_t size = GetSize();
+ if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+
+ size += extraBytes;
+
+ // Allocate at least 4 bytes so clamped accesses are always in bounds.
+ // Also, Vulkan requires the size to be non-zero.
+ size = std::max(size, uint64_t(4u));
+
+ if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
+            // Alignment would overflow.
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+ }
+ mAllocatedSize = Align(size, kAlignment);
+
+ // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
+ // some constants to the size passed and align it, but for values close to the maximum
+ // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
+        // VkMemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
+ // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
+ // safely return an OOM error.
+ if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
+ }
+
+ VkBufferCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.size = mAllocatedSize;
+ // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+ // and robust resource initialization.
+ createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
+ createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = 0;
+
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "vkCreateBuffer"));
+
+ // Gather requirements for the buffer's memory and allocate it.
+ VkMemoryRequirements requirements;
+ device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+
+ MemoryKind requestKind = MemoryKind::Linear;
+ if (GetUsage() & kMappableBufferUsages) {
+ requestKind = MemoryKind::LinearMappable;
+ }
+ DAWN_TRY_ASSIGN(mMemoryAllocation,
+ device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
+
+ // Finally associate it with the buffer.
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
+ ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+ mMemoryAllocation.GetOffset()),
+ "vkBindBufferMemory"));
+
+ // The buffers with mappedAtCreation == true will be initialized in
+ // BufferBase::MapAtCreation().
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+ !mappedAtCreation) {
+ ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
+ }
+
+ // Initialize the padding bytes to zero.
+ if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+ uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+ if (paddingBytes > 0) {
+ uint32_t clearSize = Align(paddingBytes, 4);
+ uint64_t clearOffset = GetAllocatedSize() - clearSize;
+
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ ClearBuffer(recordingContext, 0, clearOffset, clearSize);
+ }
+ }
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ Buffer::~Buffer() = default;
+
+ VkBuffer Buffer::GetHandle() const {
+ return mHandle;
+ }
+
+ void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
+ wgpu::BufferUsage usage) {
+ VkBufferMemoryBarrier barrier;
+ VkPipelineStageFlags srcStages = 0;
+ VkPipelineStageFlags dstStages = 0;
+
+ if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
+ ASSERT(srcStages != 0 && dstStages != 0);
+ ToBackend(GetDevice())
+ ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+ nullptr, 1u, &barrier, 0, nullptr);
+ }
+ }
+
+ bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+ VkBufferMemoryBarrier* barrier,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages) {
+ bool lastIncludesTarget = IsSubset(usage, mLastUsage);
+ bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+
+ // We can skip transitions to already current read-only usages.
+ if (lastIncludesTarget && lastReadOnly) {
+ return false;
+ }
+
+ // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
+ if (mLastUsage == wgpu::BufferUsage::None) {
+ mLastUsage = usage;
+ return false;
+ }
+
+ *srcStages |= VulkanPipelineStage(mLastUsage);
+ *dstStages |= VulkanPipelineStage(usage);
+
+ barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier->pNext = nullptr;
+ barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
+ barrier->dstAccessMask = VulkanAccessFlags(usage);
+ barrier->srcQueueFamilyIndex = 0;
+ barrier->dstQueueFamilyIndex = 0;
+ barrier->buffer = mHandle;
+ barrier->offset = 0;
+ // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+ barrier->size = GetAllocatedSize();
+
+ mLastUsage = usage;
+
+ return true;
+ }
+
+ bool Buffer::IsCPUWritableAtCreation() const {
+ // TODO(enga): Handle CPU-visible memory on UMA
+ return mMemoryAllocation.GetMappedPointer() != nullptr;
+ }
+
+ MaybeError Buffer::MapAtCreationImpl() {
+ return {};
+ }
+
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ Device* device = ToBackend(GetDevice());
+
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+ // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
+ EnsureDataInitialized(recordingContext);
+
+ if (mode & wgpu::MapMode::Read) {
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
+ }
+ return {};
+ }
+
+ void Buffer::UnmapImpl() {
+        // No need to do anything, we keep CPU-visible memory mapped at all times.
+ }
+
+ void* Buffer::GetMappedPointerImpl() {
+ uint8_t* memory = mMemoryAllocation.GetMappedPointer();
+ ASSERT(memory != nullptr);
+ return memory;
+ }
+
+ void Buffer::DestroyImpl() {
+ BufferBase::DestroyImpl();
+
+ ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+ bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ InitializeToZero(recordingContext);
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero(recordingContext);
+ return true;
+ }
+
+ bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy) {
+ if (!NeedsInitialization()) {
+ return false;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ return false;
+ }
+
+ InitializeToZero(recordingContext);
+ return true;
+ }
+
+ void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_BUFFER,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_Buffer", GetLabel());
+ }
+
+ void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
+ ASSERT(NeedsInitialization());
+
+ ClearBuffer(recordingContext, 0u);
+ GetDevice()->IncrementLazyClearCountForTesting();
+ SetIsDataInitialized();
+ }
+
+ void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
+ uint32_t clearValue,
+ uint64_t offset,
+ uint64_t size) {
+ ASSERT(recordingContext != nullptr);
+ size = size > 0 ? size : GetAllocatedSize();
+ ASSERT(size > 0);
+
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+ Device* device = ToBackend(GetDevice());
+ // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+ // Note: Allocated size must be a multiple of 4.
+ ASSERT(size % 4 == 0);
+ device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size,
+ clearValue);
+ }
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h
new file mode 100644
index 00000000000..1f7ae748350
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/BufferVk.h
@@ -0,0 +1,82 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_BUFFERVK_H_
+#define DAWNNATIVE_VULKAN_BUFFERVK_H_
+
+#include "dawn/native/Buffer.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+namespace dawn::native::vulkan {
+
+ struct CommandRecordingContext;
+ class Device;
+
+ class Buffer final : public BufferBase {
+ public:
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
+
+ VkBuffer GetHandle() const;
+
+ // Transitions the buffer to be used as `usage`, recording any necessary barrier in
+ // `commands`.
+ // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+ void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
+ bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+ VkBufferMemoryBarrier* barrier,
+ VkPipelineStageFlags* srcStages,
+ VkPipelineStageFlags* dstStages);
+
+ // All the Ensure methods return true if the buffer was initialized to zero.
+ bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size);
+ bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ ~Buffer() override;
+ using BufferBase::BufferBase;
+
+ MaybeError Initialize(bool mappedAtCreation);
+ void InitializeToZero(CommandRecordingContext* recordingContext);
+ void ClearBuffer(CommandRecordingContext* recordingContext,
+ uint32_t clearValue,
+ uint64_t offset = 0,
+ uint64_t size = 0);
+
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+ void UnmapImpl() override;
+ void DestroyImpl() override;
+ bool IsCPUWritableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
+ void* GetMappedPointerImpl() override;
+
+ VkBuffer mHandle = VK_NULL_HANDLE;
+ ResourceMemoryAllocation mMemoryAllocation;
+
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_BUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp
new file mode 100644
index 00000000000..1fba2b35347
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.cpp
@@ -0,0 +1,1326 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/CommandBufferVk.h"
+
+#include "dawn/native/BindGroupTracker.h"
+#include "dawn/native/CommandEncoder.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/RenderBundle.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/QuerySetVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <algorithm>
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+ VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
+ switch (format) {
+ case wgpu::IndexFormat::Uint16:
+ return VK_INDEX_TYPE_UINT16;
+ case wgpu::IndexFormat::Uint32:
+ return VK_INDEX_TYPE_UINT32;
+ case wgpu::IndexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
+ Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
+ return imageExtentSrc.width == imageExtentDst.width &&
+ imageExtentSrc.height == imageExtentDst.height &&
+ imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
+ }
+
+ VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize,
+ Aspect aspect) {
+ const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
+ const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
+
+ VkImageCopy region;
+ region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
+ region.srcSubresource.mipLevel = srcCopy.mipLevel;
+ region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
+ region.dstSubresource.mipLevel = dstCopy.mipLevel;
+
+ bool has3DTextureInCopy = false;
+
+ region.srcOffset.x = srcCopy.origin.x;
+ region.srcOffset.y = srcCopy.origin.y;
+ switch (srcTexture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e2D:
+ region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
+ region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.srcOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffset.z = srcCopy.origin.z;
+ break;
+ }
+
+ region.dstOffset.x = dstCopy.origin.x;
+ region.dstOffset.y = dstCopy.origin.y;
+ switch (dstTexture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ region.dstOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e2D:
+ region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
+ region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+ region.dstOffset.z = 0;
+ break;
+ case wgpu::TextureDimension::e3D:
+ has3DTextureInCopy = true;
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ region.dstOffset.z = dstCopy.origin.z;
+ break;
+ }
+
+ ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
+ Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
+ region.extent.width = imageExtent.width;
+ region.extent.height = imageExtent.height;
+ region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
+
+ return region;
+ }
+
+ class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+ public:
+ DescriptorSetTracker() = default;
+
+ void Apply(Device* device,
+ CommandRecordingContext* recordingContext,
+ VkPipelineBindPoint bindPoint) {
+ BeforeApply();
+ for (BindGroupIndex dirtyIndex :
+ IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+ VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
+ const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
+ ? mDynamicOffsets[dirtyIndex].data()
+ : nullptr;
+ device->fn.CmdBindDescriptorSets(
+ recordingContext->commandBuffer, bindPoint,
+ ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
+ 1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
+ }
+ AfterApply();
+ }
+ };
+
+            // Records the necessary barriers for a synchronization scope using the resource usage
+            // data pre-computed in the frontend. Also performs lazy initialization if required.
+            // Barriers for all buffers and textures in the scope are accumulated and flushed with
+            // a single vkCmdPipelineBarrier at the end.
+            void TransitionAndClearForSyncScope(Device* device,
+                                                CommandRecordingContext* recordingContext,
+                                                const SyncScopeResourceUsage& scope) {
+                std::vector<VkBufferMemoryBarrier> bufferBarriers;
+                std::vector<VkImageMemoryBarrier> imageBarriers;
+                VkPipelineStageFlags srcStages = 0;
+                VkPipelineStageFlags dstStages = 0;
+
+                // Buffers: lazily initialize contents, then collect a memory barrier for each
+                // buffer whose usage transition actually requires one.
+                for (size_t i = 0; i < scope.buffers.size(); ++i) {
+                    Buffer* buffer = ToBackend(scope.buffers[i]);
+                    buffer->EnsureDataInitialized(recordingContext);
+
+                    VkBufferMemoryBarrier bufferBarrier;
+                    if (buffer->TransitionUsageAndGetResourceBarrier(
+                            scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
+                        bufferBarriers.push_back(bufferBarrier);
+                    }
+                }
+
+                for (size_t i = 0; i < scope.textures.size(); ++i) {
+                    Texture* texture = ToBackend(scope.textures[i]);
+
+                    // Clear subresources that are not render attachments. Render attachments will be
+                    // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+                    // subresource has not been initialized before the render pass.
+                    scope.textureUsages[i].Iterate(
+                        [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                            if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                                texture->EnsureSubresourceContentInitialized(recordingContext, range);
+                            }
+                        });
+                    // Accumulate the image barriers (and the src/dst stage masks) for this
+                    // texture's usage in the scope.
+                    texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
+                                                    &imageBarriers, &srcStages, &dstStages);
+                }
+
+                // Flush all accumulated barriers at once; skip the call entirely when no
+                // resource needed a transition.
+                if (bufferBarriers.size() || imageBarriers.size()) {
+                    device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
+                                                  0, 0, nullptr, bufferBarriers.size(),
+                                                  bufferBarriers.data(), imageBarriers.size(),
+                                                  imageBarriers.data());
+                }
+            }
+
+            // Begins a Vulkan render pass for `renderPass`: fetches a compatible VkRenderPass
+            // from the device's render pass cache, creates a single-use VkFramebuffer over the
+            // attachment views, gathers the clear values, and records vkCmdBeginRenderPass.
+            MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
+                                             Device* device,
+                                             BeginRenderPassCmd* renderPass) {
+                VkCommandBuffer commands = recordingContext->commandBuffer;
+
+                // Query a VkRenderPass from the cache
+                VkRenderPass renderPassVK = VK_NULL_HANDLE;
+                {
+                    RenderPassCacheQuery query;
+
+                    for (ColorAttachmentIndex i :
+                         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                        const auto& attachmentInfo = renderPass->colorAttachments[i];
+
+                        bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+                        query.SetColor(i, attachmentInfo.view->GetFormat().format,
+                                       attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
+                    }
+
+                    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                        const auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+                        query.SetDepthStencil(
+                            attachmentInfo.view->GetTexture()->GetFormat().format,
+                            attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+                            attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                            attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
+                    }
+
+                    query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
+
+                    DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
+                }
+
+                // Create a framebuffer that will be used once for the render pass and gather the clear
+                // values for the attachments at the same time.
+                std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
+                VkFramebuffer framebuffer = VK_NULL_HANDLE;
+                uint32_t attachmentCount = 0;
+                {
+                    // Fill in the attachment info that will be chained in the framebuffer create info.
+                    // Order is: color attachments, then depth/stencil, then resolve targets —
+                    // presumably matching the attachment order of the cached VkRenderPass; verify
+                    // against RenderPassCache if this is modified.
+                    std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
+
+                    for (ColorAttachmentIndex i :
+                         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                        auto& attachmentInfo = renderPass->colorAttachments[i];
+                        TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+                        attachments[attachmentCount] = view->GetHandle();
+
+                        // Convert the clear color to the component type of the attachment.
+                        // NOTE(review): the inner `uint32_t i` loops below shadow the outer
+                        // ColorAttachmentIndex `i`; harmless, but easy to misread.
+                        switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                            case wgpu::TextureComponentType::Float: {
+                                const std::array<float, 4> appliedClearColor =
+                                    ConvertToFloatColor(attachmentInfo.clearColor);
+                                for (uint32_t i = 0; i < 4; ++i) {
+                                    clearValues[attachmentCount].color.float32[i] =
+                                        appliedClearColor[i];
+                                }
+                                break;
+                            }
+                            case wgpu::TextureComponentType::Uint: {
+                                const std::array<uint32_t, 4> appliedClearColor =
+                                    ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
+                                for (uint32_t i = 0; i < 4; ++i) {
+                                    clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
+                                }
+                                break;
+                            }
+                            case wgpu::TextureComponentType::Sint: {
+                                const std::array<int32_t, 4> appliedClearColor =
+                                    ConvertToSignedIntegerColor(attachmentInfo.clearColor);
+                                for (uint32_t i = 0; i < 4; ++i) {
+                                    clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
+                                }
+                                break;
+                            }
+
+                            case wgpu::TextureComponentType::DepthComparison:
+                                UNREACHABLE();
+                        }
+                        attachmentCount++;
+                    }
+
+                    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                        auto& attachmentInfo = renderPass->depthStencilAttachment;
+                        TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+                        attachments[attachmentCount] = view->GetHandle();
+
+                        clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
+                        clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
+
+                        attachmentCount++;
+                    }
+
+                    for (ColorAttachmentIndex i :
+                         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                        if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+                            TextureView* view =
+                                ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+
+                            attachments[attachmentCount] = view->GetHandle();
+
+                            attachmentCount++;
+                        }
+                    }
+
+                    // Chain attachments and create the framebuffer
+                    VkFramebufferCreateInfo createInfo;
+                    createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+                    createInfo.pNext = nullptr;
+                    createInfo.flags = 0;
+                    createInfo.renderPass = renderPassVK;
+                    createInfo.attachmentCount = attachmentCount;
+                    createInfo.pAttachments = AsVkArray(attachments.data());
+                    createInfo.width = renderPass->width;
+                    createInfo.height = renderPass->height;
+                    createInfo.layers = 1;
+
+                    DAWN_TRY(
+                        CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
+                                                                    nullptr, &*framebuffer),
+                                       "CreateFramebuffer"));
+
+                    // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
+                    // commands currently being recorded are finished.
+                    device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
+                }
+
+                VkRenderPassBeginInfo beginInfo;
+                beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+                beginInfo.pNext = nullptr;
+                beginInfo.renderPass = renderPassVK;
+                beginInfo.framebuffer = framebuffer;
+                beginInfo.renderArea.offset.x = 0;
+                beginInfo.renderArea.offset.y = 0;
+                beginInfo.renderArea.extent.width = renderPass->width;
+                beginInfo.renderArea.extent.height = renderPass->height;
+                beginInfo.clearValueCount = attachmentCount;
+                beginInfo.pClearValues = clearValues.data();
+
+                device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+                return {};
+            }
+
+            // Resets all query slots of `querySet` that were used (availability == true) on a
+            // render pass. Vulkan requires vkCmdResetQueryPool to be recorded outside of a render
+            // pass instance, so the reset happens up front; each maximal run of used queries is
+            // reset with a single command.
+            void ResetUsedQuerySetsOnRenderPass(Device* device,
+                                                VkCommandBuffer commands,
+                                                QuerySetBase* querySet,
+                                                const std::vector<bool>& availability) {
+                ASSERT(availability.size() == querySet->GetQueryAvailability().size());
+
+                const auto begin = availability.begin();
+                const auto end = availability.end();
+                auto cursor = begin;
+
+                // Scan for maximal runs of `true` and reset each run in one CmdResetQueryPool.
+                for (;;) {
+                    const auto runFirst = std::find(cursor, end, true);
+                    if (runFirst == end) {
+                        // No used queries remain to be reset.
+                        return;
+                    }
+                    const auto runEnd = std::find(runFirst, end, false);
+
+                    const uint32_t firstQuery = std::distance(begin, runFirst);
+                    const uint32_t numQueries = std::distance(runFirst, runEnd);
+                    device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(),
+                                                 firstQuery, numQueries);
+
+                    // Resume the scan right after the run that was just reset.
+                    cursor = runEnd;
+                }
+            }
+
+            // Records a vkCmdWriteTimestamp for a WriteTimestamp command. The timestamp is
+            // written after all prior commands reach VK_PIPELINE_STAGE_ALL_COMMANDS_BIT.
+            void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
+                                         Device* device,
+                                         WriteTimestampCmd* cmd) {
+                device->fn.CmdWriteTimestamp(recordingContext->commandBuffer,
+                                             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                             ToBackend(cmd->querySet.Get())->GetHandle(),
+                                             cmd->queryIndex);
+            }
+
+            // Copies the results of the available queries in [firstQuery, firstQuery + queryCount)
+            // into `destination` starting at `destinationOffset` (8 bytes per query). Unavailable
+            // queries are skipped: each maximal run of available queries is resolved with one
+            // vkCmdCopyQueryPoolResults so VK_QUERY_RESULT_WAIT_BIT never waits on a query that
+            // was never written.
+            void RecordResolveQuerySetCmd(VkCommandBuffer commands,
+                                          Device* device,
+                                          QuerySet* querySet,
+                                          uint32_t firstQuery,
+                                          uint32_t queryCount,
+                                          Buffer* destination,
+                                          uint64_t destinationOffset) {
+                const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+                auto currentIt = availability.begin() + firstQuery;
+                auto lastIt = availability.begin() + firstQuery + queryCount;
+
+                // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
+                while (currentIt != lastIt) {
+                    auto firstTrueIt = std::find(currentIt, lastIt, true);
+                    // No available query found for resolving
+                    if (firstTrueIt == lastIt) {
+                        break;
+                    }
+                    auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+                    // The query index of firstTrueIt where the resolving starts
+                    uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+                    // The queries count between firstTrueIt and nextFalseIt need to be resolved
+                    uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+                    // Calculate destinationOffset based on the current resolveQueryIndex and
+                    // firstQuery. This must be done in 64 bits: vkCmdCopyQueryPoolResults takes a
+                    // VkDeviceSize dstOffset, and the previous uint32_t arithmetic silently
+                    // truncated destination offsets >= 4GiB.
+                    uint64_t resolveDestinationOffset =
+                        destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+                    // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+                    device->fn.CmdCopyQueryPoolResults(
+                        commands, querySet->GetHandle(), resolveQueryIndex, resolveQueryCount,
+                        destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
+                        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+
+                    // Set current iterator to next false
+                    currentIt = nextFalseIt;
+                }
+            }
+
+ } // anonymous namespace
+
+        // static
+        // Factory for the Vulkan backend CommandBuffer; takes ownership of the newly
+        // allocated object via AcquireRef.
+        Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                                 const CommandBufferDescriptor* descriptor) {
+            CommandBuffer* commandBuffer = new CommandBuffer(encoder, descriptor);
+            return AcquireRef(commandBuffer);
+        }
+
+        // Delegates entirely to CommandBufferBase; the Vulkan-specific recording happens later
+        // in RecordCommands().
+        CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+            : CommandBufferBase(encoder, descriptor) {
+        }
+
+        // Performs a texture-to-texture copy by routing the texels through a temporary buffer
+        // (texture -> buffer -> texture). Used when vkCmdCopyImage cannot express the copy, e.g.
+        // compressed-texture copies whose extents differ between src and dst subresources (see
+        // the UseTemporaryBufferInCompressedTextureToTextureCopy toggle in RecordCommands).
+        // Requires both textures to have the same format and the copy to target the same aspect.
+        void CommandBuffer::RecordCopyImageWithTemporaryBuffer(
+            CommandRecordingContext* recordingContext,
+            const TextureCopy& srcCopy,
+            const TextureCopy& dstCopy,
+            const Extent3D& copySize) {
+            ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+            ASSERT(srcCopy.aspect == dstCopy.aspect);
+            // Bind a reference instead of copying the Format object.
+            const dawn::native::Format& format = srcCopy.texture->GetFormat();
+            const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+            ASSERT(copySize.width % blockInfo.width == 0);
+            uint32_t widthInBlocks = copySize.width / blockInfo.width;
+            ASSERT(copySize.height % blockInfo.height == 0);
+            uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+            // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
+            // because it isn't a hard constraint in Vulkan. The size is computed in 64 bits so the
+            // product cannot overflow uint32_t for large copies.
+            uint64_t tempBufferSize = uint64_t(widthInBlocks) * heightInBlocks *
+                                      copySize.depthOrArrayLayers * blockInfo.byteSize;
+            BufferDescriptor tempBufferDescriptor;
+            tempBufferDescriptor.size = tempBufferSize;
+            tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+            Device* device = ToBackend(GetDevice());
+            // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+            Ref<Buffer> tempBuffer =
+                AcquireRef(ToBackend(device->APICreateBuffer(&tempBufferDescriptor)));
+
+            // Describe the layout of the staging data in the temporary buffer: tightly packed
+            // rows of blocks starting at offset 0.
+            BufferCopy tempBufferCopy;
+            tempBufferCopy.buffer = tempBuffer.Get();
+            tempBufferCopy.rowsPerImage = heightInBlocks;
+            tempBufferCopy.offset = 0;
+            tempBufferCopy.bytesPerRow = widthInBlocks * blockInfo.byteSize;
+
+            VkCommandBuffer commands = recordingContext->commandBuffer;
+            VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
+            VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
+
+            // First leg: texture -> temporary buffer.
+            tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+            VkBufferImageCopy srcToTempBufferRegion =
+                ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
+
+            // The Dawn CopySrc usage is always mapped to GENERAL
+            device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                            tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
+
+            // Second leg: temporary buffer -> destination texture.
+            tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+            VkBufferImageCopy tempBufferToDstRegion =
+                ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
+
+            // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+            // copy command.
+            device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
+                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                            &tempBufferToDstRegion);
+
+            // Keep the temporary buffer alive until these commands have executed.
+            recordingContext->tempBuffers.emplace_back(tempBuffer);
+        }
+
+        // Replays the frontend command stream into the Vulkan command buffer of
+        // `recordingContext`. Top-level (outside-pass) commands are handled inline; render and
+        // compute passes are delegated to RecordRenderPass/RecordComputePass after recording the
+        // barriers, lazy clears, and query-pool resets they require.
+        MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
+            Device* device = ToBackend(GetDevice());
+            VkCommandBuffer commands = recordingContext->commandBuffer;
+
+            // Records the necessary barriers for the resource usage pre-computed by the frontend.
+            // And resets the used query sets which are rewritten on the render pass.
+            auto PrepareResourcesForRenderPass = [](Device* device,
+                                                    CommandRecordingContext* recordingContext,
+                                                    const RenderPassResourceUsage& usages) {
+                TransitionAndClearForSyncScope(device, recordingContext, usages);
+
+                // Reset all query set used on current render pass together before beginning render pass
+                // because the reset command must be called outside render pass
+                for (size_t i = 0; i < usages.querySets.size(); ++i) {
+                    ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
+                                                   usages.querySets[i], usages.queryAvailabilities[i]);
+                }
+            };
+
+            // Passes consume their pre-computed resource usages in recording order.
+            size_t nextComputePassNumber = 0;
+            size_t nextRenderPassNumber = 0;
+
+            Command type;
+            while (mCommands.NextCommandId(&type)) {
+                switch (type) {
+                    case Command::CopyBufferToBuffer: {
+                        CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                        if (copy->size == 0) {
+                            // Skip no-op copies.
+                            break;
+                        }
+
+                        Buffer* srcBuffer = ToBackend(copy->source.Get());
+                        Buffer* dstBuffer = ToBackend(copy->destination.Get());
+
+                        // Lazy-initialize the source; the destination only needs initialization
+                        // for the bytes the copy does not fully overwrite.
+                        srcBuffer->EnsureDataInitialized(recordingContext);
+                        dstBuffer->EnsureDataInitializedAsDestination(
+                            recordingContext, copy->destinationOffset, copy->size);
+
+                        srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                        dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                        VkBufferCopy region;
+                        region.srcOffset = copy->sourceOffset;
+                        region.dstOffset = copy->destinationOffset;
+                        region.size = copy->size;
+
+                        VkBuffer srcHandle = srcBuffer->GetHandle();
+                        VkBuffer dstHandle = dstBuffer->GetHandle();
+                        device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
+                        break;
+                    }
+
+                    case Command::CopyBufferToTexture: {
+                        CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                        if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                            copy->copySize.depthOrArrayLayers == 0) {
+                            // Skip no-op copies.
+                            continue;
+                        }
+                        auto& src = copy->source;
+                        auto& dst = copy->destination;
+
+                        ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
+
+                        VkBufferImageCopy region =
+                            ComputeBufferImageCopyRegion(src, dst, copy->copySize);
+                        VkImageSubresourceLayers subresource = region.imageSubresource;
+
+                        SubresourceRange range =
+                            GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                        if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                          subresource.mipLevel)) {
+                            // Since texture has been overwritten, it has been "initialized"
+                            dst.texture->SetIsSubresourceContentInitialized(true, range);
+                        } else {
+                            ToBackend(dst.texture)
+                                ->EnsureSubresourceContentInitialized(recordingContext, range);
+                        }
+                        ToBackend(src.buffer)
+                            ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                        ToBackend(dst.texture)
+                            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+                        VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
+                        VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+                        // copy command.
+                        device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
+                                                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                                        &region);
+                        break;
+                    }
+
+                    case Command::CopyTextureToBuffer: {
+                        CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                        if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                            copy->copySize.depthOrArrayLayers == 0) {
+                            // Skip no-op copies.
+                            continue;
+                        }
+                        auto& src = copy->source;
+                        auto& dst = copy->destination;
+
+                        ToBackend(dst.buffer)
+                            ->EnsureDataInitializedAsDestination(recordingContext, copy);
+
+                        VkBufferImageCopy region =
+                            ComputeBufferImageCopyRegion(dst, src, copy->copySize);
+
+                        SubresourceRange range =
+                            GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
+                        ToBackend(src.texture)
+                            ->EnsureSubresourceContentInitialized(recordingContext, range);
+
+                        ToBackend(src.texture)
+                            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
+                        ToBackend(dst.buffer)
+                            ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                        VkImage srcImage = ToBackend(src.texture)->GetHandle();
+                        VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
+                        // The Dawn CopySrc usage is always mapped to GENERAL
+                        device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                        dstBuffer, 1, &region);
+                        break;
+                    }
+
+                    case Command::CopyTextureToTexture: {
+                        CopyTextureToTextureCmd* copy =
+                            mCommands.NextCommand<CopyTextureToTextureCmd>();
+                        if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                            copy->copySize.depthOrArrayLayers == 0) {
+                            // Skip no-op copies.
+                            continue;
+                        }
+                        TextureCopy& src = copy->source;
+                        TextureCopy& dst = copy->destination;
+                        SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                        SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+                        ToBackend(src.texture)
+                            ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+                        if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                          dst.mipLevel)) {
+                            // Since destination texture has been overwritten, it has been "initialized"
+                            dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
+                        } else {
+                            ToBackend(dst.texture)
+                                ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+                        }
+
+                        if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
+                            // When there are overlapped subresources, the layout of the overlapped
+                            // subresources should all be GENERAL instead of what we set now. Currently
+                            // it is not allowed to copy with overlapped subresources, but we still
+                            // add the ASSERT here as a reminder for this possible misuse.
+                            ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
+                                                      copy->copySize.depthOrArrayLayers));
+                        }
+
+                        // TODO after Yunchao's CL
+                        ToBackend(src.texture)
+                            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+                                                 srcRange);
+                        ToBackend(dst.texture)
+                            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+                                                 dstRange);
+
+                        // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
+                        // because as Vulkan SPEC always validates image copies with the virtual size of
+                        // the image subresource, when the extent that fits in the copy region of one
+                        // subresource but does not fit in the one of another subresource, we will fail
+                        // to find a valid extent to satisfy the requirements on both source and
+                        // destination image subresource. For example, when the source is the first
+                        // level of a 16x16 texture in BC format, and the destination is the third level
+                        // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
+                        // the extent of vkCmdCopyImage.
+                        // Our workaround for this issue is replacing the texture-to-texture copy with
+                        // one texture-to-buffer copy and one buffer-to-texture copy.
+                        bool copyUsingTemporaryBuffer =
+                            device->IsToggleEnabled(
+                                Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
+                            src.texture->GetFormat().isCompressed &&
+                            !HasSameTextureCopyExtent(src, dst, copy->copySize);
+
+                        if (!copyUsingTemporaryBuffer) {
+                            VkImage srcImage = ToBackend(src.texture)->GetHandle();
+                            VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                            // One vkCmdCopyImage per aspect present in the source format.
+                            for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
+                                ASSERT(dst.texture->GetFormat().aspects & aspect);
+                                VkImageCopy region =
+                                    ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
+
+                                // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
+                                // the copy command.
+                                device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                        dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                        1, &region);
+                            }
+                        } else {
+                            RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
+                                                               copy->copySize);
+                        }
+
+                        break;
+                    }
+
+                    case Command::ClearBuffer: {
+                        ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                        if (cmd->size == 0) {
+                            // Skip no-op fills.
+                            break;
+                        }
+
+                        Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+                        // If lazy initialization already zeroed the range, the explicit fill
+                        // below is redundant and skipped.
+                        bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+                            recordingContext, cmd->offset, cmd->size);
+
+                        if (!clearedToZero) {
+                            dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+                            device->fn.CmdFillBuffer(recordingContext->commandBuffer,
+                                                     dstBuffer->GetHandle(), cmd->offset, cmd->size,
+                                                     0u);
+                        }
+
+                        break;
+                    }
+
+                    case Command::BeginRenderPass: {
+                        BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                        PrepareResourcesForRenderPass(
+                            device, recordingContext,
+                            GetResourceUsages().renderPasses[nextRenderPassNumber]);
+
+                        LazyClearRenderPassAttachments(cmd);
+                        DAWN_TRY(RecordRenderPass(recordingContext, cmd));
+
+                        nextRenderPassNumber++;
+                        break;
+                    }
+
+                    case Command::BeginComputePass: {
+                        mCommands.NextCommand<BeginComputePassCmd>();
+
+                        DAWN_TRY(RecordComputePass(
+                            recordingContext,
+                            GetResourceUsages().computePasses[nextComputePassNumber]));
+
+                        nextComputePassNumber++;
+                        break;
+                    }
+
+                    case Command::ResolveQuerySet: {
+                        ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                        QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                        Buffer* destination = ToBackend(cmd->destination.Get());
+
+                        destination->EnsureDataInitializedAsDestination(
+                            recordingContext, cmd->destinationOffset,
+                            cmd->queryCount * sizeof(uint64_t));
+
+                        // vkCmdCopyQueryPoolResults only can retrieve available queries because
+                        // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
+                        // as 0s, we need to clear the resolving region of the destination buffer to 0s.
+                        auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
+                        auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
+                                     cmd->queryCount;
+                        bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+                        if (hasUnavailableQueries) {
+                            destination->TransitionUsageNow(recordingContext,
+                                                            wgpu::BufferUsage::CopyDst);
+                            device->fn.CmdFillBuffer(commands, destination->GetHandle(),
+                                                     cmd->destinationOffset,
+                                                     cmd->queryCount * sizeof(uint64_t), 0u);
+                        }
+
+                        destination->TransitionUsageNow(recordingContext,
+                                                        wgpu::BufferUsage::QueryResolve);
+
+                        RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
+                                                 cmd->queryCount, destination, cmd->destinationOffset);
+
+                        break;
+                    }
+
+                    case Command::WriteTimestamp: {
+                        WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                        // The query must be reset between uses.
+                        device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                                     cmd->queryIndex, 1);
+
+                        RecordWriteTimestampCmd(recordingContext, device, cmd);
+                        break;
+                    }
+
+                    case Command::InsertDebugMarker: {
+                        // Debug labels are only recorded when VK_EXT_debug_utils is available;
+                        // otherwise the command (and its inline label data) must still be consumed.
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                            const char* label = mCommands.NextData<char>(cmd->length + 1);
+                            VkDebugUtilsLabelEXT utilsLabel;
+                            utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                            utilsLabel.pNext = nullptr;
+                            utilsLabel.pLabelName = label;
+                            // Default color to black
+                            utilsLabel.color[0] = 0.0;
+                            utilsLabel.color[1] = 0.0;
+                            utilsLabel.color[2] = 0.0;
+                            utilsLabel.color[3] = 1.0;
+                            device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                        } else {
+                            SkipCommand(&mCommands, Command::InsertDebugMarker);
+                        }
+                        break;
+                    }
+
+                    case Command::PopDebugGroup: {
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            mCommands.NextCommand<PopDebugGroupCmd>();
+                            device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                        } else {
+                            SkipCommand(&mCommands, Command::PopDebugGroup);
+                        }
+                        break;
+                    }
+
+                    case Command::PushDebugGroup: {
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                            const char* label = mCommands.NextData<char>(cmd->length + 1);
+                            VkDebugUtilsLabelEXT utilsLabel;
+                            utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                            utilsLabel.pNext = nullptr;
+                            utilsLabel.pLabelName = label;
+                            // Default color to black
+                            utilsLabel.color[0] = 0.0;
+                            utilsLabel.color[1] = 0.0;
+                            utilsLabel.color[2] = 0.0;
+                            utilsLabel.color[3] = 1.0;
+                            device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                        } else {
+                            SkipCommand(&mCommands, Command::PushDebugGroup);
+                        }
+                        break;
+                    }
+
+                    case Command::WriteBuffer: {
+                        WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                        const uint64_t offset = write->offset;
+                        const uint64_t size = write->size;
+                        if (size == 0) {
+                            continue;
+                        }
+
+                        Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                        uint8_t* data = mCommands.NextData<uint8_t>(size);
+                        Device* device = ToBackend(GetDevice());
+
+                        // Stage the inline data through the dynamic uploader, then record a
+                        // buffer-to-buffer copy into the destination.
+                        UploadHandle uploadHandle;
+                        DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                          size, device->GetPendingCommandSerial(),
+                                                          kCopyBufferToBufferOffsetAlignment));
+                        ASSERT(uploadHandle.mappedBuffer != nullptr);
+                        memcpy(uploadHandle.mappedBuffer, data, size);
+
+                        dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
+
+                        dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                        VkBufferCopy copy;
+                        copy.srcOffset = uploadHandle.startOffset;
+                        copy.dstOffset = offset;
+                        copy.size = size;
+
+                        device->fn.CmdCopyBuffer(
+                            commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+                            dstBuffer->GetHandle(), 1, &copy);
+                        break;
+                    }
+
+                    default:
+                        // NOTE(review): unknown top-level commands are silently ignored here,
+                        // whereas RecordComputePass treats them as UNREACHABLE(); confirm this
+                        // asymmetry is intended.
+                        break;
+                }
+            }
+
+            return {};
+        }
+
+        // Replays one compute pass from the command stream. Before every dispatch, the barriers
+        // for that dispatch's pre-computed synchronization scope are recorded and the dirty
+        // descriptor sets are bound. Returns when the EndComputePass command is reached; the
+        // frontend guarantees it is present, so falling off the stream is UNREACHABLE().
+        MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
+                                                    const ComputePassResourceUsage& resourceUsages) {
+            Device* device = ToBackend(GetDevice());
+            VkCommandBuffer commands = recordingContext->commandBuffer;
+
+            // Index into resourceUsages.dispatchUsages, advanced once per dispatch command.
+            uint64_t currentDispatch = 0;
+            DescriptorSetTracker descriptorSets = {};
+
+            Command type;
+            while (mCommands.NextCommandId(&type)) {
+                switch (type) {
+                    case Command::EndComputePass: {
+                        mCommands.NextCommand<EndComputePassCmd>();
+                        return {};
+                    }
+
+                    case Command::Dispatch: {
+                        DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                        // Record the barriers and descriptor set binds this dispatch requires.
+                        TransitionAndClearForSyncScope(device, recordingContext,
+                                                       resourceUsages.dispatchUsages[currentDispatch]);
+                        descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                        device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
+                        currentDispatch++;
+                        break;
+                    }
+
+                    case Command::DispatchIndirect: {
+                        DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                        VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
+
+                        TransitionAndClearForSyncScope(device, recordingContext,
+                                                       resourceUsages.dispatchUsages[currentDispatch]);
+                        descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                        device->fn.CmdDispatchIndirect(
+                            commands, indirectBuffer,
+                            static_cast<VkDeviceSize>(dispatch->indirectOffset));
+                        currentDispatch++;
+                        break;
+                    }
+
+                    case Command::SetBindGroup: {
+                        SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+
+                        BindGroup* bindGroup = ToBackend(cmd->group.Get());
+                        uint32_t* dynamicOffsets = nullptr;
+                        if (cmd->dynamicOffsetCount > 0) {
+                            dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                        }
+
+                        // Binding is deferred: the tracker records the group and flushes it via
+                        // Apply() at the next dispatch.
+                        descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+                                                      dynamicOffsets);
+                        break;
+                    }
+
+                    case Command::SetComputePipeline: {
+                        SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                        ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                        device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
+                                                   pipeline->GetHandle());
+                        descriptorSets.OnSetPipeline(pipeline);
+                        break;
+                    }
+
+                    case Command::InsertDebugMarker: {
+                        // Debug labels are only recorded when VK_EXT_debug_utils is available;
+                        // otherwise the command (and its inline label data) must still be consumed.
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                            const char* label = mCommands.NextData<char>(cmd->length + 1);
+                            VkDebugUtilsLabelEXT utilsLabel;
+                            utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                            utilsLabel.pNext = nullptr;
+                            utilsLabel.pLabelName = label;
+                            // Default color to black
+                            utilsLabel.color[0] = 0.0;
+                            utilsLabel.color[1] = 0.0;
+                            utilsLabel.color[2] = 0.0;
+                            utilsLabel.color[3] = 1.0;
+                            device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                        } else {
+                            SkipCommand(&mCommands, Command::InsertDebugMarker);
+                        }
+                        break;
+                    }
+
+                    case Command::PopDebugGroup: {
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            mCommands.NextCommand<PopDebugGroupCmd>();
+                            device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                        } else {
+                            SkipCommand(&mCommands, Command::PopDebugGroup);
+                        }
+                        break;
+                    }
+
+                    case Command::PushDebugGroup: {
+                        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                            PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                            const char* label = mCommands.NextData<char>(cmd->length + 1);
+                            VkDebugUtilsLabelEXT utilsLabel;
+                            utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                            utilsLabel.pNext = nullptr;
+                            utilsLabel.pLabelName = label;
+                            // Default color to black
+                            utilsLabel.color[0] = 0.0;
+                            utilsLabel.color[1] = 0.0;
+                            utilsLabel.color[2] = 0.0;
+                            utilsLabel.color[3] = 1.0;
+                            device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                        } else {
+                            SkipCommand(&mCommands, Command::PushDebugGroup);
+                        }
+                        break;
+                    }
+
+                    case Command::WriteTimestamp: {
+                        WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                        // The query must be reset between uses.
+                        device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                                     cmd->queryIndex, 1);
+
+                        RecordWriteTimestampCmd(recordingContext, device, cmd);
+                        break;
+                    }
+
+                    default:
+                        UNREACHABLE();
+                }
+            }
+
+            // EndComputePass should have been called
+            UNREACHABLE();
+        }
+
+ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPassCmd) {
+ Device* device = ToBackend(GetDevice());
+ VkCommandBuffer commands = recordingContext->commandBuffer;
+
+ DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
+
+ // Set the default value for the dynamic state
+ {
+ device->fn.CmdSetLineWidth(commands, 1.0f);
+ device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
+
+ device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
+
+ float blendConstants[4] = {
+ 0.0f,
+ 0.0f,
+ 0.0f,
+ 0.0f,
+ };
+ device->fn.CmdSetBlendConstants(commands, blendConstants);
+
+ // The viewport and scissor default to cover all of the attachments
+ VkViewport viewport;
+ viewport.x = 0.0f;
+ viewport.y = static_cast<float>(renderPassCmd->height);
+ viewport.width = static_cast<float>(renderPassCmd->width);
+ viewport.height = -static_cast<float>(renderPassCmd->height);
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+
+ VkRect2D scissorRect;
+ scissorRect.offset.x = 0;
+ scissorRect.offset.y = 0;
+ scissorRect.extent.width = renderPassCmd->width;
+ scissorRect.extent.height = renderPassCmd->height;
+ device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
+ }
+
+ DescriptorSetTracker descriptorSets = {};
+ RenderPipeline* lastPipeline = nullptr;
+
+ auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+ switch (type) {
+ case Command::Draw: {
+ DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
+ draw->firstVertex, draw->firstInstance);
+ break;
+ }
+
+ case Command::DrawIndexed: {
+ DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
+ draw->firstIndex, draw->baseVertex,
+ draw->firstInstance);
+ break;
+ }
+
+ case Command::DrawIndirect: {
+ DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
+ static_cast<VkDeviceSize>(draw->indirectOffset), 1,
+ 0);
+ break;
+ }
+
+ case Command::DrawIndexedIndirect: {
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(buffer != nullptr);
+
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ device->fn.CmdDrawIndexedIndirect(
+ commands, buffer->GetHandle(),
+ static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
+ break;
+ }
+
+ case Command::InsertDebugMarker: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(iter, Command::InsertDebugMarker);
+ }
+ break;
+ }
+
+ case Command::PopDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ iter->NextCommand<PopDebugGroupCmd>();
+ device->fn.CmdEndDebugUtilsLabelEXT(commands);
+ } else {
+ SkipCommand(iter, Command::PopDebugGroup);
+ }
+ break;
+ }
+
+ case Command::PushDebugGroup: {
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+ const char* label = iter->NextData<char>(cmd->length + 1);
+ VkDebugUtilsLabelEXT utilsLabel;
+ utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+ utilsLabel.pNext = nullptr;
+ utilsLabel.pLabelName = label;
+ // Default color to black
+ utilsLabel.color[0] = 0.0;
+ utilsLabel.color[1] = 0.0;
+ utilsLabel.color[2] = 0.0;
+ utilsLabel.color[3] = 1.0;
+ device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+ } else {
+ SkipCommand(iter, Command::PushDebugGroup);
+ }
+ break;
+ }
+
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+ BindGroup* bindGroup = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
+ if (cmd->dynamicOffsetCount > 0) {
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+ }
+
+ descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+ dynamicOffsets);
+ break;
+ }
+
+ case Command::SetIndexBuffer: {
+ SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+ VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
+
+ device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
+ VulkanIndexType(cmd->format));
+ break;
+ }
+
+ case Command::SetRenderPipeline: {
+ SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+ RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+ device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->GetHandle());
+ lastPipeline = pipeline;
+
+ descriptorSets.OnSetPipeline(pipeline);
+ break;
+ }
+
+ case Command::SetVertexBuffer: {
+ SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+ VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
+ VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
+
+ device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
+ &*buffer, &offset);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ };
+
+ Command type;
+ while (mCommands.NextCommandId(&type)) {
+ switch (type) {
+ case Command::EndRenderPass: {
+ mCommands.NextCommand<EndRenderPassCmd>();
+ device->fn.CmdEndRenderPass(commands);
+ return {};
+ }
+
+ case Command::SetBlendConstant: {
+ SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+ const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
+ device->fn.CmdSetBlendConstants(commands, blendConstants.data());
+ break;
+ }
+
+ case Command::SetStencilReference: {
+ SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+ device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
+ cmd->reference);
+ break;
+ }
+
+ case Command::SetViewport: {
+ SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+ VkViewport viewport;
+ viewport.x = cmd->x;
+ viewport.y = cmd->y + cmd->height;
+ viewport.width = cmd->width;
+ viewport.height = -cmd->height;
+ viewport.minDepth = cmd->minDepth;
+ viewport.maxDepth = cmd->maxDepth;
+
+ // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
+ // height = 0 so use that to do an empty viewport.
+ if (viewport.width == 0) {
+ viewport.height = 0;
+
+ // Set the viewport x range to a range that's always valid.
+ viewport.x = 0;
+ viewport.width = 1;
+ }
+
+ device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+ break;
+ }
+
+ case Command::SetScissorRect: {
+ SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+ VkRect2D rect;
+ rect.offset.x = cmd->x;
+ rect.offset.y = cmd->y;
+ rect.extent.width = cmd->width;
+ rect.extent.height = cmd->height;
+
+ device->fn.CmdSetScissor(commands, 0, 1, &rect);
+ break;
+ }
+
+ case Command::ExecuteBundles: {
+ ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+ auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+ for (uint32_t i = 0; i < cmd->count; ++i) {
+ CommandIterator* iter = bundles[i]->GetCommands();
+ iter->Reset();
+ while (iter->NextCommandId(&type)) {
+ EncodeRenderBundleCommand(iter, type);
+ }
+ }
+ break;
+ }
+
+ case Command::BeginOcclusionQuery: {
+ BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+
+ device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+ cmd->queryIndex, 0);
+ break;
+ }
+
+ case Command::EndOcclusionQuery: {
+ EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+
+ device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+ cmd->queryIndex);
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+ RecordWriteTimestampCmd(recordingContext, device, cmd);
+ break;
+ }
+
+ default: {
+ EncodeRenderBundleCommand(&mCommands, type);
+ break;
+ }
+ }
+ }
+
+ // EndRenderPass should have been called
+ UNREACHABLE();
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h
new file mode 100644
index 00000000000..b0d1e9cab9b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandBufferVk.h
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
+#define DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
+
+#include "dawn/native/CommandBuffer.h"
+#include "dawn/native/Error.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native {
+ struct BeginRenderPassCmd;
+ struct TextureCopy;
+} // namespace dawn::native
+
+namespace dawn::native::vulkan {
+
+ struct CommandRecordingContext;
+ class Device;
+
+ class CommandBuffer final : public CommandBufferBase {
+ public:
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
+
+ MaybeError RecordCommands(CommandRecordingContext* recordingContext);
+
+ private:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
+ MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
+ const ComputePassResourceUsage& resourceUsages);
+ MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
+ BeginRenderPassCmd* renderPass);
+ void RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize);
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h
new file mode 100644
index 00000000000..44f1c907262
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/CommandRecordingContext.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/vulkan/BufferVk.h"
+
+namespace dawn::native::vulkan {
+ // Used to track operations that are handled after recording.
+ // Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
+ struct CommandRecordingContext {
+ VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+ std::vector<VkSemaphore> waitSemaphores = {};
+ std::vector<VkSemaphore> signalSemaphores = {};
+
+ // The internal buffers used in the workaround of texture-to-texture copies with compressed
+ // formats.
+ std::vector<Ref<Buffer>> tempBuffers;
+
+ // For Device state tracking only.
+ VkCommandPool commandPool = VK_NULL_HANDLE;
+ bool used = false;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp
new file mode 100644
index 00000000000..fa13e26ad04
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.cpp
@@ -0,0 +1,116 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ // static
+ Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ return AcquireRef(new ComputePipeline(device, descriptor));
+ }
+
+ MaybeError ComputePipeline::Initialize() {
+ VkComputePipelineCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.layout = ToBackend(GetLayout())->GetHandle();
+ createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
+ createInfo.basePipelineIndex = -1;
+
+ createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ createInfo.stage.pNext = nullptr;
+ createInfo.stage.flags = 0;
+ createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+ // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ DAWN_TRY_ASSIGN(createInfo.stage.module,
+ ToBackend(computeStage.module.Get())
+ ->GetTransformedModuleHandle(computeStage.entryPoint.c_str(),
+ ToBackend(GetLayout())));
+
+ createInfo.stage.pName = computeStage.entryPoint.c_str();
+
+ std::vector<OverridableConstantScalar> specializationDataEntries;
+ std::vector<VkSpecializationMapEntry> specializationMapEntries;
+ VkSpecializationInfo specializationInfo{};
+ createInfo.stage.pSpecializationInfo =
+ GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
+ &specializationMapEntries);
+
+ Device* device = ToBackend(GetDevice());
+
+ PNextChainBuilder stageExtChain(&createInfo.stage);
+
+ VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
+ uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
+ if (computeSubgroupSize != 0u) {
+ ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
+ subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
+ stageExtChain.Add(
+ &subgroupSizeInfo,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
+ }
+
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
+ &createInfo, nullptr, &*mHandle),
+ "CreateComputePipeline"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_ComputePipeline", GetLabel());
+ }
+
+ ComputePipeline::~ComputePipeline() = default;
+
+ void ComputePipeline::DestroyImpl() {
+ ComputePipelineBase::DestroyImpl();
+
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+ VkPipeline ComputePipeline::GetHandle() const {
+ return mHandle;
+ }
+
+ void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+ std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+ userdata);
+ CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h
new file mode 100644
index 00000000000..ef4aeff1be2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ComputePipelineVk.h
@@ -0,0 +1,53 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
+#define DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
+
+#include "dawn/native/ComputePipeline.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class ComputePipeline final : public ComputePipelineBase {
+ public:
+ static Ref<ComputePipeline> CreateUninitialized(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+
+ VkPipeline GetHandle() const;
+
+ MaybeError Initialize() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ private:
+ ~ComputePipeline() override;
+ void DestroyImpl() override;
+ using ComputePipelineBase::ComputePipelineBase;
+
+ VkPipeline mHandle = VK_NULL_HANDLE;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h
new file mode 100644
index 00000000000..ef72e860eb8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocation.h
@@ -0,0 +1,31 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
+#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+ // Contains a descriptor set along with data necessary to track its allocation.
+ struct DescriptorSetAllocation {
+ VkDescriptorSet set = VK_NULL_HANDLE;
+ uint32_t poolIndex;
+ uint16_t setIndex;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
new file mode 100644
index 00000000000..0f89d614548
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
@@ -0,0 +1,188 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ // TODO(enga): Figure out this value.
+ static constexpr uint32_t kMaxDescriptorsPerPool = 512;
+
+ // static
+ Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
+ return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+ }
+
+ DescriptorSetAllocator::DescriptorSetAllocator(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
+ : ObjectBase(layout->GetDevice()), mLayout(layout) {
+ ASSERT(layout != nullptr);
+
+ // Compute the total number of descriptors for this layout.
+ uint32_t totalDescriptorCount = 0;
+ mPoolSizes.reserve(descriptorCountPerType.size());
+ for (const auto& [type, count] : descriptorCountPerType) {
+ ASSERT(count > 0);
+ totalDescriptorCount += count;
+ mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
+ }
+
+ if (totalDescriptorCount == 0) {
+ // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
+ // number of pools, each of which has non-zero descriptor counts.
+ // Since the descriptor set layout is empty, we should be able to allocate
+ // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
+ // The type of this descriptor pool doesn't matter because it is never used.
+ mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
+ mMaxSets = kMaxDescriptorsPerPool;
+ } else {
+ ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
+ static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
+
+ // Compute the total number of descriptors sets that fits given the max.
+ mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
+ ASSERT(mMaxSets > 0);
+
+ // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
+ for (auto& poolSize : mPoolSizes) {
+ poolSize.descriptorCount *= mMaxSets;
+ }
+ }
+ }
+
+ DescriptorSetAllocator::~DescriptorSetAllocator() {
+ for (auto& pool : mDescriptorPools) {
+ ASSERT(pool.freeSetIndices.size() == mMaxSets);
+ if (pool.vkPool != VK_NULL_HANDLE) {
+ Device* device = ToBackend(GetDevice());
+ device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
+ }
+ }
+ }
+
+ ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
+ if (mAvailableDescriptorPoolIndices.empty()) {
+ DAWN_TRY(AllocateDescriptorPool());
+ }
+
+ ASSERT(!mAvailableDescriptorPoolIndices.empty());
+
+ const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
+ DescriptorPool* pool = &mDescriptorPools[poolIndex];
+
+ ASSERT(!pool->freeSetIndices.empty());
+
+ SetIndex setIndex = pool->freeSetIndices.back();
+ pool->freeSetIndices.pop_back();
+
+ if (pool->freeSetIndices.empty()) {
+ mAvailableDescriptorPoolIndices.pop_back();
+ }
+
+ return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+ }
+
+ void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
+ ASSERT(allocationInfo != nullptr);
+ ASSERT(allocationInfo->set != VK_NULL_HANDLE);
+
+ // We can't reuse the descriptor set right away because the Vulkan spec says in the
+ // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
+ // host execution of the command and the end of the draw/dispatch.
+ Device* device = ToBackend(GetDevice());
+ const ExecutionSerial serial = device->GetPendingCommandSerial();
+ mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex},
+ serial);
+
+ if (mLastDeallocationSerial != serial) {
+ device->EnqueueDeferredDeallocation(this);
+ mLastDeallocationSerial = serial;
+ }
+
+ // Clear the content of allocation so that use after frees are more visible.
+ *allocationInfo = {};
+ }
+
+ void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
+ for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
+ ASSERT(dealloc.poolIndex < mDescriptorPools.size());
+
+ auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
+ if (freeSetIndices.empty()) {
+ mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
+ }
+ freeSetIndices.emplace_back(dealloc.setIndex);
+ }
+ mPendingDeallocations.ClearUpTo(completedSerial);
+ }
+
+ MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
+ VkDescriptorPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.maxSets = mMaxSets;
+ createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
+ createInfo.pPoolSizes = mPoolSizes.data();
+
+ Device* device = ToBackend(GetDevice());
+
+ VkDescriptorPool descriptorPool;
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
+ nullptr, &*descriptorPool),
+ "CreateDescriptorPool"));
+
+ std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
+
+ VkDescriptorSetAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.descriptorPool = descriptorPool;
+ allocateInfo.descriptorSetCount = mMaxSets;
+ allocateInfo.pSetLayouts = AsVkArray(layouts.data());
+
+ std::vector<VkDescriptorSet> sets(mMaxSets);
+ MaybeError result =
+ CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
+ AsVkArray(sets.data())),
+ "AllocateDescriptorSets");
+ if (result.IsError()) {
+ // On an error we can destroy the pool immediately because no command references it.
+ device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
+ DAWN_TRY(std::move(result));
+ }
+
+ std::vector<SetIndex> freeSetIndices;
+ freeSetIndices.reserve(mMaxSets);
+
+ for (SetIndex i = 0; i < mMaxSets; ++i) {
+ freeSetIndices.push_back(i);
+ }
+
+ mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
+ mDescriptorPools.emplace_back(
+ DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
+
+ return {};
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h
new file mode 100644
index 00000000000..b6cd49507d8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DescriptorSetAllocator.h
@@ -0,0 +1,76 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
+#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/vulkan/DescriptorSetAllocation.h"
+
+#include <map>
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+ class BindGroupLayout;
+
+ class DescriptorSetAllocator : public ObjectBase {
+ using PoolIndex = uint32_t;
+ using SetIndex = uint16_t;
+
+ public:
+ static Ref<DescriptorSetAllocator> Create(
+ BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+
+ ResultOrError<DescriptorSetAllocation> Allocate();
+ void Deallocate(DescriptorSetAllocation* allocationInfo);
+ void FinishDeallocation(ExecutionSerial completedSerial);
+
+ private:
+ DescriptorSetAllocator(BindGroupLayout* layout,
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+ ~DescriptorSetAllocator();
+
+ MaybeError AllocateDescriptorPool();
+
+ BindGroupLayout* mLayout;
+
+ std::vector<VkDescriptorPoolSize> mPoolSizes;
+ SetIndex mMaxSets;
+
+ struct DescriptorPool {
+ VkDescriptorPool vkPool;
+ std::vector<VkDescriptorSet> sets;
+ std::vector<SetIndex> freeSetIndices;
+ };
+
+ std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
+ std::vector<DescriptorPool> mDescriptorPools;
+
+ struct Deallocation {
+ PoolIndex poolIndex;
+ SetIndex setIndex;
+ };
+ SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
+ ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp
new file mode 100644
index 00000000000..2662817d869
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.cpp
@@ -0,0 +1,1017 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+
+#include "dawn/common/Platform.h"
+#include "dawn/native/BackendConnection.h"
+#include "dawn/native/ChainUtils_autogen.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/ErrorData.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/BindGroupVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandBufferVk.h"
+#include "dawn/native/vulkan/ComputePipelineVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/QuerySetVk.h"
+#include "dawn/native/vulkan/QueueVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/SamplerVk.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/SwapChainVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+    // static
+    // Creates and initializes a backend Device; returns an error instead of a partially
+    // initialized device if Initialize() fails.
+    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
+                                              const DeviceDescriptor* descriptor) {
+        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+        DAWN_TRY(device->Initialize());
+        return device;
+    }
+
+    // The constructor only records toggle defaults; all fallible work happens in Initialize().
+    Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
+        : DeviceBase(adapter, descriptor) {
+        InitTogglesFromDriver();
+    }
+
+    // Creates the VkDevice, loads the device-level proc table, and builds the helper
+    // objects (fenced deleter, render-pass cache, memory allocator, external memory /
+    // semaphore services) before handing control to DeviceBase::Initialize.
+    MaybeError Device::Initialize() {
+        // Copy the adapter's device info to the device so that we can change the "knobs"
+        mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+
+        // Initialize the "instance" procs of our local function table.
+        VulkanFunctions* functions = GetMutableFunctions();
+        *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
+
+        // Two things are crucial if device initialization fails: the function pointers to destroy
+        // objects, and the fence deleter that calls these functions. Do not do anything before
+        // these two are set up, so that a failed initialization doesn't cause a crash in
+        // DestroyImpl()
+        {
+            VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
+
+            VulkanDeviceKnobs usedDeviceKnobs = {};
+            DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
+            *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
+
+            DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
+
+            // The queue can be loaded before the fenced deleter because their lifetime is tied to
+            // the device.
+            GatherQueueFromDevice();
+
+            mDeleter = std::make_unique<FencedDeleter>(this);
+        }
+
+        mRenderPassCache = std::make_unique<RenderPassCache>(this);
+        mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
+
+        mExternalMemoryService = std::make_unique<external_memory::Service>(this);
+        mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+
+        DAWN_TRY(PrepareRecordingContext());
+
+        // The environment can request to use D32S8 or D24S8 when it's not available. Override
+        // the decision if it is not applicable.
+        ApplyDepth24PlusS8Toggle();
+
+        return DeviceBase::Initialize(Queue::Create(this));
+    }
+
+    // Destroy() performs the full teardown (defined in DeviceBase); the destructor only
+    // triggers it.
+    Device::~Device() {
+        Destroy();
+    }
+
+    // Backend object factories: each *Impl override simply forwards to the corresponding
+    // Vulkan backend type's Create function.
+    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) {
+        return BindGroup::Create(this, descriptor);
+    }
+    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) {
+        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+    }
+    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+        return Buffer::Create(this, descriptor);
+    }
+    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) {
+        return CommandBuffer::Create(encoder, descriptor);
+    }
+    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) {
+        return ComputePipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) {
+        return PipelineLayout::Create(this, descriptor);
+    }
+    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) {
+        return QuerySet::Create(this, descriptor);
+    }
+    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) {
+        return RenderPipeline::CreateUninitialized(this, descriptor);
+    }
+    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+        return Sampler::Create(this, descriptor);
+    }
+    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) {
+        return ShaderModule::Create(this, descriptor, parseResult);
+    }
+    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) {
+        return OldSwapChain::Create(this, descriptor);
+    }
+    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) {
+        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+    }
+    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+        return Texture::Create(this, descriptor);
+    }
+    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) {
+        return TextureView::Create(texture, descriptor);
+    }
+    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata) {
+        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+    }
+    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata) {
+        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+    }
+
+    // Per-tick housekeeping: recycles completed command pools, finishes deferred descriptor
+    // set deallocations, ticks the memory allocator and fenced deleter, then submits any
+    // pending recorded commands.
+    MaybeError Device::TickImpl() {
+        RecycleCompletedCommands();
+
+        ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+        for (Ref<DescriptorSetAllocator>& allocator :
+             mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+            allocator->FinishDeallocation(completedSerial);
+        }
+
+        mResourceMemoryAllocator->Tick(completedSerial);
+        mDeleter->Tick(completedSerial);
+        mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+        // Only submit if commands were actually recorded since the last submission.
+        if (mRecordingContext.used) {
+            DAWN_TRY(SubmitPendingCommands());
+        }
+
+        return {};
+    }
+
+    // Trivial accessors for the Vulkan handles and helper objects owned by this device.
+    VkInstance Device::GetVkInstance() const {
+        return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
+    }
+    const VulkanDeviceInfo& Device::GetDeviceInfo() const {
+        return mDeviceInfo;
+    }
+
+    const VulkanGlobalInfo& Device::GetGlobalInfo() const {
+        return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
+    }
+
+    VkDevice Device::GetVkDevice() const {
+        return mVkDevice;
+    }
+
+    uint32_t Device::GetGraphicsQueueFamily() const {
+        return mQueueFamily;
+    }
+
+    VkQueue Device::GetQueue() const {
+        return mQueue;
+    }
+
+    FencedDeleter* Device::GetFencedDeleter() const {
+        return mDeleter.get();
+    }
+
+    RenderPassCache* Device::GetRenderPassCache() const {
+        return mRenderPassCache.get();
+    }
+
+    ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
+        return mResourceMemoryAllocator.get();
+    }
+
+    // Queues the allocator for deferred cleanup on the serial of the commands currently
+    // being recorded; TickImpl finishes the deallocation once that serial completes.
+    void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
+        mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
+    }
+
+    // Marks the recording context as used so the next Tick submits it.
+    CommandRecordingContext* Device::GetPendingRecordingContext() {
+        ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
+        mRecordingContext.used = true;
+        return &mRecordingContext;
+    }
+
+    // Ends and submits the pending command buffer with its wait/signal semaphores, tracks
+    // the submission with a fence, and prepares a fresh recording context. No-op when
+    // nothing was recorded.
+    MaybeError Device::SubmitPendingCommands() {
+        if (!mRecordingContext.used) {
+            return {};
+        }
+
+        DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
+                                "vkEndCommandBuffer"));
+
+        // Wait at the earliest pipeline stage for every wait semaphore (conservative mask).
+        std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
+                                                        VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+
+        VkSubmitInfo submitInfo;
+        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+        submitInfo.pNext = nullptr;
+        submitInfo.waitSemaphoreCount =
+            static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
+        submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
+        submitInfo.pWaitDstStageMask = dstStageMasks.data();
+        submitInfo.commandBufferCount = 1;
+        submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
+        submitInfo.signalSemaphoreCount =
+            static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
+        submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
+
+        VkFence fence = VK_NULL_HANDLE;
+        DAWN_TRY_ASSIGN(fence, GetUnusedFence());
+        DAWN_TRY_WITH_CLEANUP(
+            CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
+                // If submitting to the queue fails, move the fence back into the unused fence
+                // list, as if it were never acquired. Not doing so would leak the fence since
+                // it would be neither in the unused list nor in the in-flight list.
+                mUnusedFences.push_back(fence);
+            });
+
+        // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
+        // soon as the current submission is finished.
+        for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+            mDeleter->DeleteWhenUnused(semaphore);
+        }
+        for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+            mDeleter->DeleteWhenUnused(semaphore);
+        }
+
+        IncrementLastSubmittedCommandSerial();
+        ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
+        mFencesInFlight.emplace(fence, lastSubmittedSerial);
+
+        CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
+                                                  mRecordingContext.commandBuffer};
+        mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
+        mRecordingContext = CommandRecordingContext();
+        DAWN_TRY(PrepareRecordingContext());
+
+        return {};
+    }
+
+    // Creates the VkDevice: selects the extensions and physical-device features to enable
+    // (based on supported extensions and enabled Dawn features), finds a universal
+    // graphics+compute queue family, and returns the knobs actually used.
+    ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
+        VulkanDeviceKnobs usedKnobs = {};
+
+        // Default to asking for all available known extensions.
+        usedKnobs.extensions = mDeviceInfo.extensions;
+
+        // However only request the extensions that haven't been promoted in the device's apiVersion
+        std::vector<const char*> extensionNames;
+        for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
+            const DeviceExtInfo& info = GetDeviceExtInfo(ext);
+
+            if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
+                extensionNames.push_back(info.name);
+            }
+        }
+
+        // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
+        // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
+        // promoted as a core API in Vulkan 1.1.
+        //
+        // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
+        // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
+        VkPhysicalDeviceFeatures2 features2 = {};
+        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+        features2.pNext = nullptr;
+        PNextChainBuilder featuresChain(&features2);
+
+        // Required for core WebGPU features.
+        usedKnobs.features.depthBiasClamp = VK_TRUE;
+        usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+        usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
+        usedKnobs.features.imageCubeArray = VK_TRUE;
+        usedKnobs.features.independentBlend = VK_TRUE;
+        usedKnobs.features.sampleRateShading = VK_TRUE;
+
+        if (IsRobustnessEnabled()) {
+            usedKnobs.features.robustBufferAccess = VK_TRUE;
+        }
+
+        if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+            ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
+
+            // Always request all the features from VK_EXT_subgroup_size_control when available.
+            usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
+            featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
+
+            mComputeSubgroupSize = FindComputeSubgroupSize();
+        }
+
+        if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
+            usedKnobs.features.samplerAnisotropy = VK_TRUE;
+        }
+
+        // The following feature enables mirror the Dawn features enabled on this device; the
+        // ASSERTs check that feature validation already guaranteed driver support.
+        if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionBC = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionETC2 = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
+                   VK_TRUE);
+            usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
+                   VK_TRUE);
+            usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
+        }
+
+        if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+            const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+            ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+                   deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+                   deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+                   deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
+                   deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
+
+            usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+            usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
+            usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
+
+            featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+            featuresChain.Add(&usedKnobs._16BitStorageFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+        }
+
+        if (IsFeatureEnabled(Feature::DepthClamping)) {
+            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
+            usedKnobs.features.depthClamp = VK_TRUE;
+        }
+
+        // Find a universal queue family
+        {
+            // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
+            constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
+            int universalQueueFamily = -1;
+            for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
+                if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
+                    kUniversalFlags) {
+                    universalQueueFamily = i;
+                    break;
+                }
+            }
+
+            if (universalQueueFamily == -1) {
+                return DAWN_INTERNAL_ERROR("No universal queue family");
+            }
+            mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
+        }
+
+        // Choose to create a single universal queue
+        std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+        float zero = 0.0f;
+        {
+            VkDeviceQueueCreateInfo queueCreateInfo;
+            queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+            queueCreateInfo.pNext = nullptr;
+            queueCreateInfo.flags = 0;
+            queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
+            queueCreateInfo.queueCount = 1;
+            queueCreateInfo.pQueuePriorities = &zero;
+
+            queuesToRequest.push_back(queueCreateInfo);
+        }
+
+        VkDeviceCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
+        createInfo.pQueueCreateInfos = queuesToRequest.data();
+        createInfo.enabledLayerCount = 0;
+        createInfo.ppEnabledLayerNames = nullptr;
+        createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+        createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+        // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
+        // covered by VkPhysicalDeviceFeatures can be enabled.
+        if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
+            features2.features = usedKnobs.features;
+            createInfo.pNext = &features2;
+            createInfo.pEnabledFeatures = nullptr;
+        } else {
+            ASSERT(features2.pNext == nullptr);
+            createInfo.pEnabledFeatures = &usedKnobs.features;
+        }
+
+        DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
+                                "vkCreateDevice"));
+
+        return usedKnobs;
+    }
+
+    // Picks the compute subgroup size to request via VK_EXT_subgroup_size_control, or 0
+    // when the extension is absent or the device only supports a single size.
+    uint32_t Device::FindComputeSubgroupSize() const {
+        if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+            return 0;
+        }
+
+        const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
+            mDeviceInfo.subgroupSizeControlProperties;
+
+        if (ext.minSubgroupSize == ext.maxSubgroupSize) {
+            return 0;
+        }
+
+        // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
+        // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
+        // following heuristics, which may need to be adjusted in the future for other
+        // architectures, or if a specific API is added to let client code select the size.
+        //
+        // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
+        uint32_t subgroupSize = ext.minSubgroupSize * 2;
+        if (subgroupSize <= ext.maxSubgroupSize) {
+            return subgroupSize;
+        } else {
+            return ext.minSubgroupSize;
+        }
+    }
+
+    // Fetches queue 0 of the chosen universal queue family into mQueue.
+    void Device::GatherQueueFromDevice() {
+        fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+    }
+
+    // Sets default toggle values for driver workarounds; called from the constructor,
+    // before any Vulkan objects exist.
+    void Device::InitTogglesFromDriver() {
+        // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
+        // Vulkan SPEC and drivers.
+        SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
+
+        // By default try to use D32S8 for Depth24PlusStencil8
+        SetToggle(Toggle::VulkanUseD32S8, true);
+    }
+
+    // Forces the VulkanUseD32S8 toggle to a format the adapter actually supports; at least
+    // one of D32S8/D24S8 must be available (asserted below).
+    void Device::ApplyDepth24PlusS8Toggle() {
+        bool supportsD32s8 =
+            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
+        bool supportsD24s8 =
+            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
+
+        ASSERT(supportsD32s8 || supportsD24s8);
+
+        if (!supportsD24s8) {
+            ForceSetToggle(Toggle::VulkanUseD32S8, true);
+        }
+        if (!supportsD32s8) {
+            ForceSetToggle(Toggle::VulkanUseD32S8, false);
+        }
+    }
+
+    // const_cast is needed because `fn` is const on DeviceBase; Initialize() must fill it in.
+    VulkanFunctions* Device::GetMutableFunctions() {
+        return const_cast<VulkanFunctions*>(&fn);
+    }
+
+    // Returns a reset fence, recycling one from mUnusedFences when possible and creating a
+    // new one otherwise.
+    ResultOrError<VkFence> Device::GetUnusedFence() {
+        if (!mUnusedFences.empty()) {
+            VkFence fence = mUnusedFences.back();
+            DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
+
+            mUnusedFences.pop_back();
+            return fence;
+        }
+
+        VkFenceCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+
+        VkFence fence = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
+                                "vkCreateFence"));
+
+        return fence;
+    }
+
+    // Polls the in-flight fences in submission order and returns the serial of the last
+    // signaled one, moving its fence back to the unused list. Stops at the first fence
+    // that is not yet ready.
+    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+        ExecutionSerial fenceSerial(0);
+        while (!mFencesInFlight.empty()) {
+            VkFence fence = mFencesInFlight.front().first;
+            ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
+            VkResult result = VkResult::WrapUnsafe(
+                INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
+
+            // Fence are added in order, so we can stop searching as soon
+            // as we see one that's not ready.
+            if (result == VK_NOT_READY) {
+                return fenceSerial;
+            } else {
+                DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
+            }
+
+            // Update fenceSerial since fence is ready.
+            fenceSerial = tentativeSerial;
+
+            mUnusedFences.push_back(fence);
+
+            ASSERT(fenceSerial > GetCompletedCommandSerial());
+            mFencesInFlight.pop();
+        }
+        return fenceSerial;
+    }
+
+    // Readies mRecordingContext for new commands: reuses a recycled command pool/buffer
+    // when available (resetting the pool), otherwise creates a fresh pool and allocates a
+    // primary command buffer, then begins recording.
+    MaybeError Device::PrepareRecordingContext() {
+        ASSERT(!mRecordingContext.used);
+        ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
+        ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
+
+        // First try to recycle unused command pools.
+        if (!mUnusedCommands.empty()) {
+            CommandPoolAndBuffer commands = mUnusedCommands.back();
+            mUnusedCommands.pop_back();
+            DAWN_TRY_WITH_CLEANUP(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
+                                                 "vkResetCommandPool"),
+                                  {
+                                      // vkResetCommandPool failed (it may return out-of-memory).
+                                      // Free the commands in the cleanup step before returning to
+                                      // reclaim memory.
+
+                                      // The VkCommandBuffer memory should be wholly owned by the
+                                      // pool and freed when it is destroyed, but that's not the
+                                      // case in some drivers and they leak memory. So we call
+                                      // FreeCommandBuffers before DestroyCommandPool to be safe.
+                                      // TODO(enga): Only do this on a known list of bad drivers.
+                                      fn.FreeCommandBuffers(mVkDevice, commands.pool, 1,
+                                                            &commands.commandBuffer);
+                                      fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+                                  });
+
+            mRecordingContext.commandBuffer = commands.commandBuffer;
+            mRecordingContext.commandPool = commands.pool;
+        } else {
+            // Create a new command pool for our commands and allocate the command buffer.
+            VkCommandPoolCreateInfo createInfo;
+            createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+            createInfo.pNext = nullptr;
+            createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+            createInfo.queueFamilyIndex = mQueueFamily;
+
+            DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
+                                                         &*mRecordingContext.commandPool),
+                                    "vkCreateCommandPool"));
+
+            VkCommandBufferAllocateInfo allocateInfo;
+            allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+            allocateInfo.pNext = nullptr;
+            allocateInfo.commandPool = mRecordingContext.commandPool;
+            allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+            allocateInfo.commandBufferCount = 1;
+
+            DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
+                                                              &mRecordingContext.commandBuffer),
+                                    "vkAllocateCommandBuffers"));
+        }
+
+        // Start the recording of commands in the command buffer.
+        VkCommandBufferBeginInfo beginInfo;
+        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+        beginInfo.pNext = nullptr;
+        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+        beginInfo.pInheritanceInfo = nullptr;
+
+        return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
+                              "vkBeginCommandBuffer");
+    }
+
+    // Moves command pools whose submission has completed back into the reuse list.
+    void Device::RecycleCompletedCommands() {
+        for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
+            mUnusedCommands.push_back(commands);
+        }
+        mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+    }
+
+    // Creates and initializes a host-visible staging buffer of `size` bytes.
+    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer =
+            std::make_unique<StagingBuffer>(size, this);
+        DAWN_TRY(stagingBuffer->Initialize());
+        return std::move(stagingBuffer);
+    }
+
+    // Records a buffer-to-buffer copy from a staging buffer into `destination` on the
+    // pending command buffer, after initializing the destination range and transitioning
+    // it to CopyDst usage.
+    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) {
+        // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
+        // calling this function.
+        ASSERT(size != 0);
+
+        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+        ToBackend(destination)
+            ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.
+
+        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+        // buffer.
+        ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+        VkBufferCopy copy;
+        copy.srcOffset = sourceOffset;
+        copy.dstOffset = destinationOffset;
+        copy.size = size;
+
+        this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
+                               ToBackend(source)->GetBufferHandle(),
+                               ToBackend(destination)->GetHandle(), 1, &copy);
+
+        return {};
+    }
+
+    // Records a buffer-to-image copy from a staging buffer into `dst`, initializing (or
+    // marking initialized) the affected subresources and transitioning them to CopyDst.
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.
+
+        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
+        VkImageSubresourceLayers subresource = region.imageSubresource;
+
+        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
+                                          subresource.mipLevel)) {
+            // Since texture has been overwritten, it has been "initialized"
+            dst->texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+        }
+        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+        // texture.
+        ToBackend(dst->texture)
+            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+        VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+        // copy command.
+        this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+                                      ToBackend(source)->GetBufferHandle(), dstImage,
+                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+        return {};
+    }
+
+    // Imports an externally-created VkImage's memory and wait semaphores, and creates the
+    // exportable signal semaphore, filling the out-parameters. On failure the caller owns
+    // cleanup of any handles already written to the out-parameters.
+    MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+                                           ExternalMemoryHandle memoryHandle,
+                                           VkImage image,
+                                           const std::vector<ExternalSemaphoreHandle>& waitHandles,
+                                           VkSemaphore* outSignalSemaphore,
+                                           VkDeviceMemory* outAllocation,
+                                           std::vector<VkSemaphore>* outWaitSemaphores) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+        FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
+
+        wgpu::TextureUsage usage = textureDescriptor->usage;
+        if (internalUsageDesc != nullptr) {
+            usage |= internalUsageDesc->internalUsage;
+        }
+
+        // Check services support this combination of handle type / image info
+        DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
+                        "External semaphore usage not supported");
+
+        DAWN_INVALID_IF(
+            !mExternalMemoryService->SupportsImportMemory(
+                VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
+                VK_IMAGE_TILING_OPTIMAL,
+                VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
+                VK_IMAGE_CREATE_ALIAS_BIT_KHR),
+            "External memory usage not supported");
+
+        // Create an external semaphore to signal when the texture is done being used
+        DAWN_TRY_ASSIGN(*outSignalSemaphore,
+                        mExternalSemaphoreService->CreateExportableSemaphore());
+
+        // Import the external image's memory
+        external_memory::MemoryImportParams importParams;
+        DAWN_TRY_ASSIGN(importParams,
+                        mExternalMemoryService->GetMemoryImportParams(descriptor, image));
+        DAWN_TRY_ASSIGN(*outAllocation,
+                        mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
+
+        // Import semaphores we have to wait on before using the texture
+        for (const ExternalSemaphoreHandle& handle : waitHandles) {
+            VkSemaphore semaphore = VK_NULL_HANDLE;
+            DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
+            outWaitSemaphores->push_back(semaphore);
+        }
+
+        return {};
+    }
+
+    // Releases an externally-imported texture: exports its signal semaphore handle and
+    // reports the released layouts and initialization state through `info`. Returns false
+    // (after consuming the error) on failure.
+    bool Device::SignalAndExportExternalTexture(
+        Texture* texture,
+        VkImageLayout desiredLayout,
+        ExternalImageExportInfoVk* info,
+        std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
+        return !ConsumedError([&]() -> MaybeError {
+            DAWN_TRY(ValidateObject(texture));
+
+            VkSemaphore signalSemaphore;
+            VkImageLayout releasedOldLayout;
+            VkImageLayout releasedNewLayout;
+            DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore,
+                                                    &releasedOldLayout, &releasedNewLayout));
+
+            ExternalSemaphoreHandle semaphoreHandle;
+            DAWN_TRY_ASSIGN(semaphoreHandle,
+                            mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
+            semaphoreHandles->push_back(semaphoreHandle);
+            info->releasedOldLayout = releasedOldLayout;
+            info->releasedNewLayout = releasedNewLayout;
+            info->isInitialized =
+                texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
+
+            return {};
+        }());
+    }
+
+    // Wraps an external VkImage in a Dawn Texture: validates the descriptor, creates the
+    // texture, imports memory and semaphores, and binds them. On any failure all partially
+    // acquired Vulkan objects are destroyed and nullptr is returned.
+    TextureBase* Device::CreateTextureWrappingVulkanImage(
+        const ExternalImageDescriptorVk* descriptor,
+        ExternalMemoryHandle memoryHandle,
+        const std::vector<ExternalSemaphoreHandle>& waitHandles) {
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+        // Initial validation
+        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+            return nullptr;
+        }
+        if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
+                          "validating that a Vulkan image can be wrapped with %s.",
+                          textureDescriptor)) {
+            return nullptr;
+        }
+
+        VkSemaphore signalSemaphore = VK_NULL_HANDLE;
+        VkDeviceMemory allocation = VK_NULL_HANDLE;
+        std::vector<VkSemaphore> waitSemaphores;
+        waitSemaphores.reserve(waitHandles.size());
+
+        // Cleanup in case of a failure, the image creation doesn't acquire the external objects
+        // if a failure happens.
+        Texture* result = nullptr;
+        // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
+        if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+                                                      mExternalMemoryService.get()),
+                          &result) ||
+            ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
+                                              waitHandles, &signalSemaphore, &allocation,
+                                              &waitSemaphores)) ||
+            ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
+                                                     waitSemaphores))) {
+            // Delete the Texture if it was created
+            if (result != nullptr) {
+                result->Release();
+            }
+
+            // Clear the signal semaphore
+            fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+
+            // Clear image memory
+            fn.FreeMemory(GetVkDevice(), allocation, nullptr);
+
+            // Clear any wait semaphores we were able to import
+            for (VkSemaphore semaphore : waitSemaphores) {
+                fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
+            }
+            return nullptr;
+        }
+
+        return result;
+    }
+
+    // The subgroup size chosen by FindComputeSubgroupSize(); 0 means "don't request one".
+    uint32_t Device::GetComputeSubgroupSize() const {
+        return mComputeSubgroupSize;
+    }
+
+    // Drains all GPU work before destruction: waits for the queue to be idle, then waits on
+    // and destroys every in-flight fence. Errors from the waits are intentionally ignored
+    // since teardown must proceed regardless.
+    MaybeError Device::WaitForIdleForDestruction() {
+        // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+        // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
+        // ShutDownImpl
+        if (mRecordingContext.used) {
+            CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
+                                             mRecordingContext.commandBuffer};
+            mUnusedCommands.push_back(commands);
+            mRecordingContext = CommandRecordingContext();
+        }
+
+        VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
+        // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
+        // about, Device lost, which means workloads running on the GPU are no longer accessible
+        // (so they are as good as waited on) or success.
+        DAWN_UNUSED(waitIdleResult);
+
+        // Make sure all fences are complete by explicitly waiting on them all
+        while (!mFencesInFlight.empty()) {
+            VkFence fence = mFencesInFlight.front().first;
+            ExecutionSerial fenceSerial = mFencesInFlight.front().second;
+            ASSERT(fenceSerial > GetCompletedCommandSerial());
+
+            VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
+            do {
+                // If WaitForIdleForDestruction is called while we are Disconnected, it means that
+                // the device lost came from the ErrorInjector and we need to wait without allowing
+                // any more error to be injected. This is because the device lost was "fake" and
+                // commands might still be running.
+                if (GetState() == State::Disconnected) {
+                    result = VkResult::WrapUnsafe(
+                        fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
+                    continue;
+                }
+
+                result = VkResult::WrapUnsafe(
+                    INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
+                                        VK_ERROR_DEVICE_LOST));
+            } while (result == VK_TIMEOUT);
+            // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
+            // about (and we need to keep going with the destruction of all fences), or device
+            // loss, which means the workload on the GPU is no longer accessible and we can
+            // safely destroy the fence.
+
+            fn.DestroyFence(mVkDevice, fence, nullptr);
+            mFencesInFlight.pop();
+        }
+        return {};
+    }
+
+ void Device::DestroyImpl() {
+ ASSERT(GetState() == State::Disconnected);
+
+ // We failed during initialization so early that we don't even have a VkDevice. There is
+ // nothing to do.
+ if (mVkDevice == VK_NULL_HANDLE) {
+ return;
+ }
+
+ // The deleter is the second thing we initialize. If it is not present, it means that
+ // only the VkDevice was created and nothing else. Destroy the device and do nothing else
+ // because the function pointers might not have been loaded (and there is nothing to
+ // destroy anyway).
+ if (mDeleter == nullptr) {
+ fn.DestroyDevice(mVkDevice, nullptr);
+ mVkDevice = VK_NULL_HANDLE;
+ return;
+ }
+
+ // Enough of the Device's initialization happened that we can now do regular robust
+ // deinitialization.
+
+ // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+ mRecordingContext.used = false;
+ if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
+ // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+ // destroyed, but that's not the case in some drivers and the leak memory.
+ // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
+ &mRecordingContext.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+ }
+
+ for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+ }
+ mRecordingContext.waitSemaphores.clear();
+
+ for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+ }
+ mRecordingContext.signalSemaphores.clear();
+
+ // Some commands might still be marked as in-flight if we shut down because of a device
+ // loss. Recycle them as unused so that we free them below.
+ RecycleCompletedCommands();
+ ASSERT(mCommandsInFlight.Empty());
+
+ for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+ // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+ // destroyed, but that's not the case in some drivers and the leak memory.
+ // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+ }
+ mUnusedCommands.clear();
+
+ // Some fences might still be marked as in-flight if we shut down because of a device loss.
+ // Delete them since at this point all commands are complete.
+ while (!mFencesInFlight.empty()) {
+ fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
+ mFencesInFlight.pop();
+ }
+
+ for (VkFence fence : mUnusedFences) {
+ fn.DestroyFence(mVkDevice, fence, nullptr);
+ }
+ mUnusedFences.clear();
+
+ ExecutionSerial completedSerial = GetCompletedCommandSerial();
+ for (Ref<DescriptorSetAllocator>& allocator :
+ mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+ allocator->FinishDeallocation(completedSerial);
+ }
+
+ // Releasing the uploader enqueues buffers to be released.
+ // Call Tick() again to clear them before releasing the deleter.
+ mResourceMemoryAllocator->Tick(completedSerial);
+ mDeleter->Tick(completedSerial);
+ mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+ // Allow recycled memory to be deleted.
+ mResourceMemoryAllocator->DestroyPool();
+
+ // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
+ // to them are guaranteed to be finished executing.
+ mRenderPassCache = nullptr;
+
+ // We need handle deleting all child objects by calling Tick() again with a large serial to
+ // force all operations to look as if they were completed, and delete all objects before
+ // destroying the Deleter and vkDevice.
+ ASSERT(mDeleter != nullptr);
+ mDeleter->Tick(kMaxExecutionSerial);
+ mDeleter = nullptr;
+
+ // VkQueues are destroyed when the VkDevice is destroyed
+ // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
+ // child objects have been deleted.
+ ASSERT(mVkDevice != VK_NULL_HANDLE);
+ fn.DestroyDevice(mVkDevice, nullptr);
+ mVkDevice = VK_NULL_HANDLE;
+ }
+
+ uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+ return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+ }
+
+ uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+ return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+ }
+
+ float Device::GetTimestampPeriodInNS() const {
+ return mDeviceInfo.properties.limits.timestampPeriod;
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h
new file mode 100644
index 00000000000..e660cf38117
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/DeviceVk.h
@@ -0,0 +1,213 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DEVICEVK_H_
+#define DAWNNATIVE_VULKAN_DEVICEVK_H_
+
+#include "dawn/native/dawn_platform.h"
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/Device.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DescriptorSetAllocator.h"
+#include "dawn/native/vulkan/Forward.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+#include <memory>
+#include <queue>
+
+namespace dawn::native::vulkan {
+
+ class Adapter;
+ class BindGroupLayout;
+ class BufferUploader;
+ class FencedDeleter;
+ class RenderPassCache;
+ class ResourceMemoryAllocator;
+
+ class Device final : public DeviceBase {
+ public:
+ static ResultOrError<Ref<Device>> Create(Adapter* adapter,
+ const DeviceDescriptor* descriptor);
+ ~Device() override;
+
+ MaybeError Initialize();
+
+ // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
+ const VulkanFunctions fn;
+
+ VkInstance GetVkInstance() const;
+ const VulkanDeviceInfo& GetDeviceInfo() const;
+ const VulkanGlobalInfo& GetGlobalInfo() const;
+ VkDevice GetVkDevice() const;
+ uint32_t GetGraphicsQueueFamily() const;
+ VkQueue GetQueue() const;
+
+ FencedDeleter* GetFencedDeleter() const;
+ RenderPassCache* GetRenderPassCache() const;
+ ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
+
+ CommandRecordingContext* GetPendingRecordingContext();
+ MaybeError SubmitPendingCommands();
+
+ void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
+
+ // Dawn Native API
+
+ TextureBase* CreateTextureWrappingVulkanImage(
+ const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles);
+ bool SignalAndExportExternalTexture(Texture* texture,
+ VkImageLayout desiredLayout,
+ ExternalImageExportInfoVk* info,
+ std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
+
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+
+ MaybeError TickImpl() override;
+
+ ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+ MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) override;
+
+ // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
+ // needs to be set.
+ uint32_t GetComputeSubgroupSize() const;
+
+ uint32_t GetOptimalBytesPerRowAlignment() const override;
+ uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
+ float GetTimestampPeriodInNS() const override;
+
+ private:
+ Device(Adapter* adapter, const DeviceDescriptor* descriptor);
+
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+ const BindGroupDescriptor* descriptor) override;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+ const PipelineLayoutDescriptor* descriptor) override;
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) override;
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+ const TextureDescriptor* descriptor) override;
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) override;
+ Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+ const ComputePipelineDescriptor* descriptor) override;
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+ const RenderPipelineDescriptor* descriptor) override;
+ void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
+
+ ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
+ void GatherQueueFromDevice();
+
+ uint32_t FindComputeSubgroupSize() const;
+ void InitTogglesFromDriver();
+ void ApplyDepth24PlusS8Toggle();
+
+ void DestroyImpl() override;
+ MaybeError WaitForIdleForDestruction() override;
+
+ // To make it easier to use fn it is a public const member. However
+ // the Device is allowed to mutate them through these private methods.
+ VulkanFunctions* GetMutableFunctions();
+
+ VulkanDeviceInfo mDeviceInfo = {};
+ VkDevice mVkDevice = VK_NULL_HANDLE;
+ uint32_t mQueueFamily = 0;
+ VkQueue mQueue = VK_NULL_HANDLE;
+ uint32_t mComputeSubgroupSize = 0;
+
+ SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
+ mDescriptorAllocatorsPendingDeallocation;
+ std::unique_ptr<FencedDeleter> mDeleter;
+ std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
+ std::unique_ptr<RenderPassCache> mRenderPassCache;
+
+ std::unique_ptr<external_memory::Service> mExternalMemoryService;
+ std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
+
+ ResultOrError<VkFence> GetUnusedFence();
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+
+ // We track which operations are in flight on the GPU with an increasing serial.
+ // This works only because we have a single queue. Each submit to a queue is associated
+ // to a serial and a fence, such that when the fence is "ready" we know the operations
+ // have finished.
+ std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
+ // Fences in the unused list aren't reset yet.
+ std::vector<VkFence> mUnusedFences;
+
+ MaybeError PrepareRecordingContext();
+ void RecycleCompletedCommands();
+
+ struct CommandPoolAndBuffer {
+ VkCommandPool pool = VK_NULL_HANDLE;
+ VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+ };
+ SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
+ // Command pools in the unused list haven't been reset yet.
+ std::vector<CommandPoolAndBuffer> mUnusedCommands;
+ // There is always a valid recording context stored in mRecordingContext
+ CommandRecordingContext mRecordingContext;
+
+ MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+ ExternalMemoryHandle memoryHandle,
+ VkImage image,
+ const std::vector<ExternalSemaphoreHandle>& waitHandles,
+ VkSemaphore* outSignalSemaphore,
+ VkDeviceMemory* outAllocation,
+ std::vector<VkSemaphore>* outWaitSemaphores);
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_DEVICEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h
new file mode 100644
index 00000000000..24edf428cd7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ExternalHandle.h
@@ -0,0 +1,26 @@
+#ifndef DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+#if DAWN_PLATFORM_LINUX
+ // File descriptor
+ using ExternalMemoryHandle = int;
+ // File descriptor
+ using ExternalSemaphoreHandle = int;
+#elif DAWN_PLATFORM_FUCHSIA
+ // Really a Zircon vmo handle.
+ using ExternalMemoryHandle = zx_handle_t;
+ // Really a Zircon event handle.
+ using ExternalSemaphoreHandle = zx_handle_t;
+#else
+ // Generic types so that the Null service can compile, not used for real handles
+ using ExternalMemoryHandle = void*;
+ using ExternalSemaphoreHandle = void*;
+#endif
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp
new file mode 100644
index 00000000000..09c91b43c34
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.cpp
@@ -0,0 +1,183 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/FencedDeleter.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+
+namespace dawn::native::vulkan {
+
+ FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {
+ }
+
+ FencedDeleter::~FencedDeleter() {
+ ASSERT(mBuffersToDelete.Empty());
+ ASSERT(mDescriptorPoolsToDelete.Empty());
+ ASSERT(mFramebuffersToDelete.Empty());
+ ASSERT(mImagesToDelete.Empty());
+ ASSERT(mImageViewsToDelete.Empty());
+ ASSERT(mMemoriesToDelete.Empty());
+ ASSERT(mPipelinesToDelete.Empty());
+ ASSERT(mPipelineLayoutsToDelete.Empty());
+ ASSERT(mQueryPoolsToDelete.Empty());
+ ASSERT(mRenderPassesToDelete.Empty());
+ ASSERT(mSamplersToDelete.Empty());
+ ASSERT(mSemaphoresToDelete.Empty());
+ ASSERT(mShaderModulesToDelete.Empty());
+ ASSERT(mSurfacesToDelete.Empty());
+ ASSERT(mSwapChainsToDelete.Empty());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
+ mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
+ mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
+ mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
+ mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkImage image) {
+ mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkImageView view) {
+ mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
+ mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
+ mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
+ mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
+ mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
+ mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
+ mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
+ mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
+ mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
+ mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
+ }
+
+ void FencedDeleter::Tick(ExecutionSerial completedSerial) {
+ VkDevice vkDevice = mDevice->GetVkDevice();
+ VkInstance instance = mDevice->GetVkInstance();
+
+ // Buffers and images must be deleted before memories because it is invalid to free memory
+ // that still have resources bound to it.
+ for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
+ }
+ mBuffersToDelete.ClearUpTo(completedSerial);
+ for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyImage(vkDevice, image, nullptr);
+ }
+ mImagesToDelete.ClearUpTo(completedSerial);
+
+ for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
+ }
+ mMemoriesToDelete.ClearUpTo(completedSerial);
+
+ for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
+ }
+ mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
+
+ for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
+ }
+ mRenderPassesToDelete.ClearUpTo(completedSerial);
+
+ for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
+ }
+ mFramebuffersToDelete.ClearUpTo(completedSerial);
+
+ for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
+ }
+ mImageViewsToDelete.ClearUpTo(completedSerial);
+
+ for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
+ }
+ mShaderModulesToDelete.ClearUpTo(completedSerial);
+
+ for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
+ }
+ mPipelinesToDelete.ClearUpTo(completedSerial);
+
+ // Vulkan swapchains must be destroyed before their corresponding VkSurface
+ for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
+ }
+ mSwapChainsToDelete.ClearUpTo(completedSerial);
+ for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
+ }
+ mSurfacesToDelete.ClearUpTo(completedSerial);
+
+ for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
+ }
+ mSemaphoresToDelete.ClearUpTo(completedSerial);
+
+ for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
+ }
+ mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
+
+ for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
+ }
+ mQueryPoolsToDelete.ClearUpTo(completedSerial);
+
+ for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
+ }
+ mSamplersToDelete.ClearUpTo(completedSerial);
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h
new file mode 100644
index 00000000000..bd4c88a24e8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/FencedDeleter.h
@@ -0,0 +1,70 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_FENCEDDELETER_H_
+#define DAWNNATIVE_VULKAN_FENCEDDELETER_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/IntegerTypes.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class FencedDeleter {
+ public:
+ FencedDeleter(Device* device);
+ ~FencedDeleter();
+
+ void DeleteWhenUnused(VkBuffer buffer);
+ void DeleteWhenUnused(VkDescriptorPool pool);
+ void DeleteWhenUnused(VkDeviceMemory memory);
+ void DeleteWhenUnused(VkFramebuffer framebuffer);
+ void DeleteWhenUnused(VkImage image);
+ void DeleteWhenUnused(VkImageView view);
+ void DeleteWhenUnused(VkPipelineLayout layout);
+ void DeleteWhenUnused(VkRenderPass renderPass);
+ void DeleteWhenUnused(VkPipeline pipeline);
+ void DeleteWhenUnused(VkQueryPool querypool);
+ void DeleteWhenUnused(VkSampler sampler);
+ void DeleteWhenUnused(VkSemaphore semaphore);
+ void DeleteWhenUnused(VkShaderModule module);
+ void DeleteWhenUnused(VkSurfaceKHR surface);
+ void DeleteWhenUnused(VkSwapchainKHR swapChain);
+
+ void Tick(ExecutionSerial completedSerial);
+
+ private:
+ Device* mDevice = nullptr;
+ SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
+ SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
+ SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
+ SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
+ SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
+ SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
+ SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
+ SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
+ SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
+ SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
+ SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
+ SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
+ SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
+ SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
+ SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_FENCEDDELETER_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h
new file mode 100644
index 00000000000..35f6adecb90
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/Forward.h
@@ -0,0 +1,69 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_FORWARD_H_
+#define DAWNNATIVE_VULKAN_FORWARD_H_
+
+#include "dawn/native/ToBackend.h"
+
+namespace dawn::native::vulkan {
+
+ class Adapter;
+ class BindGroup;
+ class BindGroupLayout;
+ class Buffer;
+ class CommandBuffer;
+ class ComputePipeline;
+ class Device;
+ class PipelineLayout;
+ class QuerySet;
+ class Queue;
+ class RenderPipeline;
+ class ResourceHeap;
+ class Sampler;
+ class ShaderModule;
+ class StagingBuffer;
+ class SwapChain;
+ class Texture;
+ class TextureView;
+
+ struct VulkanBackendTraits {
+ using AdapterType = Adapter;
+ using BindGroupType = BindGroup;
+ using BindGroupLayoutType = BindGroupLayout;
+ using BufferType = Buffer;
+ using CommandBufferType = CommandBuffer;
+ using ComputePipelineType = ComputePipeline;
+ using DeviceType = Device;
+ using PipelineLayoutType = PipelineLayout;
+ using QuerySetType = QuerySet;
+ using QueueType = Queue;
+ using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = ResourceHeap;
+ using SamplerType = Sampler;
+ using ShaderModuleType = ShaderModule;
+ using StagingBufferType = StagingBuffer;
+ using SwapChainType = SwapChain;
+ using TextureType = Texture;
+ using TextureViewType = TextureView;
+ };
+
+ template <typename T>
+ auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
+ return ToBackendBase<VulkanBackendTraits>(common);
+ }
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
new file mode 100644
index 00000000000..e16ae2caeca
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
@@ -0,0 +1,225 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/NativeSwapChainImplVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/TextureVk.h"
+
+#include <limits>
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+ bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
+ bool turnOffVsync,
+ VkPresentModeKHR* presentMode) {
+ if (turnOffVsync) {
+ for (const auto& availablePresentMode : availablePresentModes) {
+ if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+ *presentMode = availablePresentMode;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ *presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ return true;
+ }
+
+ bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
+ NativeSwapChainImpl::ChosenConfig* config,
+ bool turnOffVsync) {
+ VkPresentModeKHR presentMode;
+ if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
+ return false;
+ }
+ // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
+ // driver. Need to generalize
+ config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
+ config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ config->format = wgpu::TextureFormat::BGRA8Unorm;
+ config->minImageCount = 3;
+ // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
+ // on Linux
+ config->preTransform = info.capabilities.currentTransform;
+ config->presentMode = presentMode;
+ config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
+ return true;
+ }
+ } // anonymous namespace
+
+ NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
+ : mSurface(surface), mDevice(device) {
+ // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
+ // will return a correct result before a SwapChain is created.
+ UpdateSurfaceConfig();
+ }
+
+ NativeSwapChainImpl::~NativeSwapChainImpl() {
+ if (mSwapChain != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+ mSwapChain = VK_NULL_HANDLE;
+ }
+ if (mSurface != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
+ mSurface = VK_NULL_HANDLE;
+ }
+ }
+
+ void NativeSwapChainImpl::UpdateSurfaceConfig() {
+ if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
+ &mInfo)) {
+ ASSERT(false);
+ }
+
+ if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
+ ASSERT(false);
+ }
+ }
+
+ void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
+ UpdateSurfaceConfig();
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ UpdateSurfaceConfig();
+
+ ASSERT(mInfo.capabilities.minImageExtent.width <= width);
+ ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
+ ASSERT(mInfo.capabilities.minImageExtent.height <= height);
+ ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+ // TODO(crbug.com/dawn/269): need to check usage works too
+
+ // Create the swapchain with the configuration we chose
+ VkSwapchainKHR oldSwapchain = mSwapChain;
+ VkSwapchainCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.surface = mSurface;
+ createInfo.minImageCount = mConfig.minImageCount;
+ createInfo.imageFormat = mConfig.nativeFormat;
+ createInfo.imageColorSpace = mConfig.colorSpace;
+ createInfo.imageExtent.width = width;
+ createInfo.imageExtent.height = height;
+ createInfo.imageArrayLayers = 1;
+ createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
+ mDevice->GetValidInternalFormat(mConfig.format));
+ createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = nullptr;
+ createInfo.preTransform = mConfig.preTransform;
+ createInfo.compositeAlpha = mConfig.compositeAlpha;
+ createInfo.presentMode = mConfig.presentMode;
+ createInfo.clipped = false;
+ createInfo.oldSwapchain = oldSwapchain;
+
+ if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
+ &*mSwapChain) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ // Gather the swapchain's images. Implementations are allowed to return more images than the
+ // number we asked for.
+ uint32_t count = 0;
+ if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+ nullptr) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ ASSERT(count >= mConfig.minImageCount);
+ mSwapChainImages.resize(count);
+ if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+ AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ if (oldSwapchain != VK_NULL_HANDLE) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
+ }
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ // Transiently create a semaphore that will be signaled when the presentation engine is done
+ // with the swapchain image. Further operations on the image will wait for this semaphore.
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ {
+ VkSemaphoreCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
+ &*semaphore) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+ }
+
+ if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
+ std::numeric_limits<uint64_t>::max(), semaphore,
+ VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ nextTexture->texture.u64 =
+#if defined(DAWN_PLATFORM_64_BIT)
+ reinterpret_cast<uint64_t>
+#endif
+ (*mSwapChainImages[mLastImageIndex]);
+ mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError NativeSwapChainImpl::Present() {
+ // This assumes that the image has already been transitioned to the PRESENT layout and
+ // writes were made available to the stage.
+
+ // Assuming that the present queue is the same as the graphics queue, the proper
+ // synchronization has already been done on the queue so we don't need to wait on any
+ // semaphores.
+ VkPresentInfoKHR presentInfo;
+ presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ presentInfo.pNext = nullptr;
+ presentInfo.waitSemaphoreCount = 0;
+ presentInfo.pWaitSemaphores = nullptr;
+ presentInfo.swapchainCount = 1;
+ presentInfo.pSwapchains = &*mSwapChain;
+ presentInfo.pImageIndices = &mLastImageIndex;
+ presentInfo.pResults = nullptr;
+
+ VkQueue queue = mDevice->GetQueue();
+ if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return mConfig.format;
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h
new file mode 100644
index 00000000000..529146576a6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/NativeSwapChainImplVk.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
+#define DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
+
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class NativeSwapChainImpl {
+ public:
+ using WSIContext = DawnWSIContextVulkan;
+
+ NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
+ ~NativeSwapChainImpl();
+
+ void Init(DawnWSIContextVulkan* context);
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
+ uint32_t width,
+ uint32_t height);
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+ DawnSwapChainError Present();
+
+ wgpu::TextureFormat GetPreferredFormat() const;
+
+ struct ChosenConfig {
+ VkFormat nativeFormat;
+ wgpu::TextureFormat format;
+ VkColorSpaceKHR colorSpace;
+ VkSurfaceTransformFlagBitsKHR preTransform;
+ uint32_t minImageCount;
+ VkPresentModeKHR presentMode;
+ VkCompositeAlphaFlagBitsKHR compositeAlpha;
+ };
+
+ private:
+ void UpdateSurfaceConfig();
+
+ VkSurfaceKHR mSurface = VK_NULL_HANDLE;
+ VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+ std::vector<VkImage> mSwapChainImages;
+ uint32_t mLastImageIndex = 0;
+
+ VulkanSurfaceInfo mInfo;
+
+ ChosenConfig mConfig;
+
+ Device* mDevice = nullptr;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp
new file mode 100644
index 00000000000..245f2c9ce5c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.cpp
@@ -0,0 +1,84 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ // static
+ ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+ DAWN_TRY(layout->Initialize());
+ return layout;
+ }
+
+ MaybeError PipelineLayout::Initialize() {
+ // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
+ // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
+ // this constraints at the Dawn level?
+ uint32_t numSetLayouts = 0;
+ std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
+ for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
+ setLayouts[numSetLayouts] = ToBackend(GetBindGroupLayout(setIndex))->GetHandle();
+ numSetLayouts++;
+ }
+
+ VkPipelineLayoutCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.setLayoutCount = numSetLayouts;
+ createInfo.pSetLayouts = AsVkArray(setLayouts.data());
+ createInfo.pushConstantRangeCount = 0;
+ createInfo.pPushConstantRanges = nullptr;
+
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreatePipelineLayout"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ PipelineLayout::~PipelineLayout() = default;
+
+ void PipelineLayout::DestroyImpl() {
+ PipelineLayoutBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+ VkPipelineLayout PipelineLayout::GetHandle() const {
+ return mHandle;
+ }
+
+ void PipelineLayout::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_PipelineLayout", GetLabel());
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h
new file mode 100644
index 00000000000..56d51e577bf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/PipelineLayoutVk.h
@@ -0,0 +1,50 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
+#define DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
+
+#include "dawn/native/PipelineLayout.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class PipelineLayout final : public PipelineLayoutBase {
+ public:
+ static ResultOrError<Ref<PipelineLayout>> Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor);
+
+ VkPipelineLayout GetHandle() const;
+
+ private:
+ ~PipelineLayout() override;
+ void DestroyImpl() override;
+
+ using PipelineLayoutBase::PipelineLayoutBase;
+ MaybeError Initialize();
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ VkPipelineLayout mHandle = VK_NULL_HANDLE;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp
new file mode 100644
index 00000000000..398179304b8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.cpp
@@ -0,0 +1,117 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/QuerySetVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
+ VkQueryType VulkanQueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return VK_QUERY_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return VK_QUERY_TYPE_TIMESTAMP;
+ }
+ UNREACHABLE();
+ }
+
+ VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
+ std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
+ VkQueryPipelineStatisticFlags pipelineStatistics = 0;
+ for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
+ switch (pipelineStatisticsSet[i]) {
+ case wgpu::PipelineStatisticName::ClipperInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ComputeShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::FragmentShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::VertexShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
+ break;
+ }
+ }
+
+ return pipelineStatistics;
+ }
+ } // anonymous namespace
+
+ // static
+ ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(queryset->Initialize());
+ return queryset;
+ }
+
+ MaybeError QuerySet::Initialize() {
+ VkQueryPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ createInfo.pNext = NULL;
+ createInfo.flags = 0;
+ createInfo.queryType = VulkanQueryType(GetQueryType());
+ createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
+ if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
+ createInfo.pipelineStatistics =
+ VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
+ }
+
+ Device* device = ToBackend(GetDevice());
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "vkCreateQueryPool"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ VkQueryPool QuerySet::GetHandle() const {
+ return mHandle;
+ }
+
+ QuerySet::~QuerySet() = default;
+
+ void QuerySet::DestroyImpl() {
+ QuerySetBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+ void QuerySet::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_QUERY_POOL,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_QuerySet", GetLabel());
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h
new file mode 100644
index 00000000000..78a52c0ab8e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QuerySetVk.h
@@ -0,0 +1,47 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_QUERYSETVK_H_
+#define DAWNNATIVE_VULKAN_QUERYSETVK_H_
+
+#include "dawn/native/QuerySet.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ VkQueryPool GetHandle() const;
+
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ // Dawn API
+ void DestroyImpl() override;
+ void SetLabelImpl() override;
+
+ VkQueryPool mHandle = VK_NULL_HANDLE;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_QUERYSETVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp
new file mode 100644
index 00000000000..875b771b0de
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.cpp
@@ -0,0 +1,59 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/QueueVk.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/Buffer.h"
+#include "dawn/native/CommandValidation.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/vulkan/CommandBufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+namespace dawn::native::vulkan {
+
+ // static
+ Queue* Queue::Create(Device* device) {
+ return new Queue(device);
+ }
+
+ Queue::Queue(Device* device) : QueueBase(device) {
+ }
+
+ Queue::~Queue() {
+ }
+
+ MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ Device* device = ToBackend(GetDevice());
+
+ DAWN_TRY(device->Tick());
+
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferVk::RecordCommands");
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
+ }
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
+
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ return {};
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h
new file mode 100644
index 00000000000..a80b875ec69
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/QueueVk.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_QUEUEVK_H_
+#define DAWNNATIVE_VULKAN_QUEUEVK_H_
+
+#include "dawn/native/Queue.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class Queue final : public QueueBase {
+ public:
+ static Queue* Create(Device* device);
+
+ private:
+ Queue(Device* device);
+ ~Queue() override;
+ using QueueBase::QueueBase;
+
+ MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_QUEUEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp
new file mode 100644
index 00000000000..695dbd95200
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.cpp
@@ -0,0 +1,302 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/RenderPassCache.h"
+
+#include "dawn/common/BitSetIterator.h"
+#include "dawn/common/HashUtils.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
+ VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
+ switch (op) {
+ case wgpu::LoadOp::Load:
+ return VK_ATTACHMENT_LOAD_OP_LOAD;
+ case wgpu::LoadOp::Clear:
+ return VK_ATTACHMENT_LOAD_OP_CLEAR;
+ case wgpu::LoadOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
+ // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has required
+ // extension.
+ switch (op) {
+ case wgpu::StoreOp::Store:
+ return VK_ATTACHMENT_STORE_OP_STORE;
+ case wgpu::StoreOp::Discard:
+ return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ case wgpu::StoreOp::Undefined:
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ }
+ } // anonymous namespace
+
+ // RenderPassCacheQuery
+
+ void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
+ wgpu::TextureFormat format,
+ wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ bool hasResolveTarget) {
+ colorMask.set(index);
+ colorFormats[index] = format;
+ colorLoadOp[index] = loadOp;
+ colorStoreOp[index] = storeOp;
+ resolveTargetMask[index] = hasResolveTarget;
+ }
+
+ void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
+ wgpu::LoadOp depthLoadOpIn,
+ wgpu::StoreOp depthStoreOpIn,
+ wgpu::LoadOp stencilLoadOpIn,
+ wgpu::StoreOp stencilStoreOpIn,
+ bool readOnly) {
+ hasDepthStencil = true;
+ depthStencilFormat = format;
+ depthLoadOp = depthLoadOpIn;
+ depthStoreOp = depthStoreOpIn;
+ stencilLoadOp = stencilLoadOpIn;
+ stencilStoreOp = stencilStoreOpIn;
+ readOnlyDepthStencil = readOnly;
+ }
+
+ void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
+ this->sampleCount = sampleCount;
+ }
+
+ // RenderPassCache
+
+ RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {
+ }
+
+ RenderPassCache::~RenderPassCache() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ for (auto [_, renderPass] : mCache) {
+ mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
+ }
+
+ mCache.clear();
+ }
+
+ ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto it = mCache.find(query);
+ if (it != mCache.end()) {
+ return VkRenderPass(it->second);
+ }
+
+ VkRenderPass renderPass;
+ DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
+ mCache.emplace(query, renderPass);
+ return renderPass;
+ }
+
+ ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
+ const RenderPassCacheQuery& query) const {
+ // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
+ // Precompute them as they must be pointer-chained in VkSubpassDescription
+ std::array<VkAttachmentReference, kMaxColorAttachments> colorAttachmentRefs;
+ std::array<VkAttachmentReference, kMaxColorAttachments> resolveAttachmentRefs;
+ VkAttachmentReference depthStencilAttachmentRef;
+
+ // Contains the attachment description that will be chained in the create info
+ // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
+ constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
+ std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
+
+ VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
+
+ uint32_t colorAttachmentIndex = 0;
+ for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+ auto& attachmentRef = colorAttachmentRefs[colorAttachmentIndex];
+ auto& attachmentDesc = attachmentDescs[colorAttachmentIndex];
+
+ attachmentRef.attachment = colorAttachmentIndex;
+ attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+ attachmentDesc.samples = vkSampleCount;
+ attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
+ attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ ++colorAttachmentIndex;
+ }
+
+ uint32_t attachmentCount = colorAttachmentIndex;
+ VkAttachmentReference* depthStencilAttachment = nullptr;
+ if (query.hasDepthStencil) {
+ auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+ depthStencilAttachment = &depthStencilAttachmentRef;
+
+ depthStencilAttachmentRef.attachment = attachmentCount;
+ depthStencilAttachmentRef.layout =
+ query.readOnlyDepthStencil ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
+ : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
+ attachmentDesc.samples = vkSampleCount;
+
+ attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
+ attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
+ attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
+
+ // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
+ // the only subpass's layout.
+ attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
+ attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
+
+ ++attachmentCount;
+ }
+
+ uint32_t resolveAttachmentIndex = 0;
+ for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
+ auto& attachmentRef = resolveAttachmentRefs[resolveAttachmentIndex];
+ auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+ attachmentRef.attachment = attachmentCount;
+ attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ attachmentDesc.flags = 0;
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+ attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
+ attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ ++attachmentCount;
+ ++resolveAttachmentIndex;
+ }
+
+ // All color attachments without a corresponding resolve attachment must be set to
+ // VK_ATTACHMENT_UNUSED
+ for (; resolveAttachmentIndex < colorAttachmentIndex; resolveAttachmentIndex++) {
+ auto& attachmentRef = resolveAttachmentRefs[resolveAttachmentIndex];
+ attachmentRef.attachment = VK_ATTACHMENT_UNUSED;
+ // The Khronos Vulkan validation layer will complain if not set
+ attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ }
+
+ VkAttachmentReference* resolveTargetAttachmentRefs =
+ query.resolveTargetMask.any() ? resolveAttachmentRefs.data() : nullptr;
+
+ // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
+ VkSubpassDescription subpassDesc;
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ subpassDesc.colorAttachmentCount = colorAttachmentIndex;
+ subpassDesc.pColorAttachments = colorAttachmentRefs.data();
+ subpassDesc.pResolveAttachments = resolveTargetAttachmentRefs;
+ subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ // Chain everything in VkRenderPassCreateInfo
+ VkRenderPassCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = attachmentCount;
+ createInfo.pAttachments = attachmentDescs.data();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ // Create the render pass from the zillion parameters
+ VkRenderPass renderPass;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
+ nullptr, &*renderPass),
+ "CreateRenderPass"));
+ return renderPass;
+ }
+
+ // RenderPassCache
+
+ size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
+ size_t hash = Hash(query.colorMask);
+
+ HashCombine(&hash, Hash(query.resolveTargetMask));
+
+ for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+ HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
+ }
+
+ HashCombine(&hash, query.hasDepthStencil);
+ if (query.hasDepthStencil) {
+ HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
+ query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
+ }
+
+ HashCombine(&hash, query.sampleCount);
+
+ return hash;
+ }
+
+ bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
+ const RenderPassCacheQuery& b) const {
+ if (a.colorMask != b.colorMask) {
+ return false;
+ }
+
+ if (a.resolveTargetMask != b.resolveTargetMask) {
+ return false;
+ }
+
+ if (a.sampleCount != b.sampleCount) {
+ return false;
+ }
+
+ for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
+ if ((a.colorFormats[i] != b.colorFormats[i]) ||
+ (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
+ (a.colorStoreOp[i] != b.colorStoreOp[i])) {
+ return false;
+ }
+ }
+
+ if (a.hasDepthStencil != b.hasDepthStencil) {
+ return false;
+ }
+
+ if (a.hasDepthStencil) {
+ if ((a.depthStencilFormat != b.depthStencilFormat) ||
+ (a.depthLoadOp != b.depthLoadOp) || (a.stencilLoadOp != b.stencilLoadOp) ||
+ (a.depthStoreOp != b.depthStoreOp) || (a.stencilStoreOp != b.stencilStoreOp) ||
+ (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h
new file mode 100644
index 00000000000..aaf9fc808f7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPassCache.h
@@ -0,0 +1,106 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
+#define DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/ityp_bitset.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/dawn_platform.h"
+
+#include <array>
+#include <bitset>
+#include <mutex>
+#include <unordered_map>
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+    // This is a key to query the RenderPassCache. It can be sparse, meaning that only the
+    // information for bits set in colorMask or for hasDepthStencil needs to be provided; the
+    // rest can be uninitialized (the cache's hash/equality functors ignore inactive fields).
+    struct RenderPassCacheQuery {
+        // Use these helpers to build the query; they make sure all relevant data is initialized
+        // and the corresponding masks are set.
+        void SetColor(ColorAttachmentIndex index,
+                      wgpu::TextureFormat format,
+                      wgpu::LoadOp loadOp,
+                      wgpu::StoreOp storeOp,
+                      bool hasResolveTarget);
+        void SetDepthStencil(wgpu::TextureFormat format,
+                             wgpu::LoadOp depthLoadOp,
+                             wgpu::StoreOp depthStoreOp,
+                             wgpu::LoadOp stencilLoadOp,
+                             wgpu::StoreOp stencilStoreOp,
+                             bool readOnly);
+        void SetSampleCount(uint32_t sampleCount);
+
+        // Which color attachments are present, and which of those have a resolve target.
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
+        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
+        // Per-attachment data; only entries whose bit is set in colorMask are meaningful.
+        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
+        ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
+        ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
+
+        // Depth/stencil state; the fields below hasDepthStencil are only meaningful when it is
+        // true.
+        bool hasDepthStencil = false;
+        wgpu::TextureFormat depthStencilFormat;
+        wgpu::LoadOp depthLoadOp;
+        wgpu::StoreOp depthStoreOp;
+        wgpu::LoadOp stencilLoadOp;
+        wgpu::StoreOp stencilStoreOp;
+        bool readOnlyDepthStencil;
+
+        uint32_t sampleCount;
+    };
+
+    // Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
+    // render pass. We always arrange the order of attachments in "color-depthstencil-resolve"
+    // order when creating render pass and framebuffer so that we can always make sure the order
+    // of attachments in the rendering pipeline matches the one of the framebuffer.
+    // All the operations on RenderPassCache are guaranteed to be thread-safe (guarded by
+    // mMutex).
+    // TODO(cwallez@chromium.org): Make it an LRU cache somehow?
+    class RenderPassCache {
+      public:
+        RenderPassCache(Device* device);
+        ~RenderPassCache();
+
+        // Returns the cached VkRenderPass matching |query|, creating and caching it on a miss.
+        ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
+
+      private:
+        // Does the actual VkRenderPass creation on a cache miss.
+        ResultOrError<VkRenderPass> CreateRenderPassForQuery(
+            const RenderPassCacheQuery& query) const;
+
+        // Implements the functors necessary to use RenderPassCacheQueries as unordered_map
+        // keys.
+        struct CacheFuncs {
+            size_t operator()(const RenderPassCacheQuery& query) const;
+            bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
+        };
+        using Cache =
+            std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
+
+        Device* mDevice = nullptr;
+
+        // Guards mCache so GetRenderPass can be called from multiple threads.
+        std::mutex mMutex;
+        Cache mCache;
+    };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp
new file mode 100644
index 00000000000..580be6c86f9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.cpp
@@ -0,0 +1,623 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/RenderPipelineVk.h"
+
+#include "dawn/native/CreatePipelineAsyncTask.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/RenderPassCache.h"
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+        // Maps a WebGPU vertex step mode to the equivalent Vulkan vertex input rate.
+        VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
+            switch (stepMode) {
+                case wgpu::VertexStepMode::Vertex:
+                    return VK_VERTEX_INPUT_RATE_VERTEX;
+                case wgpu::VertexStepMode::Instance:
+                    return VK_VERTEX_INPUT_RATE_INSTANCE;
+            }
+            UNREACHABLE();
+        }
+
+        // Maps a WebGPU vertex attribute format to the equivalent VkFormat. Every WebGPU vertex
+        // format has a direct Vulkan counterpart, so no emulation is needed here.
+        VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
+            switch (format) {
+                case wgpu::VertexFormat::Uint8x2:
+                    return VK_FORMAT_R8G8_UINT;
+                case wgpu::VertexFormat::Uint8x4:
+                    return VK_FORMAT_R8G8B8A8_UINT;
+                case wgpu::VertexFormat::Sint8x2:
+                    return VK_FORMAT_R8G8_SINT;
+                case wgpu::VertexFormat::Sint8x4:
+                    return VK_FORMAT_R8G8B8A8_SINT;
+                case wgpu::VertexFormat::Unorm8x2:
+                    return VK_FORMAT_R8G8_UNORM;
+                case wgpu::VertexFormat::Unorm8x4:
+                    return VK_FORMAT_R8G8B8A8_UNORM;
+                case wgpu::VertexFormat::Snorm8x2:
+                    return VK_FORMAT_R8G8_SNORM;
+                case wgpu::VertexFormat::Snorm8x4:
+                    return VK_FORMAT_R8G8B8A8_SNORM;
+                case wgpu::VertexFormat::Uint16x2:
+                    return VK_FORMAT_R16G16_UINT;
+                case wgpu::VertexFormat::Uint16x4:
+                    return VK_FORMAT_R16G16B16A16_UINT;
+                case wgpu::VertexFormat::Sint16x2:
+                    return VK_FORMAT_R16G16_SINT;
+                case wgpu::VertexFormat::Sint16x4:
+                    return VK_FORMAT_R16G16B16A16_SINT;
+                case wgpu::VertexFormat::Unorm16x2:
+                    return VK_FORMAT_R16G16_UNORM;
+                case wgpu::VertexFormat::Unorm16x4:
+                    return VK_FORMAT_R16G16B16A16_UNORM;
+                case wgpu::VertexFormat::Snorm16x2:
+                    return VK_FORMAT_R16G16_SNORM;
+                case wgpu::VertexFormat::Snorm16x4:
+                    return VK_FORMAT_R16G16B16A16_SNORM;
+                case wgpu::VertexFormat::Float16x2:
+                    return VK_FORMAT_R16G16_SFLOAT;
+                case wgpu::VertexFormat::Float16x4:
+                    return VK_FORMAT_R16G16B16A16_SFLOAT;
+                case wgpu::VertexFormat::Float32:
+                    return VK_FORMAT_R32_SFLOAT;
+                case wgpu::VertexFormat::Float32x2:
+                    return VK_FORMAT_R32G32_SFLOAT;
+                case wgpu::VertexFormat::Float32x3:
+                    return VK_FORMAT_R32G32B32_SFLOAT;
+                case wgpu::VertexFormat::Float32x4:
+                    return VK_FORMAT_R32G32B32A32_SFLOAT;
+                case wgpu::VertexFormat::Uint32:
+                    return VK_FORMAT_R32_UINT;
+                case wgpu::VertexFormat::Uint32x2:
+                    return VK_FORMAT_R32G32_UINT;
+                case wgpu::VertexFormat::Uint32x3:
+                    return VK_FORMAT_R32G32B32_UINT;
+                case wgpu::VertexFormat::Uint32x4:
+                    return VK_FORMAT_R32G32B32A32_UINT;
+                case wgpu::VertexFormat::Sint32:
+                    return VK_FORMAT_R32_SINT;
+                case wgpu::VertexFormat::Sint32x2:
+                    return VK_FORMAT_R32G32_SINT;
+                case wgpu::VertexFormat::Sint32x3:
+                    return VK_FORMAT_R32G32B32_SINT;
+                case wgpu::VertexFormat::Sint32x4:
+                    return VK_FORMAT_R32G32B32A32_SINT;
+                default:
+                    UNREACHABLE();
+            }
+        }
+
+        // Maps a WebGPU primitive topology to the equivalent Vulkan primitive topology.
+        VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
+            switch (topology) {
+                case wgpu::PrimitiveTopology::PointList:
+                    return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+                case wgpu::PrimitiveTopology::LineList:
+                    return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+                case wgpu::PrimitiveTopology::LineStrip:
+                    return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+            }
+            UNREACHABLE();
+        }
+
+        // Returns whether primitiveRestartEnable should be set for the given topology.
+        bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
+            // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
+            // primitive restart be only enabled on primitive topologies that support restarting
+            // (the strip topologies).
+            switch (topology) {
+                case wgpu::PrimitiveTopology::PointList:
+                case wgpu::PrimitiveTopology::LineList:
+                case wgpu::PrimitiveTopology::TriangleList:
+                    return false;
+                case wgpu::PrimitiveTopology::LineStrip:
+                case wgpu::PrimitiveTopology::TriangleStrip:
+                    return true;
+            }
+            UNREACHABLE();
+        }
+
+        // Maps a WebGPU front-face winding order to the equivalent Vulkan value.
+        VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
+            switch (face) {
+                case wgpu::FrontFace::CCW:
+                    return VK_FRONT_FACE_COUNTER_CLOCKWISE;
+                case wgpu::FrontFace::CW:
+                    return VK_FRONT_FACE_CLOCKWISE;
+            }
+            UNREACHABLE();
+        }
+
+        // Maps a WebGPU cull mode to the equivalent Vulkan cull mode flag.
+        VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
+            switch (mode) {
+                case wgpu::CullMode::None:
+                    return VK_CULL_MODE_NONE;
+                case wgpu::CullMode::Front:
+                    return VK_CULL_MODE_FRONT_BIT;
+                case wgpu::CullMode::Back:
+                    return VK_CULL_MODE_BACK_BIT;
+            }
+            UNREACHABLE();
+        }
+
+        // Maps a WebGPU blend factor to the equivalent Vulkan blend factor.
+        VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
+            switch (factor) {
+                case wgpu::BlendFactor::Zero:
+                    return VK_BLEND_FACTOR_ZERO;
+                case wgpu::BlendFactor::One:
+                    return VK_BLEND_FACTOR_ONE;
+                case wgpu::BlendFactor::Src:
+                    return VK_BLEND_FACTOR_SRC_COLOR;
+                case wgpu::BlendFactor::OneMinusSrc:
+                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+                case wgpu::BlendFactor::SrcAlpha:
+                    return VK_BLEND_FACTOR_SRC_ALPHA;
+                case wgpu::BlendFactor::OneMinusSrcAlpha:
+                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+                case wgpu::BlendFactor::Dst:
+                    return VK_BLEND_FACTOR_DST_COLOR;
+                case wgpu::BlendFactor::OneMinusDst:
+                    return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+                case wgpu::BlendFactor::DstAlpha:
+                    return VK_BLEND_FACTOR_DST_ALPHA;
+                case wgpu::BlendFactor::OneMinusDstAlpha:
+                    return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+                case wgpu::BlendFactor::SrcAlphaSaturated:
+                    return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+                case wgpu::BlendFactor::Constant:
+                    return VK_BLEND_FACTOR_CONSTANT_COLOR;
+                case wgpu::BlendFactor::OneMinusConstant:
+                    return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+            }
+            UNREACHABLE();
+        }
+
+        // Maps a WebGPU blend operation to the equivalent Vulkan blend op.
+        VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
+            switch (operation) {
+                case wgpu::BlendOperation::Add:
+                    return VK_BLEND_OP_ADD;
+                case wgpu::BlendOperation::Subtract:
+                    return VK_BLEND_OP_SUBTRACT;
+                case wgpu::BlendOperation::ReverseSubtract:
+                    return VK_BLEND_OP_REVERSE_SUBTRACT;
+                case wgpu::BlendOperation::Min:
+                    return VK_BLEND_OP_MIN;
+                case wgpu::BlendOperation::Max:
+                    return VK_BLEND_OP_MAX;
+            }
+            UNREACHABLE();
+        }
+
+        // Converts a WebGPU color write mask to Vulkan color component flags, forcing the mask
+        // to 0 when the fragment shader doesn't declare an output for this attachment.
+        VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
+                                                   bool isDeclaredInFragmentShader) {
+            // Vulkan and Dawn color write masks match, static assert it and return the mask
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
+                          VK_COLOR_COMPONENT_R_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
+                          VK_COLOR_COMPONENT_G_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
+                          VK_COLOR_COMPONENT_B_BIT);
+            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
+                          VK_COLOR_COMPONENT_A_BIT);
+
+            // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
+            // attachment writes are undefined for components which do not correspond to a fragment
+            // shader outputs", we set the color write mask to 0 to prevent such undefined values
+            // being written into the color attachments.
+            return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
+                                              : static_cast<VkColorComponentFlags>(0);
+        }
+
+        // Builds the Vulkan per-attachment blend state from a WebGPU ColorTargetState. Blending
+        // is enabled iff the target has a blend descriptor.
+        VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
+                                                             bool isDeclaredInFragmentShader) {
+            VkPipelineColorBlendAttachmentState attachment;
+            attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
+            if (attachment.blendEnable) {
+                attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
+                attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
+                attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
+                attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
+                attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
+                attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
+            } else {
+                // Swiftshader's Vulkan implementation appears to expect these values to be valid
+                // even when blending is not enabled, so fill in the blend-disabled defaults.
+                attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+                attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+                attachment.colorBlendOp = VK_BLEND_OP_ADD;
+                attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+                attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+                attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+            }
+            attachment.colorWriteMask =
+                VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+            return attachment;
+        }
+
+        // Maps a WebGPU stencil operation to the equivalent Vulkan stencil op.
+        VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
+            switch (op) {
+                case wgpu::StencilOperation::Keep:
+                    return VK_STENCIL_OP_KEEP;
+                case wgpu::StencilOperation::Zero:
+                    return VK_STENCIL_OP_ZERO;
+                case wgpu::StencilOperation::Replace:
+                    return VK_STENCIL_OP_REPLACE;
+                case wgpu::StencilOperation::IncrementClamp:
+                    return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+                case wgpu::StencilOperation::DecrementClamp:
+                    return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+                case wgpu::StencilOperation::Invert:
+                    return VK_STENCIL_OP_INVERT;
+                case wgpu::StencilOperation::IncrementWrap:
+                    return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+                case wgpu::StencilOperation::DecrementWrap:
+                    return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+            }
+            UNREACHABLE();
+        }
+
+        // Builds the Vulkan depth/stencil pipeline state from a WebGPU DepthStencilState.
+        VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
+            const DepthStencilState* descriptor) {
+            VkPipelineDepthStencilStateCreateInfo depthStencilState;
+            depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+            depthStencilState.pNext = nullptr;
+            depthStencilState.flags = 0;
+
+            // Depth writes only occur if depth is enabled, so the depth test is only disabled
+            // when the compare function is Always AND writes are off.
+            depthStencilState.depthTestEnable =
+                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+                 !descriptor->depthWriteEnabled)
+                    ? VK_FALSE
+                    : VK_TRUE;
+            depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
+            depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
+            // WebGPU has no depth-bounds test; leave it disabled with the full [0, 1] range.
+            depthStencilState.depthBoundsTestEnable = false;
+            depthStencilState.minDepthBounds = 0.0f;
+            depthStencilState.maxDepthBounds = 1.0f;
+
+            depthStencilState.stencilTestEnable =
+                StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
+
+            depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
+            depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
+            depthStencilState.front.depthFailOp =
+                VulkanStencilOp(descriptor->stencilFront.depthFailOp);
+            depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
+
+            depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
+            depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
+            depthStencilState.back.depthFailOp =
+                VulkanStencilOp(descriptor->stencilBack.depthFailOp);
+            depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
+
+            // Dawn doesn't have separate front and back stencil masks.
+            depthStencilState.front.compareMask = descriptor->stencilReadMask;
+            depthStencilState.back.compareMask = descriptor->stencilReadMask;
+            depthStencilState.front.writeMask = descriptor->stencilWriteMask;
+            depthStencilState.back.writeMask = descriptor->stencilWriteMask;
+
+            // The stencil reference is always dynamic (set via vkCmdSetStencilReference).
+            depthStencilState.front.reference = 0;
+            depthStencilState.back.reference = 0;
+
+            return depthStencilState;
+        }
+
+ } // anonymous namespace
+
+    // static
+    // Allocates the backend RenderPipeline object without creating any Vulkan resources;
+    // Initialize() (possibly run asynchronously) performs the actual VkPipeline creation.
+    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+        Device* device,
+        const RenderPipelineDescriptor* descriptor) {
+        return AcquireRef(new RenderPipeline(device, descriptor));
+    }
+
+    // Creates the VkPipeline from the frontend pipeline state: translates each fixed-function
+    // state block, fetches a compatible VkRenderPass from the device's RenderPassCache, and
+    // calls vkCreateGraphicsPipelines. Returns an error if shader module transformation or
+    // pipeline creation fails.
+    MaybeError RenderPipeline::Initialize() {
+        Device* device = ToBackend(GetDevice());
+
+        // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
+        std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
+        std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
+        std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
+        std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
+        uint32_t stageCount = 0;
+
+        for (auto stage : IterateStages(this->GetStageMask())) {
+            VkPipelineShaderStageCreateInfo shaderStage;
+
+            const ProgrammableStage& programmableStage = GetStage(stage);
+            DAWN_TRY_ASSIGN(shaderStage.module,
+                            ToBackend(programmableStage.module)
+                                ->GetTransformedModuleHandle(programmableStage.entryPoint.c_str(),
+                                                             ToBackend(GetLayout())));
+
+            shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+            shaderStage.pNext = nullptr;
+            shaderStage.flags = 0;
+            shaderStage.pSpecializationInfo = nullptr;
+            shaderStage.pName = programmableStage.entryPoint.c_str();
+
+            switch (stage) {
+                case dawn::native::SingleShaderStage::Vertex: {
+                    shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
+                    break;
+                }
+                case dawn::native::SingleShaderStage::Fragment: {
+                    shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+                    break;
+                }
+                default: {
+                    // For render pipeline only Vertex and Fragment stage is possible
+                    DAWN_UNREACHABLE();
+                    break;
+                }
+            }
+
+            // The specialization info carries the overridable-constant values; the per-stage
+            // vectors keep the data alive until vkCreateGraphicsPipelines is called.
+            shaderStage.pSpecializationInfo =
+                GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
+                                        &specializationDataEntriesPerStages[stageCount],
+                                        &specializationMapEntriesPerStages[stageCount]);
+
+            DAWN_ASSERT(stageCount < 2);
+            shaderStages[stageCount] = shaderStage;
+            stageCount++;
+        }
+
+        PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
+        VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
+            ComputeVertexInputDesc(&tempAllocations);
+
+        VkPipelineInputAssemblyStateCreateInfo inputAssembly;
+        inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+        inputAssembly.pNext = nullptr;
+        inputAssembly.flags = 0;
+        inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
+        inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
+
+        // A dummy viewport/scissor info. The validation layers force us to provide at least one
+        // scissor and one viewport here, even if we choose to make them dynamic.
+        VkViewport viewportDesc;
+        viewportDesc.x = 0.0f;
+        viewportDesc.y = 0.0f;
+        viewportDesc.width = 1.0f;
+        viewportDesc.height = 1.0f;
+        viewportDesc.minDepth = 0.0f;
+        viewportDesc.maxDepth = 1.0f;
+        VkRect2D scissorRect;
+        scissorRect.offset.x = 0;
+        scissorRect.offset.y = 0;
+        scissorRect.extent.width = 1;
+        scissorRect.extent.height = 1;
+        VkPipelineViewportStateCreateInfo viewport;
+        viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+        viewport.pNext = nullptr;
+        viewport.flags = 0;
+        viewport.viewportCount = 1;
+        viewport.pViewports = &viewportDesc;
+        viewport.scissorCount = 1;
+        viewport.pScissors = &scissorRect;
+
+        VkPipelineRasterizationStateCreateInfo rasterization;
+        rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+        rasterization.pNext = nullptr;
+        rasterization.flags = 0;
+        rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
+        rasterization.rasterizerDiscardEnable = VK_FALSE;
+        rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+        rasterization.cullMode = VulkanCullMode(GetCullMode());
+        rasterization.frontFace = VulkanFrontFace(GetFrontFace());
+        rasterization.depthBiasEnable = IsDepthBiasEnabled();
+        rasterization.depthBiasConstantFactor = GetDepthBias();
+        rasterization.depthBiasClamp = GetDepthBiasClamp();
+        rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
+        rasterization.lineWidth = 1.0f;
+
+        VkPipelineMultisampleStateCreateInfo multisample;
+        multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+        multisample.pNext = nullptr;
+        multisample.flags = 0;
+        multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
+        multisample.sampleShadingEnable = VK_FALSE;
+        multisample.minSampleShading = 0.0f;
+        // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
+        // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
+        // we have to assert that this length is indeed 1.
+        ASSERT(multisample.rasterizationSamples <= 32);
+        VkSampleMask sampleMask = GetSampleMask();
+        multisample.pSampleMask = &sampleMask;
+        multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
+        multisample.alphaToOneEnable = VK_FALSE;
+
+        VkPipelineDepthStencilStateCreateInfo depthStencilState =
+            ComputeDepthStencilDesc(GetDepthStencilState());
+
+        VkPipelineColorBlendStateCreateInfo colorBlend;
+        // colorBlend may hold pointers to elements in colorBlendAttachments, so this array must
+        // stay alive for as long as colorBlend is used (i.e. until pipeline creation below).
+        ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
+            colorBlendAttachments;
+        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+            // Initialize the "blend state info" that will be chained in the "create info" from the
+            // data pre-computed in the ColorState
+            const auto& fragmentOutputsWritten =
+                GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                const ColorTargetState* target = GetColorTargetState(i);
+                colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
+            }
+
+            colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+            colorBlend.pNext = nullptr;
+            colorBlend.flags = 0;
+            // LogicOp isn't supported so we disable it.
+            colorBlend.logicOpEnable = VK_FALSE;
+            colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
+            colorBlend.attachmentCount = static_cast<uint32_t>(GetColorAttachmentsMask().count());
+            colorBlend.pAttachments = colorBlendAttachments.data();
+            // The blend constant is always dynamic so we fill in a dummy value
+            colorBlend.blendConstants[0] = 0.0f;
+            colorBlend.blendConstants[1] = 0.0f;
+            colorBlend.blendConstants[2] = 0.0f;
+            colorBlend.blendConstants[3] = 0.0f;
+        }
+
+        // Tag all state as dynamic but stencil masks and depth bias.
+        VkDynamicState dynamicStates[] = {
+            VK_DYNAMIC_STATE_VIEWPORT,     VK_DYNAMIC_STATE_SCISSOR,
+            VK_DYNAMIC_STATE_LINE_WIDTH,   VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+            VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+        };
+        VkPipelineDynamicStateCreateInfo dynamic;
+        dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+        dynamic.pNext = nullptr;
+        dynamic.flags = 0;
+        dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
+        dynamic.pDynamicStates = dynamicStates;
+
+        // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
+        // don't matter so set them all to LoadOp::Load / StoreOp::Store. Whether the render pass
+        // has resolve target and whether depth/stencil attachment is read-only also don't matter,
+        // so set them both to false.
+        VkRenderPass renderPass = VK_NULL_HANDLE;
+        {
+            RenderPassCacheQuery query;
+
+            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+                query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
+                               wgpu::StoreOp::Store, false);
+            }
+
+            if (HasDepthStencilAttachment()) {
+                query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
+                                      wgpu::StoreOp::Store, wgpu::LoadOp::Load,
+                                      wgpu::StoreOp::Store, false);
+            }
+
+            query.SetSampleCount(GetSampleCount());
+
+            DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
+        }
+
+        // The create info chains in a bunch of things created on the stack here or inside state
+        // objects.
+        VkGraphicsPipelineCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.stageCount = stageCount;
+        createInfo.pStages = shaderStages.data();
+        createInfo.pVertexInputState = &vertexInputCreateInfo;
+        createInfo.pInputAssemblyState = &inputAssembly;
+        createInfo.pTessellationState = nullptr;
+        createInfo.pViewportState = &viewport;
+        createInfo.pRasterizationState = &rasterization;
+        createInfo.pMultisampleState = &multisample;
+        createInfo.pDepthStencilState = &depthStencilState;
+        createInfo.pColorBlendState =
+            (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
+        createInfo.pDynamicState = &dynamic;
+        createInfo.layout = ToBackend(GetLayout())->GetHandle();
+        createInfo.renderPass = renderPass;
+        createInfo.subpass = 0;
+        createInfo.basePipelineHandle = VkPipeline{};
+        createInfo.basePipelineIndex = -1;
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
+                                               &createInfo, nullptr, &*mHandle),
+            "CreateGraphicsPipeline"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    // Attaches the user-provided label to the VkPipeline object for debugging tools.
+    void RenderPipeline::SetLabelImpl() {
+        SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+                     reinterpret_cast<uint64_t&>(mHandle), "Dawn_RenderPipeline", GetLabel());
+    }
+
+    // Builds the Vulkan vertex input state from the frontend vertex buffer/attribute layout.
+    // The binding/attribute descriptions are written into |tempAllocations|, which the caller
+    // must keep alive until vkCreateGraphicsPipelines consumes the returned create info.
+    VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
+        PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
+        // Fill in the "binding info" that will be chained in the create info
+        uint32_t bindingCount = 0;
+        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+            const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
+
+            VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
+            bindingDesc->binding = static_cast<uint8_t>(slot);
+            bindingDesc->stride = bindingInfo.arrayStride;
+            bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
+
+            bindingCount++;
+        }
+
+        // Fill in the "attribute info" that will be chained in the create info
+        uint32_t attributeCount = 0;
+        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+            const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
+
+            VkVertexInputAttributeDescription* attributeDesc =
+                &tempAllocations->attributes[attributeCount];
+            attributeDesc->location = static_cast<uint8_t>(loc);
+            attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
+            attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
+            attributeDesc->offset = attributeInfo.offset;
+
+            attributeCount++;
+        }
+
+        // Build the create info
+        VkPipelineVertexInputStateCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.vertexBindingDescriptionCount = bindingCount;
+        createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
+        createInfo.vertexAttributeDescriptionCount = attributeCount;
+        createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
+        return createInfo;
+    }
+
+    // Destruction of the VkPipeline happens in DestroyImpl(), not here.
+    RenderPipeline::~RenderPipeline() = default;
+
+    // Hands the VkPipeline to the FencedDeleter so it is only destroyed once the GPU has
+    // finished using it, then clears the handle.
+    void RenderPipeline::DestroyImpl() {
+        RenderPipelineBase::DestroyImpl();
+        if (mHandle != VK_NULL_HANDLE) {
+            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            mHandle = VK_NULL_HANDLE;
+        }
+    }
+
+    // Returns the backing VkPipeline (VK_NULL_HANDLE before Initialize or after DestroyImpl).
+    VkPipeline RenderPipeline::GetHandle() const {
+        return mHandle;
+    }
+
+    // Runs Initialize() off the main thread via CreateRenderPipelineAsyncTask; |callback| is
+    // invoked with the result once pipeline creation completes.
+    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata) {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                            userdata);
+        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h
new file mode 100644
index 00000000000..7d87cacabb9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/RenderPipelineVk.h
@@ -0,0 +1,59 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
+#define DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
+
+#include "dawn/native/RenderPipeline.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+    // Vulkan backend implementation of a WebGPU render pipeline, wrapping a VkPipeline.
+    class RenderPipeline final : public RenderPipelineBase {
+      public:
+        // Allocates the object; the VkPipeline is created later by Initialize().
+        static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+        // Runs Initialize() asynchronously and reports the result through |callback|.
+        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                    void* userdata);
+
+        VkPipeline GetHandle() const;
+
+        MaybeError Initialize() override;
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~RenderPipeline() override;
+        void DestroyImpl() override;
+        using RenderPipelineBase::RenderPipelineBase;
+
+        // Stack storage for the vertex binding/attribute descriptions referenced by the
+        // VkPipelineVertexInputStateCreateInfo returned by ComputeVertexInputDesc.
+        struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
+            std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
+            std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
+        };
+        VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
+            PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
+
+        VkPipeline mHandle = VK_NULL_HANDLE;
+    };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp
new file mode 100644
index 00000000000..94ce7fc9340
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.cpp
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+
+namespace dawn::native::vulkan {
+
+    // Wraps an existing VkDeviceMemory allocation along with the memory type index it was
+    // allocated from. Does not take ownership of the memory's lifetime.
+    ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
+        : mMemory(memory), mMemoryType(memoryType) {
+    }
+
+    // Returns the wrapped VkDeviceMemory handle.
+    VkDeviceMemory ResourceHeap::GetMemory() const {
+        return mMemory;
+    }
+
+    // Returns the Vulkan memory type index this heap was allocated from.
+    size_t ResourceHeap::GetMemoryType() const {
+        return mMemoryType;
+    }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h
new file mode 100644
index 00000000000..5b822c8990a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceHeapVk.h
@@ -0,0 +1,39 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceHeap.h"
+
+namespace dawn::native::vulkan {
+
+ // Wrapper for physical memory used with or without a resource object.
+ class ResourceHeap : public ResourceHeapBase {
+ public:
+ ResourceHeap(VkDeviceMemory memory, size_t memoryType);
+ ~ResourceHeap() = default;
+
+ VkDeviceMemory GetMemory() const;
+ size_t GetMemoryType() const;
+
+ private:
+ VkDeviceMemory mMemory = VK_NULL_HANDLE;
+ size_t mMemoryType = 0;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
new file mode 100644
index 00000000000..6088ac0643b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -0,0 +1,292 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+
+#include "dawn/common/Math.h"
+#include "dawn/native/BuddyMemoryAllocator.h"
+#include "dawn/native/ResourceHeapAllocator.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
+
+        // TODO(crbug.com/dawn/849): This is a hardcoded heuristic to choose when to
+ // suballocate but it should ideally depend on the size of the memory heaps and other
+ // factors.
+ constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MiB
+
+ // Have each bucket of the buddy system allocate at least some resource of the maximum
+ // size
+ constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
+
+ } // anonymous namespace
+
+ // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
+ // service suballocation requests, but for a single Vulkan memory type.
+
+ class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
+ public:
+ SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
+ : mDevice(device),
+ mMemoryTypeIndex(memoryTypeIndex),
+ mMemoryHeapSize(memoryHeapSize),
+ mPooledMemoryAllocator(this),
+ mBuddySystem(
+ // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
+ // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
+ uint64_t(1) << Log2(mMemoryHeapSize),
+ // Take the min in the very unlikely case the memory heap is tiny.
+ std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
+ &mPooledMemoryAllocator) {
+ ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
+ }
+ ~SingleTypeAllocator() override = default;
+
+ void DestroyPool() {
+ mPooledMemoryAllocator.DestroyPool();
+ }
+
+ ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
+ return mBuddySystem.Allocate(size, alignment);
+ }
+
+ void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
+ mBuddySystem.Deallocate(allocation);
+ }
+
+ // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) override {
+ if (size > mMemoryHeapSize) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
+ }
+
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.allocationSize = size;
+ allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+
+ // First check OOM that we want to surface to the application.
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
+ &*allocatedMemory),
+ "vkAllocateMemory"));
+
+ ASSERT(allocatedMemory != VK_NULL_HANDLE);
+ return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
+ }
+
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
+ }
+
+ private:
+ Device* mDevice;
+ size_t mMemoryTypeIndex;
+ VkDeviceSize mMemoryHeapSize;
+ PooledResourceMemoryAllocator mPooledMemoryAllocator;
+ BuddyMemoryAllocator mBuddySystem;
+ };
+
+ // Implementation of ResourceMemoryAllocator
+
+ ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+ mAllocatorsPerType.reserve(info.memoryTypes.size());
+
+ for (size_t i = 0; i < info.memoryTypes.size(); i++) {
+ mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
+ mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
+ }
+ }
+
+ ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
+
+ ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
+ const VkMemoryRequirements& requirements,
+ MemoryKind kind) {
+        // The Vulkan spec guarantees at least one memory type is valid.
+ int memoryType = FindBestTypeIndex(requirements, kind);
+ ASSERT(memoryType >= 0);
+
+ VkDeviceSize size = requirements.size;
+
+ // Sub-allocate non-mappable resources because at the moment the mapped pointer
+ // is part of the resource and not the heap, which doesn't match the Vulkan model.
+ // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
+ if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable) {
+ // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
+ // hardware puts information on the memory's page table entry and allocating a linear
+ // resource in the same page as a non-linear (aka opaque) resource can cause issues.
+ // Probably because some texture compression flags are stored on the page table entry,
+ // and allocating a linear resource removes these flags.
+ //
+ // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
+ // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
+ // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
+            // GPUs often use a granularity of 64k which will lead to a lot of wasted space. Revisit
+ // with a more efficient algorithm later.
+ uint64_t alignment =
+ std::max(requirements.alignment,
+ mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
+
+ ResourceMemoryAllocation subAllocation;
+ DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
+ requirements.size, alignment));
+ if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return std::move(subAllocation);
+ }
+ }
+
+ // If sub-allocation failed, allocate memory just for it.
+ std::unique_ptr<ResourceHeapBase> resourceHeap;
+ DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
+
+ void* mappedPointer = nullptr;
+ if (kind == MemoryKind::LinearMappable) {
+ DAWN_TRY_WITH_CLEANUP(
+ CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+ ToBackend(resourceHeap.get())->GetMemory(), 0,
+ size, 0, &mappedPointer),
+ "vkMapMemory"),
+ {
+ mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap));
+ });
+ }
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+ static_cast<uint8_t*>(mappedPointer));
+ }
+
+ void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
+ switch (allocation->GetInfo().mMethod) {
+ // Some memory allocation can never be initialized, for example when wrapping
+ // swapchain VkImages with a Texture.
+ case AllocationMethod::kInvalid:
+ break;
+
+ // For direct allocation we can put the memory for deletion immediately and the fence
+ // deleter will make sure the resources are freed before the memory.
+ case AllocationMethod::kDirect: {
+ ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
+ allocation->Invalidate();
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
+ delete heap;
+ break;
+ }
+
+ // Suballocations aren't freed immediately, otherwise another resource allocation could
+ // happen just after that aliases the old one and would require a barrier.
+ // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
+ // latency to reclaim memory.
+ case AllocationMethod::kSubAllocated:
+ mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Invalidate the underlying resource heap in case the client accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation->Invalidate();
+ }
+
+ void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
+ for (const ResourceMemoryAllocation& allocation :
+ mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
+ ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+ size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+
+ mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
+ }
+
+ mSubAllocationsToDelete.ClearUpTo(completedSerial);
+ }
+
+ int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
+ MemoryKind kind) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+ bool mappable = kind == MemoryKind::LinearMappable;
+
+ // Find a suitable memory type for this allocation
+ int bestType = -1;
+ for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+ // Resource must support this memory type
+ if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+ continue;
+ }
+
+ // Mappable resource must be host visible
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ continue;
+ }
+
+ // Mappable must also be host coherent.
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+ continue;
+ }
+
+ // Found the first candidate memory type
+ if (bestType == -1) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+
+ // For non-mappable resources, favor device local memory.
+ bool currentDeviceLocal =
+ info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ bool bestDeviceLocal =
+ info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
+ if (currentDeviceLocal) {
+ bestType = static_cast<int>(i);
+ }
+ continue;
+ }
+
+ // All things equal favor the memory in the biggest heap
+ VkDeviceSize bestTypeHeapSize =
+ info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+ VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+ if (candidateHeapSize > bestTypeHeapSize) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+ }
+
+ return bestType;
+ }
+
+ void ResourceMemoryAllocator::DestroyPool() {
+ for (auto& alloc : mAllocatorsPerType) {
+ alloc->DestroyPool();
+ }
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
new file mode 100644
index 00000000000..81864fd13e0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
@@ -0,0 +1,66 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+
+#include "dawn/common/SerialQueue.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/IntegerTypes.h"
+#include "dawn/native/PooledResourceMemoryAllocator.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+
+#include <memory>
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ // Various kinds of memory that influence the result of the allocation. For example, to take
+ // into account mappability and Vulkan's bufferImageGranularity.
+ enum class MemoryKind {
+ Linear,
+ LinearMappable,
+ Opaque,
+ };
+
+ class ResourceMemoryAllocator {
+ public:
+ ResourceMemoryAllocator(Device* device);
+ ~ResourceMemoryAllocator();
+
+ ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
+ MemoryKind kind);
+ void Deallocate(ResourceMemoryAllocation* allocation);
+
+ void DestroyPool();
+
+ void Tick(ExecutionSerial completedSerial);
+
+ int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
+
+ private:
+ Device* mDevice;
+
+ class SingleTypeAllocator;
+ std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
+
+ SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp
new file mode 100644
index 00000000000..c7fc1a362c2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.cpp
@@ -0,0 +1,131 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/SamplerVk.h"
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
+ VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
+ switch (mode) {
+ case wgpu::AddressMode::Repeat:
+ return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case wgpu::AddressMode::MirrorRepeat:
+ return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case wgpu::AddressMode::ClampToEdge:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ }
+ UNREACHABLE();
+ }
+
+ VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Linear:
+ return VK_FILTER_LINEAR;
+ case wgpu::FilterMode::Nearest:
+ return VK_FILTER_NEAREST;
+ }
+ UNREACHABLE();
+ }
+
+ VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
+ switch (filter) {
+ case wgpu::FilterMode::Linear:
+ return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ case wgpu::FilterMode::Nearest:
+ return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ }
+ UNREACHABLE();
+ }
+ } // anonymous namespace
+
+ // static
+ ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+ const SamplerDescriptor* descriptor) {
+ Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+ DAWN_TRY(sampler->Initialize(descriptor));
+ return sampler;
+ }
+
+ MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+ VkSamplerCreateInfo createInfo = {};
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
+ createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
+ createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
+ createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
+ createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
+ createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
+ createInfo.mipLodBias = 0.0f;
+ if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+ createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
+ createInfo.compareEnable = VK_TRUE;
+ } else {
+ // Still set the compareOp so it's not garbage.
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ createInfo.compareEnable = VK_FALSE;
+ }
+ createInfo.minLod = descriptor->lodMinClamp;
+ createInfo.maxLod = descriptor->lodMaxClamp;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ Device* device = ToBackend(GetDevice());
+ uint16_t maxAnisotropy = GetMaxAnisotropy();
+ if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
+ createInfo.anisotropyEnable = VK_TRUE;
+ // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
+ createInfo.maxAnisotropy =
+ std::min(static_cast<float>(maxAnisotropy),
+ device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
+ } else {
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1;
+ }
+
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "CreateSampler"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ Sampler::~Sampler() = default;
+
+ void Sampler::DestroyImpl() {
+ SamplerBase::DestroyImpl();
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+ VkSampler Sampler::GetHandle() const {
+ return mHandle;
+ }
+
+ void Sampler::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SAMPLER,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_Sampler", GetLabel());
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h
new file mode 100644
index 00000000000..1b246c99bdb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SamplerVk.h
@@ -0,0 +1,48 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SAMPLERVK_H_
+#define DAWNNATIVE_VULKAN_SAMPLERVK_H_
+
+#include "dawn/native/Sampler.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class Sampler final : public SamplerBase {
+ public:
+ static ResultOrError<Ref<Sampler>> Create(Device* device,
+ const SamplerDescriptor* descriptor);
+
+ VkSampler GetHandle() const;
+
+ private:
+ ~Sampler() override;
+ void DestroyImpl() override;
+ using SamplerBase::SamplerBase;
+ MaybeError Initialize(const SamplerDescriptor* descriptor);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
+ VkSampler mHandle = VK_NULL_HANDLE;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_SAMPLERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp
new file mode 100644
index 00000000000..9c1e7df4d08
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.cpp
@@ -0,0 +1,248 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/ShaderModuleVk.h"
+
+#include "dawn/native/SpirvValidation.h"
+#include "dawn/native/TintUtils.h"
+#include "dawn/native/vulkan/BindGroupLayoutVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/PipelineLayoutVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/tracing/TraceEvent.h"
+
+#include <tint/tint.h>
+#include <spirv-tools/libspirv.hpp>
+
+namespace dawn::native::vulkan {
+
+ ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
+ Device* device)
+ : mDevice(device) {
+ }
+
+ ShaderModule::ConcurrentTransformedShaderModuleCache::
+ ~ConcurrentTransformedShaderModuleCache() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ for (const auto& [_, module] : mTransformedShaderModuleCache) {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
+ }
+ }
+
+ VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::FindShaderModule(
+ const PipelineLayoutEntryPointPair& key) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto iter = mTransformedShaderModuleCache.find(key);
+ if (iter != mTransformedShaderModuleCache.end()) {
+ auto cached = iter->second;
+ return cached;
+ }
+ return VK_NULL_HANDLE;
+ }
+
+ VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGetCachedShaderModule(
+ const PipelineLayoutEntryPointPair& key,
+ VkShaderModule value) {
+ ASSERT(value != VK_NULL_HANDLE);
+ std::lock_guard<std::mutex> lock(mMutex);
+ auto iter = mTransformedShaderModuleCache.find(key);
+ if (iter == mTransformedShaderModuleCache.end()) {
+ mTransformedShaderModuleCache.emplace(key, value);
+ return value;
+ } else {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(value);
+ return iter->second;
+ }
+ }
+
+ // static
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+ DAWN_TRY(module->Initialize(parseResult));
+ return module;
+ }
+
+ ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor),
+ mTransformedShaderModuleCache(
+ std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {
+ }
+
+ MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+ if (GetDevice()->IsRobustnessEnabled()) {
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ tint::transform::Robustness robustness;
+ tint::transform::DataMap transformInputs;
+
+ tint::Program program;
+ DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
+ transformInputs, nullptr, nullptr));
+ // Rather than use a new ParseResult object, we just reuse the original parseResult
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+ }
+
+ return InitializeBase(parseResult);
+ }
+
+ void ShaderModule::DestroyImpl() {
+ ShaderModuleBase::DestroyImpl();
+ // Remove reference to internal cache to trigger cleanup.
+ mTransformedShaderModuleCache = nullptr;
+ }
+
+ ShaderModule::~ShaderModule() = default;
+
+ ResultOrError<VkShaderModule> ShaderModule::GetTransformedModuleHandle(
+ const char* entryPointName,
+ PipelineLayout* layout) {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General,
+ "ShaderModuleVk::GetTransformedModuleHandle");
+
+ // If the shader was destroyed, we should never call this function.
+ ASSERT(IsAlive());
+
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ auto cacheKey = std::make_pair(layout, entryPointName);
+ VkShaderModule cachedShaderModule =
+ mTransformedShaderModuleCache->FindShaderModule(cacheKey);
+ if (cachedShaderModule != VK_NULL_HANDLE) {
+ return cachedShaderModule;
+ }
+
+ // Creation of VkShaderModule is deferred to this point when using tint generator
+
+ // Remap BindingNumber to BindingIndex in WGSL shader
+ using BindingRemapper = tint::transform::BindingRemapper;
+ using BindingPoint = tint::transform::BindingPoint;
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
+
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+ for (const auto& it : groupBindingInfo) {
+ BindingNumber binding = it.first;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(bindingIndex)};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+ }
+ }
+ }
+
+ tint::transform::Manager transformManager;
+ transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+ // Many Vulkan drivers can't handle multi-entrypoint shader modules.
+ transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+
+ tint::transform::DataMap transformInputs;
+ transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+ std::move(accessControls),
+ /* mayCollide */ false);
+ transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
+ // Transform external textures into the binding locations specified in the bgl
+ // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
+ tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+ for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+ ExternalTextureBindingExpansionMap expansions =
+ bgl->GetExternalTextureBindingExpansionMap();
+
+ std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+ expansions.begin();
+
+ while (it != expansions.end()) {
+ newBindingsMap[{static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
+ {static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
+ {static_cast<uint32_t>(i),
+ static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
+ it++;
+ }
+ }
+
+ if (!newBindingsMap.empty()) {
+ transformManager.Add<tint::transform::MultiplanarExternalTexture>();
+ transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+ newBindingsMap);
+ }
+
+ tint::Program program;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
+ transformInputs, nullptr, nullptr));
+ }
+
+ tint::writer::spirv::Options options;
+ options.emit_vertex_point_size = true;
+ options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+
+ std::vector<uint32_t> spirv;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
+ auto result = tint::writer::spirv::Generate(&program, options);
+ DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
+ result.error);
+
+ spirv = std::move(result.spirv);
+ }
+
+ DAWN_TRY(
+ ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
+
+ VkShaderModuleCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.codeSize = spirv.size() * sizeof(uint32_t);
+ createInfo.pCode = spirv.data();
+
+ Device* device = ToBackend(GetDevice());
+
+ VkShaderModule newHandle = VK_NULL_HANDLE;
+ {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateShaderModule(
+ device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
+ "CreateShaderModule"));
+ }
+ if (newHandle != VK_NULL_HANDLE) {
+ newHandle =
+ mTransformedShaderModuleCache->AddOrGetCachedShaderModule(cacheKey, newHandle);
+ }
+
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SHADER_MODULE,
+ reinterpret_cast<uint64_t&>(newHandle), "Dawn_ShaderModule", GetLabel());
+
+ return newHandle;
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h
new file mode 100644
index 00000000000..7040b74f7fc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/ShaderModuleVk.h
@@ -0,0 +1,67 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
+#define DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
+
+#include "dawn/native/ShaderModule.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+
+#include <mutex>
+
+namespace dawn::native::vulkan {
+
+ class Device;
+ class PipelineLayout;
+
+ class ShaderModule final : public ShaderModuleBase {
+ public:
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
+
+ ResultOrError<VkShaderModule> GetTransformedModuleHandle(const char* entryPointName,
+ PipelineLayout* layout);
+
+ private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ ~ShaderModule() override;
+ MaybeError Initialize(ShaderModuleParseResult* parseResult);
+ void DestroyImpl() override;
+
+ // New handles created by GetTransformedModuleHandle at pipeline creation time
+ class ConcurrentTransformedShaderModuleCache {
+ public:
+ explicit ConcurrentTransformedShaderModuleCache(Device* device);
+ ~ConcurrentTransformedShaderModuleCache();
+ VkShaderModule FindShaderModule(const PipelineLayoutEntryPointPair& key);
+ VkShaderModule AddOrGetCachedShaderModule(const PipelineLayoutEntryPointPair& key,
+ VkShaderModule value);
+
+ private:
+ Device* mDevice;
+ std::mutex mMutex;
+ std::unordered_map<PipelineLayoutEntryPointPair,
+ VkShaderModule,
+ PipelineLayoutEntryPointPairHashFunc>
+ mTransformedShaderModuleCache;
+ };
+ std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp
new file mode 100644
index 00000000000..fb6631592e3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.cpp
@@ -0,0 +1,77 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ StagingBuffer::StagingBuffer(size_t size, Device* device)
+ : StagingBufferBase(size), mDevice(device) {
+ }
+
+ MaybeError StagingBuffer::Initialize() {
+ VkBufferCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.size = GetSize();
+ createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
+ "vkCreateBuffer"));
+
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
+
+ DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
+ requirements, MemoryKind::LinearMappable));
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
+ ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
+ mAllocation.GetOffset()),
+ "vkBindBufferMemory"));
+
+ mMappedPointer = mAllocation.GetMappedPointer();
+ if (mMappedPointer == nullptr) {
+ return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
+ }
+
+ SetDebugName(mDevice, VK_OBJECT_TYPE_BUFFER, reinterpret_cast<uint64_t&>(mBuffer),
+ "Dawn_StagingBuffer");
+
+ return {};
+ }
+
+ StagingBuffer::~StagingBuffer() {
+ mMappedPointer = nullptr;
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
+ mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
+ }
+
+ VkBuffer StagingBuffer::GetBufferHandle() const {
+ return mBuffer;
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h
new file mode 100644
index 00000000000..b6ad68b06ca
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/StagingBufferVk.h
@@ -0,0 +1,42 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_STAGINGBUFFERVK_H_
+#define DAWNNATIVE_STAGINGBUFFERVK_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/StagingBuffer.h"
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ class StagingBuffer : public StagingBufferBase {
+ public:
+ StagingBuffer(size_t size, Device* device);
+ ~StagingBuffer() override;
+
+ VkBuffer GetBufferHandle() const;
+
+ MaybeError Initialize() override;
+
+ private:
+ Device* mDevice;
+ VkBuffer mBuffer;
+ ResourceMemoryAllocation mAllocation;
+ };
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_STAGINGBUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp
new file mode 100644
index 00000000000..d0750e6709d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.cpp
@@ -0,0 +1,664 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/SwapChainVk.h"
+
+#include "dawn/common/Compiler.h"
+#include "dawn/native/Instance.h"
+#include "dawn/native/Surface.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <algorithm>
+
+#if defined(DAWN_USE_X11)
+# include "dawn/native/XlibXcbFunctions.h"
+#endif // defined(DAWN_USE_X11)
+
+namespace dawn::native::vulkan {
+
+ // OldSwapChain
+
+ // static
+ Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
+ }
+
+ OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
+ const auto& im = GetImplementation();
+ DawnWSIContextVulkan wsiContext = {};
+ im.Init(im.userData, &wsiContext);
+
+ ASSERT(im.textureUsage != WGPUTextureUsage_None);
+ mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+ }
+
+ OldSwapChain::~OldSwapChain() {
+ }
+
+ TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ const auto& im = GetImplementation();
+ DawnSwapChainNextTexture next = {};
+ DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+
+ if (error) {
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
+ return nullptr;
+ }
+
+ ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
+ VkImage nativeTexture = VkImage::CreateFromHandle(image);
+ return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
+ .Detach();
+ }
+
+ MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+ Device* device = ToBackend(GetDevice());
+
+ // Perform the necessary pipeline barriers for the texture to be used with the usage
+ // requested by the implementation.
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+ ToBackend(view->GetTexture())
+ ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
+
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ return {};
+ }
+
+ // SwapChain
+
+ namespace {
+
+ ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
+ const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
+ const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
+ VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
+
+ // May not be used in the platform-specific switches below.
+ DAWN_UNUSED(info);
+ DAWN_UNUSED(fn);
+ DAWN_UNUSED(instance);
+
+ switch (surface->GetType()) {
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ case Surface::Type::MetalLayer:
+ if (info.HasExt(InstanceExt::MetalSurface)) {
+ VkMetalSurfaceCreateInfoEXT createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.pLayer = surface->GetMetalLayer();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateMetalSurface"));
+ return vkSurface;
+ }
+ break;
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ case Surface::Type::WindowsHWND:
+ if (info.HasExt(InstanceExt::Win32Surface)) {
+ VkWin32SurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
+ createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateWin32Surface"));
+ return vkSurface;
+ }
+ break;
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ case Surface::Type::Xlib: {
+ if (info.HasExt(InstanceExt::XlibSurface)) {
+ VkXlibSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
+ createInfo.window = surface->GetXWindow();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateXlibSurface"));
+ return vkSurface;
+ }
+
+ // Fall back to using XCB surfaces if the Xlib extension isn't available.
+ // See https://xcb.freedesktop.org/MixingCalls/ for more information about
+ // interoperability between Xlib and XCB
+ const XlibXcbFunctions* xlibXcb =
+ adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
+ ASSERT(xlibXcb != nullptr);
+
+ if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
+ VkXcbSurfaceCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ // The XCB connection lives as long as the X11 display.
+ createInfo.connection = xlibXcb->xGetXCBConnection(
+ static_cast<Display*>(surface->GetXDisplay()));
+ createInfo.window = surface->GetXWindow();
+
+ VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+ "CreateXcbSurfaceKHR"));
+ return vkSurface;
+ }
+ break;
+ }
+#endif // defined(DAWN_USE_X11)
+
+ default:
+ break;
+ }
+
+ return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
+ surface->GetType());
+ }
+
+ VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
+ switch (mode) {
+ case wgpu::PresentMode::Fifo:
+ return VK_PRESENT_MODE_FIFO_KHR;
+ case wgpu::PresentMode::Immediate:
+ return VK_PRESENT_MODE_IMMEDIATE_KHR;
+ case wgpu::PresentMode::Mailbox:
+ return VK_PRESENT_MODE_MAILBOX_KHR;
+ }
+ UNREACHABLE();
+ }
+
+ uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
+ switch (mode) {
+ case VK_PRESENT_MODE_FIFO_KHR:
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return 2;
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return 3;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ } // anonymous namespace
+
+ // static
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+ DAWN_TRY(swapchain->Initialize(previousSwapChain));
+ return swapchain;
+ }
+
+ SwapChain::~SwapChain() = default;
+
+ void SwapChain::DestroyImpl() {
+ SwapChainBase::DestroyImpl();
+ DetachFromSurface();
+ }
+
+ // Note that when we need to re-create the swapchain because it is out of date,
+ // previousSwapChain can be set to `this`.
+ MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+ Device* device = ToBackend(GetDevice());
+ Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
+
+ VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
+
+ if (previousSwapChain != nullptr) {
+ // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
+ // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
+
+ // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
+ "Vulkan SwapChain cannot switch backend types from %s to %s.",
+ previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
+
+ // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+ SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+ // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
+ // Vulkan devices on different VkInstances. Probably needs to block too!
+ VkInstance previousInstance =
+ ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
+ DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
+ "Vulkan SwapChain cannot switch between Vulkan instances.");
+
+ // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
+ // VkSurfaceKHR provided since they are on the same instance.
+ std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
+
+ // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
+ // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
+ // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
+ // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
+ // using the fenced deleter makes the code simpler).
+ std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
+ ToBackend(previousSwapChain->GetDevice())
+ ->GetFencedDeleter()
+ ->DeleteWhenUnused(previousVkSwapChain);
+ }
+
+ if (mVkSurface == VK_NULL_HANDLE) {
+ DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
+ }
+
+ VulkanSurfaceInfo surfaceInfo;
+ DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
+
+ DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
+
+ // TODO Choose config instead of hardcoding
+ VkSwapchainCreateInfoKHR createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.surface = mVkSurface;
+ createInfo.minImageCount = mConfig.targetImageCount;
+ createInfo.imageFormat = mConfig.format;
+ createInfo.imageColorSpace = mConfig.colorSpace;
+ createInfo.imageExtent = mConfig.extent;
+ createInfo.imageArrayLayers = 1;
+ createInfo.imageUsage = mConfig.usage;
+ createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ createInfo.queueFamilyIndexCount = 0;
+ createInfo.pQueueFamilyIndices = nullptr;
+ createInfo.preTransform = mConfig.transform;
+ createInfo.compositeAlpha = mConfig.alphaMode;
+ createInfo.presentMode = mConfig.presentMode;
+ createInfo.clipped = false;
+ createInfo.oldSwapchain = previousVkSwapChain;
+
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo,
+ nullptr, &*mSwapChain),
+ "CreateSwapChain"));
+
+ // Gather the swapchain's images. Implementations are allowed to return more images than the
+ // number we asked for.
+ uint32_t count = 0;
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
+ "GetSwapChainImages1"));
+
+ mSwapChainImages.resize(count);
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
+ AsVkArray(mSwapChainImages.data())),
+ "GetSwapChainImages2"));
+
+ return {};
+ }
+
+ ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
+ const VulkanSurfaceInfo& surfaceInfo) const {
+ Config config;
+
+ // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
+ // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
+ // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
+ // FIFO.
+ {
+ auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
+ VkPresentModeKHR target) -> bool {
+ return std::find(modes.begin(), modes.end(), target) != modes.end();
+ };
+
+ VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
+ const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
+ VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+ };
+
+ // Go to the target mode.
+ size_t modeIndex = 0;
+ while (kPresentModeFallbacks[modeIndex] != targetMode) {
+ modeIndex++;
+ }
+
+ // Find the first available fallback.
+ while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
+ modeIndex++;
+ }
+
+ ASSERT(modeIndex < kPresentModeFallbacks.size());
+ config.presentMode = kPresentModeFallbacks[modeIndex];
+ }
+
+ // Choose the target width or do a blit.
+ if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
+ GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
+ GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
+ GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
+ config.needsBlit = true;
+ } else {
+ config.extent.width = GetWidth();
+ config.extent.height = GetHeight();
+ }
+
+ // Choose the target usage or do a blit.
+ VkImageUsageFlags targetUsages =
+ VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
+ VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
+ if (!IsSubset(targetUsages, supportedUsages)) {
+ config.needsBlit = true;
+ } else {
+ config.usage = targetUsages;
+ config.wgpuUsage = GetUsage();
+ }
+
+ // Only support BGRA8Unorm with SRGB color space for now.
+ bool hasBGRA8Unorm = false;
+ for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
+ if (format.format == VK_FORMAT_B8G8R8A8_UNORM &&
+ format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
+ hasBGRA8Unorm = true;
+ break;
+ }
+ }
+ if (!hasBGRA8Unorm) {
+ return DAWN_INTERNAL_ERROR(
+ "Vulkan SwapChain must support BGRA8Unorm with sRGB colorspace.");
+ }
+ config.format = VK_FORMAT_B8G8R8A8_UNORM;
+ config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+ config.wgpuFormat = wgpu::TextureFormat::BGRA8Unorm;
+
+ // Only the identity transform with opaque alpha is supported for now.
+ DAWN_INVALID_IF((surfaceInfo.capabilities.supportedTransforms &
+ VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
+ "Vulkan SwapChain must support the identity transform.");
+
+ config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+
+ DAWN_INVALID_IF((surfaceInfo.capabilities.supportedCompositeAlpha &
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
+ "Vulkan SwapChain must support opaque alpha.");
+
+ config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
+ // Choose the number of images for the swapchain= and clamp it to the min and max from the
+ // surface capabilities. maxImageCount = 0 means there is no limit.
+ ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
+ surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
+ uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
+
+ targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
+ if (surfaceInfo.capabilities.maxImageCount != 0) {
+ targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
+ }
+
+ config.targetImageCount = targetCount;
+
+ // Choose a valid config for the swapchain texture that will receive the blit.
+ if (config.needsBlit) {
+ // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
+ // the case it is very likely that the target extent works, but clamp it just in case.
+ // Using the target extent for the blit is better when possible so that texels don't
+ // get stretched. This case is exposed by having the special "-1" value in both
+ // dimensions of the extent.
+ constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
+ if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
+ surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
+ // extent = clamp(targetExtent, minExtent, maxExtent)
+ config.extent.width = GetWidth();
+ config.extent.width =
+ std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
+ config.extent.width =
+ std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
+
+ config.extent.height = GetHeight();
+ config.extent.height =
+ std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
+ config.extent.height =
+ std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
+ } else {
+ // If it is not an adaptable swapchain, just use the current extent for the blit
+ // texture.
+ config.extent = surfaceInfo.capabilities.currentExtent;
+ }
+
+ // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
+ // then we'll need to have a second fallback that uses a blit shader :(
+ if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
+ return DAWN_INTERNAL_ERROR(
+ "SwapChain cannot fallback to a blit because of a missing "
+ "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+ }
+ config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ config.wgpuUsage = wgpu::TextureUsage::CopyDst;
+ }
+
+ return config;
+ }
+
+ MaybeError SwapChain::PresentImpl() {
+ Device* device = ToBackend(GetDevice());
+
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+ if (mConfig.needsBlit) {
+ // TODO ditto same as present below: eagerly transition the blit texture to CopySrc.
+ mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+ mBlitTexture->GetAllSubresources());
+ mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
+ mTexture->GetAllSubresources());
+
+ VkImageBlit region;
+ region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.srcSubresource.mipLevel = 0;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ region.srcOffsets[0] = {0, 0, 0};
+ region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
+ static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
+
+ region.dstSubresource = region.srcSubresource;
+ region.dstOffsets[0] = {0, 0, 0};
+ region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
+ static_cast<int32_t>(mTexture->GetHeight()), 1};
+
+ device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
+ mBlitTexture->GetCurrentLayoutForSwapChain(),
+ mTexture->GetHandle(), mTexture->GetCurrentLayoutForSwapChain(),
+ 1, &region, VK_FILTER_LINEAR);
+
+ // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
+ // instead of creating a new one every time. This will involve "un-destroying" the
+ // texture or making the blit texture "external".
+ mBlitTexture->APIDestroy();
+ mBlitTexture = nullptr;
+ }
+
+ // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+ // presentable texture to present at the end of submits that use them and ideally even
+ // folding that in the free layout transition at the end of render passes.
+ mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
+ mTexture->GetAllSubresources());
+
+ DAWN_TRY(device->SubmitPendingCommands());
+
+ // Assuming that the present queue is the same as the graphics queue, the proper
+ // synchronization has already been done on the queue so we don't need to wait on any
+ // semaphores.
+ // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
+ VkPresentInfoKHR presentInfo;
+ presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ presentInfo.pNext = nullptr;
+ presentInfo.waitSemaphoreCount = 0;
+ presentInfo.pWaitSemaphores = nullptr;
+ presentInfo.swapchainCount = 1;
+ presentInfo.pSwapchains = &*mSwapChain;
+ presentInfo.pImageIndices = &mLastImageIndex;
+ presentInfo.pResults = nullptr;
+
+ // Free the texture before present so error handling doesn't skip that step.
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+
+ VkResult result =
+ VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
+
+ switch (result) {
+ case VK_SUCCESS:
+ // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
+ // exactly, but can still be used to present to the surface successfully", so we
+ // can also treat it as a "success" error code of vkQueuePresentKHR().
+ case VK_SUBOPTIMAL_KHR:
+ return {};
+
+ // This present cannot be recovered. Re-initialize the VkSwapchain so that future
+ // presents work..
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ return Initialize(this);
+
+ // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+ case VK_ERROR_SURFACE_LOST_KHR:
+ default:
+ return CheckVkSuccess(::VkResult(result), "QueuePresent");
+ }
+ }
+
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+ return GetCurrentTextureViewInternal();
+ }
+
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
+ Device* device = ToBackend(GetDevice());
+
+ // Transiently create a semaphore that will be signaled when the presentation engine is done
+ // with the swapchain image. Further operations on the image will wait for this semaphore.
+ VkSemaphoreCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
+ "CreateSemaphore"));
+
+ VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
+ device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
+ VkFence{}, &mLastImageIndex));
+
+ if (result == VK_SUCCESS) {
+ // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
+ // used instead of directly on the recording context?
+ device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+ } else {
+ // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
+ // we get a chance.
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
+ }
+
+ switch (result) {
+ // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
+ // the swapchain is in a suboptimal state?
+ case VK_SUBOPTIMAL_KHR:
+ case VK_SUCCESS:
+ break;
+
+ case VK_ERROR_OUT_OF_DATE_KHR: {
+ // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
+ // swapchains always return that they are out of date.
+ if (isReentrant) {
+ // TODO(crbug.com/dawn/269): Allow losing the surface instead?
+ return DAWN_INTERNAL_ERROR(
+ "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
+ }
+
+ // Re-initialize the VkSwapchain and try getting the texture again.
+ DAWN_TRY(Initialize(this));
+ return GetCurrentTextureViewInternal(true);
+ }
+
+ // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+ case VK_ERROR_SURFACE_LOST_KHR:
+ default:
+ DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
+ }
+
+ TextureDescriptor textureDesc;
+ textureDesc.size.width = mConfig.extent.width;
+ textureDesc.size.height = mConfig.extent.height;
+ textureDesc.format = mConfig.wgpuFormat;
+ textureDesc.usage = mConfig.wgpuUsage;
+
+ VkImage currentImage = mSwapChainImages[mLastImageIndex];
+ mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
+
+ // In the happy path we can use the swapchain image directly.
+ if (!mConfig.needsBlit) {
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mTexture->APICreateView();
+ }
+
+ // The blit texture always perfectly matches what the user requested for the swapchain.
+ // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
+ TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
+ DAWN_TRY_ASSIGN(mBlitTexture,
+ Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mBlitTexture->APICreateView();
+ }
+
+ void SwapChain::DetachFromSurfaceImpl() {
+ if (mTexture != nullptr) {
+ mTexture->APIDestroy();
+ mTexture = nullptr;
+ }
+
+ if (mBlitTexture != nullptr) {
+ mBlitTexture->APIDestroy();
+ mBlitTexture = nullptr;
+ }
+
+ // The swapchain images are destroyed with the swapchain.
+ if (mSwapChain != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+ mSwapChain = VK_NULL_HANDLE;
+ }
+
+ if (mVkSurface != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
+ mVkSurface = VK_NULL_HANDLE;
+ }
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h
new file mode 100644
index 00000000000..4ec2ad8fed6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/SwapChainVk.h
@@ -0,0 +1,98 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
+#define DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
+
+#include "dawn/native/SwapChain.h"
+
+#include "dawn/common/vulkan_platform.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+ class Device;
+ class Texture;
+ struct VulkanSurfaceInfo;
+
    // Implementation of the deprecated swapchain path (dawn::native::SwapChain created from a
    // SwapChainDescriptor rather than from a Surface). Kept until all users migrate to the
    // surface-based SwapChain below.
    class OldSwapChain : public OldSwapChainBase {
      public:
        // Factory: creates the swapchain wrapper for |device| from |descriptor|.
        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);

      protected:
        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
        ~OldSwapChain() override;

        // Returns the texture to render to for the current frame.
        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
        // Hook called right before the image is presented to the surface.
        MaybeError OnBeforePresent(TextureViewBase* texture) override;

      private:
        // Usage requested via the swapchain implementation's Configure call.
        wgpu::TextureUsage mTextureUsage;
    };
+
    // Surface-based swapchain implementation on top of VkSwapchainKHR. When the Vulkan surface
    // cannot natively satisfy the WebGPU configuration, a "blit" texture is interposed and
    // copied into the real swapchain image at present time (see Config::needsBlit).
    class SwapChain : public NewSwapChainBase {
      public:
        // Factory: builds the swapchain for |surface|, recycling |previousSwapChain|'s
        // VkSurfaceKHR/VkSwapchainKHR when possible.
        static ResultOrError<Ref<SwapChain>> Create(Device* device,
                                                    Surface* surface,
                                                    NewSwapChainBase* previousSwapChain,
                                                    const SwapChainDescriptor* descriptor);
        ~SwapChain() override;

      private:
        using NewSwapChainBase::NewSwapChainBase;
        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
        void DestroyImpl() override;

        struct Config {
            // Information that's passed to vulkan swapchain creation.
            VkPresentModeKHR presentMode;
            VkExtent2D extent;
            VkImageUsageFlags usage;
            VkFormat format;
            VkColorSpaceKHR colorSpace;
            uint32_t targetImageCount;
            VkSurfaceTransformFlagBitsKHR transform;
            VkCompositeAlphaFlagBitsKHR alphaMode;

            // Redundant information but as WebGPU enums to create the wgpu::Texture that
            // encapsulates the native swapchain texture.
            wgpu::TextureUsage wgpuUsage;
            wgpu::TextureFormat wgpuFormat;

            // Information about the blit workarounds we need to do (if any)
            bool needsBlit = false;
        };
        // Derives a Config from the surface capabilities; errors if nothing compatible exists.
        ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
        // Acquires the next image; |isReentrant| guards against infinite recursion when the
        // swapchain has to be recreated (e.g. after VK_ERROR_OUT_OF_DATE_KHR) mid-acquire.
        ResultOrError<TextureViewBase*> GetCurrentTextureViewInternal(bool isReentrant = false);

        // NewSwapChainBase implementation
        MaybeError PresentImpl() override;
        ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
        void DetachFromSurfaceImpl() override;

        Config mConfig;

        // Native objects; VK_NULL_HANDLE when detached from the surface.
        VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
        VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
        std::vector<VkImage> mSwapChainImages;
        // Index (into mSwapChainImages) returned by the most recent vkAcquireNextImageKHR.
        uint32_t mLastImageIndex = 0;

        // Intermediate texture used only when mConfig.needsBlit is true.
        Ref<Texture> mBlitTexture;
        // Wrapper around the currently acquired swapchain image.
        Ref<Texture> mTexture;
    };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp
new file mode 100644
index 00000000000..52d03bc0f4b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.cpp
@@ -0,0 +1,1367 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/TextureVk.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Math.h"
+#include "dawn/native/DynamicUploader.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BufferVk.h"
+#include "dawn/native/vulkan/CommandRecordingContext.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/FencedDeleter.h"
+#include "dawn/native/vulkan/ResourceHeapVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/StagingBufferVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ namespace {
        // Converts a Dawn texture view dimension to a Vulkan image view type.
        // Contrary to image types, image view types include arrayness and cubemapness.
        // Undefined is not a valid input: validation resolves it before reaching the backend.
        VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
            switch (dimension) {
                case wgpu::TextureViewDimension::e1D:
                    return VK_IMAGE_VIEW_TYPE_1D;
                case wgpu::TextureViewDimension::e2D:
                    return VK_IMAGE_VIEW_TYPE_2D;
                case wgpu::TextureViewDimension::e2DArray:
                    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
                case wgpu::TextureViewDimension::Cube:
                    return VK_IMAGE_VIEW_TYPE_CUBE;
                case wgpu::TextureViewDimension::CubeArray:
                    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
                case wgpu::TextureViewDimension::e3D:
                    return VK_IMAGE_VIEW_TYPE_3D;

                case wgpu::TextureViewDimension::Undefined:
                    UNREACHABLE();
            }
        }
+
        // Computes which vulkan access type could be required for the given Dawn usage.
        // |usage| may contain several usage bits; the resulting mask is the union of the
        // access flags for each of them. Used for src/dstAccessMask of image barriers.
        // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
        // the previous usage is readonly because an execution dependency is sufficient.
        VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
            VkAccessFlags flags = 0;

            if (usage & wgpu::TextureUsage::CopySrc) {
                flags |= VK_ACCESS_TRANSFER_READ_BIT;
            }
            if (usage & wgpu::TextureUsage::CopyDst) {
                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
            }
            if (usage & wgpu::TextureUsage::TextureBinding) {
                flags |= VK_ACCESS_SHADER_READ_BIT;
            }
            if (usage & wgpu::TextureUsage::StorageBinding) {
                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
            }
            if (usage & wgpu::TextureUsage::RenderAttachment) {
                // The format decides whether this is a color or a depth/stencil attachment.
                if (format.HasDepthOrStencil()) {
                    flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
                } else {
                    flags |=
                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
                }
            }
            if (usage & kReadOnlyRenderAttachment) {
                flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
            }
            if (usage & kPresentTextureUsage) {
                // The present usage is only used internally by the swapchain and is never used in
                // combination with other usages.
                ASSERT(usage == kPresentTextureUsage);
                // The Vulkan spec has the following note:
                //
                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
                //   automatic visibility operations). To achieve this, the dstAccessMask member of
                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
                //
                // So on the transition to Present we don't need an access flag. The other
                // direction doesn't matter because swapchain textures always start a new frame
                // as uninitialized.
                flags |= 0;
            }

            return flags;
        }
+
        // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage.
        // Used for src/dstStageMask of image barriers; the result is never 0 (Vulkan forbids
        // an empty stage mask) except through the explicit TOP_OF_PIPE early-out below.
        VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
            VkPipelineStageFlags flags = 0;

            if (usage == wgpu::TextureUsage::None) {
                // This only happens when a texture is initially created (and for srcAccessMask) in
                // which case there is no need to wait on anything to stop accessing this texture.
                return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
            }
            if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
            }
            if (usage & wgpu::TextureUsage::TextureBinding) {
                // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
                // introducing FS -> VS dependencies that would prevent parallelization on tiler
                // GPUs
                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            }
            if (usage & wgpu::TextureUsage::StorageBinding) {
                flags |=
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
            }
            if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
                // Depth/stencil attachments are touched by the fragment test stages, color
                // attachments by the color-output stage.
                if (format.HasDepthOrStencil()) {
                    flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
                } else {
                    flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
                }
            }
            if (usage & kPresentTextureUsage) {
                // The present usage is only used internally by the swapchain and is never used in
                // combination with other usages.
                ASSERT(usage == kPresentTextureUsage);
                // The Vulkan spec has the following note:
                //
                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
                //   automatic visibility operations). To achieve this, the dstAccessMask member of
                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
                //
                // So on the transition to Present we use the "bottom of pipe" stage. The other
                // direction doesn't matter because swapchain textures always start a new frame
                // as uninitialized.
                flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
            }

            // A zero value isn't a valid pipeline stage mask
            ASSERT(flags != 0);
            return flags;
        }
+
+ VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
+ wgpu::TextureUsage lastUsage,
+ wgpu::TextureUsage usage,
+ const SubresourceRange& range) {
+ VkImageMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ barrier.pNext = nullptr;
+ barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
+ barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
+ barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
+ barrier.newLayout = VulkanImageLayout(texture, usage);
+ barrier.image = texture->GetHandle();
+ barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
+ barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
+ barrier.subresourceRange.levelCount = range.levelCount;
+ barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
+ barrier.subresourceRange.layerCount = range.layerCount;
+
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ return barrier;
+ }
+
+ void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
+ const Extent3D& size = texture.GetSize();
+
+ info->mipLevels = texture.GetNumMipLevels();
+ info->samples = VulkanSampleCount(texture.GetSampleCount());
+
+ // Fill in the image type, and paper over differences in how the array layer count is
+ // specified between WebGPU and Vulkan.
+ switch (texture.GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ info->imageType = VK_IMAGE_TYPE_1D;
+ info->extent = {size.width, 1, 1};
+ info->arrayLayers = 1;
+ break;
+
+ case wgpu::TextureDimension::e2D:
+ info->imageType = VK_IMAGE_TYPE_2D;
+ info->extent = {size.width, size.height, 1};
+ info->arrayLayers = size.depthOrArrayLayers;
+ break;
+
+ case wgpu::TextureDimension::e3D:
+ info->imageType = VK_IMAGE_TYPE_3D;
+ info->extent = {size.width, size.height, size.depthOrArrayLayers};
+ info->arrayLayers = 1;
+ break;
+ }
+ }
+
+ } // namespace
+
    // Converts Dawn texture formats to Vulkan formats. The mapping is 1:1 except for
    // Depth24Plus/Depth24PlusStencil8, which are "at least" formats whose Vulkan backing
    // depends on what the device supports (see the Depth24PlusStencil8 case below).
    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
        switch (format) {
            case wgpu::TextureFormat::R8Unorm:
                return VK_FORMAT_R8_UNORM;
            case wgpu::TextureFormat::R8Snorm:
                return VK_FORMAT_R8_SNORM;
            case wgpu::TextureFormat::R8Uint:
                return VK_FORMAT_R8_UINT;
            case wgpu::TextureFormat::R8Sint:
                return VK_FORMAT_R8_SINT;

            case wgpu::TextureFormat::R16Uint:
                return VK_FORMAT_R16_UINT;
            case wgpu::TextureFormat::R16Sint:
                return VK_FORMAT_R16_SINT;
            case wgpu::TextureFormat::R16Float:
                return VK_FORMAT_R16_SFLOAT;
            case wgpu::TextureFormat::RG8Unorm:
                return VK_FORMAT_R8G8_UNORM;
            case wgpu::TextureFormat::RG8Snorm:
                return VK_FORMAT_R8G8_SNORM;
            case wgpu::TextureFormat::RG8Uint:
                return VK_FORMAT_R8G8_UINT;
            case wgpu::TextureFormat::RG8Sint:
                return VK_FORMAT_R8G8_SINT;

            case wgpu::TextureFormat::R32Uint:
                return VK_FORMAT_R32_UINT;
            case wgpu::TextureFormat::R32Sint:
                return VK_FORMAT_R32_SINT;
            case wgpu::TextureFormat::R32Float:
                return VK_FORMAT_R32_SFLOAT;
            case wgpu::TextureFormat::RG16Uint:
                return VK_FORMAT_R16G16_UINT;
            case wgpu::TextureFormat::RG16Sint:
                return VK_FORMAT_R16G16_SINT;
            case wgpu::TextureFormat::RG16Float:
                return VK_FORMAT_R16G16_SFLOAT;
            case wgpu::TextureFormat::RGBA8Unorm:
                return VK_FORMAT_R8G8B8A8_UNORM;
            case wgpu::TextureFormat::RGBA8UnormSrgb:
                return VK_FORMAT_R8G8B8A8_SRGB;
            case wgpu::TextureFormat::RGBA8Snorm:
                return VK_FORMAT_R8G8B8A8_SNORM;
            case wgpu::TextureFormat::RGBA8Uint:
                return VK_FORMAT_R8G8B8A8_UINT;
            case wgpu::TextureFormat::RGBA8Sint:
                return VK_FORMAT_R8G8B8A8_SINT;
            case wgpu::TextureFormat::BGRA8Unorm:
                return VK_FORMAT_B8G8R8A8_UNORM;
            case wgpu::TextureFormat::BGRA8UnormSrgb:
                return VK_FORMAT_B8G8R8A8_SRGB;
            case wgpu::TextureFormat::RGB10A2Unorm:
                // Note the component-order flip: WebGPU's RGB10A2 maps to Vulkan's A2B10G10R10
                // packed format.
                return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
            case wgpu::TextureFormat::RG11B10Ufloat:
                return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
            case wgpu::TextureFormat::RGB9E5Ufloat:
                return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;

            case wgpu::TextureFormat::RG32Uint:
                return VK_FORMAT_R32G32_UINT;
            case wgpu::TextureFormat::RG32Sint:
                return VK_FORMAT_R32G32_SINT;
            case wgpu::TextureFormat::RG32Float:
                return VK_FORMAT_R32G32_SFLOAT;
            case wgpu::TextureFormat::RGBA16Uint:
                return VK_FORMAT_R16G16B16A16_UINT;
            case wgpu::TextureFormat::RGBA16Sint:
                return VK_FORMAT_R16G16B16A16_SINT;
            case wgpu::TextureFormat::RGBA16Float:
                return VK_FORMAT_R16G16B16A16_SFLOAT;

            case wgpu::TextureFormat::RGBA32Uint:
                return VK_FORMAT_R32G32B32A32_UINT;
            case wgpu::TextureFormat::RGBA32Sint:
                return VK_FORMAT_R32G32B32A32_SINT;
            case wgpu::TextureFormat::RGBA32Float:
                return VK_FORMAT_R32G32B32A32_SFLOAT;

            case wgpu::TextureFormat::Depth16Unorm:
                return VK_FORMAT_D16_UNORM;
            case wgpu::TextureFormat::Depth32Float:
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24Plus:
                // Depth24Plus only promises "at least 24 bits"; D32_SFLOAT satisfies that and
                // is universally supported.
                return VK_FORMAT_D32_SFLOAT;
            case wgpu::TextureFormat::Depth24PlusStencil8:
                // Depth24PlusStencil8 maps to either of these two formats because WebGPU only
                // requires that one of the two be present. The VulkanUseD32S8 toggle combines
                // the wish of the environment, default to using D32S8, and availability
                // information so we know that the format is available.
                if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
                    return VK_FORMAT_D32_SFLOAT_S8_UINT;
                } else {
                    return VK_FORMAT_D24_UNORM_S8_UINT;
                }
            case wgpu::TextureFormat::Depth24UnormStencil8:
                return VK_FORMAT_D24_UNORM_S8_UINT;
            case wgpu::TextureFormat::Depth32FloatStencil8:
                return VK_FORMAT_D32_SFLOAT_S8_UINT;

            case wgpu::TextureFormat::BC1RGBAUnorm:
                return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
                return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnorm:
                return VK_FORMAT_BC2_UNORM_BLOCK;
            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
                return VK_FORMAT_BC2_SRGB_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnorm:
                return VK_FORMAT_BC3_UNORM_BLOCK;
            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
                return VK_FORMAT_BC3_SRGB_BLOCK;
            case wgpu::TextureFormat::BC4RSnorm:
                return VK_FORMAT_BC4_SNORM_BLOCK;
            case wgpu::TextureFormat::BC4RUnorm:
                return VK_FORMAT_BC4_UNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGSnorm:
                return VK_FORMAT_BC5_SNORM_BLOCK;
            case wgpu::TextureFormat::BC5RGUnorm:
                return VK_FORMAT_BC5_UNORM_BLOCK;
            case wgpu::TextureFormat::BC6HRGBFloat:
                return VK_FORMAT_BC6H_SFLOAT_BLOCK;
            case wgpu::TextureFormat::BC6HRGBUfloat:
                return VK_FORMAT_BC6H_UFLOAT_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnorm:
                return VK_FORMAT_BC7_UNORM_BLOCK;
            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
                return VK_FORMAT_BC7_SRGB_BLOCK;

            case wgpu::TextureFormat::ETC2RGB8Unorm:
                return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
                return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
            case wgpu::TextureFormat::ETC2RGBA8Unorm:
                return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
                return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
            case wgpu::TextureFormat::EACR11Unorm:
                return VK_FORMAT_EAC_R11_UNORM_BLOCK;
            case wgpu::TextureFormat::EACR11Snorm:
                return VK_FORMAT_EAC_R11_SNORM_BLOCK;
            case wgpu::TextureFormat::EACRG11Unorm:
                return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
            case wgpu::TextureFormat::EACRG11Snorm:
                return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;

            case wgpu::TextureFormat::ASTC4x4Unorm:
                return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
                return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC5x4Unorm:
                return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
                return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC5x5Unorm:
                return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
                return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC6x5Unorm:
                return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
                return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC6x6Unorm:
                return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
                return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x5Unorm:
                return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
                return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x6Unorm:
                return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
                return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC8x8Unorm:
                return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
                return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x5Unorm:
                return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
                return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x6Unorm:
                return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
                return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x8Unorm:
                return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
                return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC10x10Unorm:
                return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
                return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC12x10Unorm:
                return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
                return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
            case wgpu::TextureFormat::ASTC12x12Unorm:
                return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
                return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;

            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
                return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;

            // TODO(dawn:666): implement stencil8
            case wgpu::TextureFormat::Stencil8:
            case wgpu::TextureFormat::Undefined:
                break;
        }
        UNREACHABLE();
    }
+
+ // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
+ // between color and depth attachment usages.
+ VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
+ VkImageUsageFlags flags = 0;
+
+ if (usage & wgpu::TextureUsage::CopySrc) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ }
+ if (usage & wgpu::TextureUsage::CopyDst) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+ if (usage & wgpu::TextureUsage::TextureBinding) {
+ flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ // If the sampled texture is a depth/stencil texture, its image layout will be set
+ // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
+ // attachment. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
+ if (format.HasDepthOrStencil() && format.isRenderable) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+ }
+ if (usage & wgpu::TextureUsage::StorageBinding) {
+ flags |= VK_IMAGE_USAGE_STORAGE_BIT;
+ }
+ if (usage & wgpu::TextureUsage::RenderAttachment) {
+ if (format.HasDepthOrStencil()) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ } else {
+ flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+ }
+ if (usage & kReadOnlyRenderAttachment) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ return flags;
+ }
+
    // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
    // layout must match the layout given to various Vulkan operations as well as the layout given
    // to descriptor set writes.
    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
        if (usage == wgpu::TextureUsage::None) {
            return VK_IMAGE_LAYOUT_UNDEFINED;
        }

        if (!wgpu::HasZeroOrOneBits(usage)) {
            // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
            // appear we might need additional special-casing.
            ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));

            // WebGPU requires both aspects to be readonly if the attachment's format does have
            // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
            // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
            // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
            // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
            // it, and WebGPU doesn't need that currently.
            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
        }

        // Usage has a single bit so we can switch on its value directly.
        switch (usage) {
            case wgpu::TextureUsage::CopyDst:
                return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

            // The layout returned here is the one that will be used at bindgroup creation time.
            // The bindgroup's layout must match the runtime layout of the image when it is
            // used via the bindgroup, but we don't know exactly what it will be yet. So we
            // have to prepare for the pessimistic case.
            case wgpu::TextureUsage::TextureBinding:
                // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of texture at the
                // same time.
                if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
                    return VK_IMAGE_LAYOUT_GENERAL;
                }
                // The sampled image can be used as a readonly depth/stencil attachment at the same
                // time if it is a depth/stencil renderable format, so the image layout needs to be
                // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
                if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
                }
                return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            // Vulkan texture copy functions require the image to be in _one_ known layout.
            // Depending on whether parts of the texture have been transitioned to only CopySrc
            // or a combination with something else, the texture could be in a combination of
            // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
            // GENERAL.
            // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
            // once and can instead track subresources so we should lift this limitation.
            case wgpu::TextureUsage::CopySrc:
            // Read-only and write-only storage textures must use general layout because load
            // and store operations on storage images can only be done on the images in
            // VK_IMAGE_LAYOUT_GENERAL layout.
            case wgpu::TextureUsage::StorageBinding:
                return VK_IMAGE_LAYOUT_GENERAL;

            case wgpu::TextureUsage::RenderAttachment:
                if (texture->GetFormat().HasDepthOrStencil()) {
                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
                } else {
                    return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
                }

            case kReadOnlyRenderAttachment:
                return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;

            case kPresentTextureUsage:
                return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

            case wgpu::TextureUsage::None:
                break;
        }
        UNREACHABLE();
    }
+
+ VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
+ switch (sampleCount) {
+ case 1:
+ return VK_SAMPLE_COUNT_1_BIT;
+ case 4:
+ return VK_SAMPLE_COUNT_4_BIT;
+ }
+ UNREACHABLE();
+ }
+
    // Validates that |descriptor| describes a texture that external-memory wrapping supports:
    // only single-sampled, non-mipmapped, single-layer 2D textures can be wrapped.
    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
                                               const TextureDescriptor* descriptor) {
        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
                        "Texture dimension (%s) is not %s.", descriptor->dimension,
                        wgpu::TextureDimension::e2D);

        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
                        descriptor->mipLevelCount);

        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);

        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
                        descriptor->sampleCount);

        return {};
    }
+
+ bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo) {
+ ASSERT(device);
+
+ VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
+ VkImageFormatProperties properties;
+ if (device->fn.GetPhysicalDeviceImageFormatProperties(
+ physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
+ imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
+ &properties) != VK_SUCCESS) {
+ UNREACHABLE();
+ }
+
+ return properties.sampleCounts & imageCreateInfo.samples;
+ }
+
+ // static
+ ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImageUsageFlags extraUsages) {
+ Ref<Texture> texture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+ DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
+ return std::move(texture);
+ }
+
+ // static
+ ResultOrError<Texture*> Texture::CreateFromExternal(
+ Device* device,
+ const ExternalImageDescriptorVk* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ external_memory::Service* externalMemoryService) {
+ Ref<Texture> texture =
+ AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
+ DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
+ return texture.Detach();
+ }
+
+ // static
+ Ref<Texture> Texture::CreateForSwapChain(Device* device,
+ const TextureDescriptor* descriptor,
+ VkImage nativeImage) {
+ Ref<Texture> texture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ texture->InitializeForSwapChain(nativeImage);
+ return texture;
+ }
+
    // Constructor shared by all creation paths; the actual VkImage is set up afterwards by one
    // of the Initialize* methods.
    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
        : TextureBase(device, descriptor, state),
          // A usage of none will make sure the texture is transitioned before its first use as
          // required by the Vulkan spec.
          mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
              (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
                                                   : GetFormat().aspects),
              GetArrayLayers(),
              GetNumMipLevels(),
              wgpu::TextureUsage::None)) {
    }
+
    // Creates the VkImage, allocates device memory for it, binds the two together, and
    // optionally clears the texture to non-zero for testing. Used by Texture::Create.
    MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
        Device* device = ToBackend(GetDevice());

        // Create the Vulkan image "container". We don't need to check that the format supports the
        // combination of sample, usage etc. because validation should have been done in the Dawn
        // frontend already based on the minimum supported formats in the Vulkan spec
        VkImageCreateInfo createInfo = {};
        FillVulkanCreateInfoSizesAndType(*this, &createInfo);

        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;
        createInfo.format = VulkanImageFormat(device, GetFormat().format);
        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
        createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        createInfo.queueFamilyIndexCount = 0;
        createInfo.pQueueFamilyIndices = nullptr;
        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

        ASSERT(IsSampleCountSupported(device, createInfo));

        // Square 2D textures with >= 6 layers could be viewed as cubemaps, so mark the image
        // cube-compatible up front.
        if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
            createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        }

        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
        // also required for the implementation of robust resource initialization.
        createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        DAWN_TRY(CheckVkSuccess(
            device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
            "CreateImage"));

        // Create the image memory and associate it with the container
        VkMemoryRequirements requirements;
        device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);

        DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
                                               requirements, MemoryKind::Opaque));

        DAWN_TRY(CheckVkSuccess(
            device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
                                       ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
                                       mMemoryAllocation.GetOffset()),
            "BindImageMemory"));

        // For testing only: fill the texture with non-zero data to catch missing lazy clears.
        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
            DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
                                  GetAllSubresources(), TextureBase::ClearValue::NonZero));
        }

        SetLabelImpl();

        return {};
    }
+
    // Internally managed, but imported from external handle. Creates the VkImage through the
    // external-memory service; memory binding happens later in BindExternalMemory().
    MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
                                               external_memory::Service* externalMemoryService) {
        VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
        VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
        DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
                                                                    &mSupportsDisjointVkImage),
                        "Creating an image from external memory is not supported.");
        // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
        // multiplanar formats, so we need to correct it to Color here.
        if (ShouldCombineMultiPlaneBarriers()) {
            mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
                ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
                wgpu::TextureUsage::None);
        }

        // The texture must be acquired (with the layouts recorded by the exporter) before use.
        mExternalState = ExternalState::PendingAcquire;

        mPendingAcquireOldLayout = descriptor->releasedOldLayout;
        mPendingAcquireNewLayout = descriptor->releasedNewLayout;

        VkImageCreateInfo baseCreateInfo = {};
        FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);

        baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
        baseCreateInfo.pNext = nullptr;
        baseCreateInfo.format = format;
        baseCreateInfo.usage = usage;
        baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        baseCreateInfo.queueFamilyIndexCount = 0;
        baseCreateInfo.pQueueFamilyIndices = nullptr;

        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
        // also required for the implementation of robust resource initialization.
        baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));

        SetLabelHelper("Dawn_ExternalTexture");

        return {};
    }
+
+    void Texture::InitializeForSwapChain(VkImage nativeImage) {
+        // Adopt the swapchain-owned VkImage. No memory is allocated or bound by Dawn:
+        // mMemoryAllocation stays invalid so DestroyImpl skips the deallocation.
+        mHandle = nativeImage;
+        SetLabelHelper("Dawn_SwapChainTexture");
+    }
+
+    // Binds the externally allocated VkDeviceMemory to the VkImage created in
+    // InitializeFromExternal and, on success, takes ownership of the semaphores used to
+    // synchronize with the external owner of the memory.
+    MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                           VkSemaphore signalSemaphore,
+                                           VkDeviceMemory externalMemoryAllocation,
+                                           std::vector<VkSemaphore> waitSemaphores) {
+        Device* device = ToBackend(GetDevice());
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
+            "BindImageMemory (external)"));
+
+        // Don't clear imported texture if already initialized
+        if (descriptor->isInitialized) {
+            SetIsSubresourceContentInitialized(true, GetAllSubresources());
+        }
+
+        // Success, acquire all the external objects.
+        mExternalAllocation = externalMemoryAllocation;
+        // Signaled when the texture is exported; waited on before Dawn's first submit using it.
+        mSignalSemaphore = signalSemaphore;
+        mWaitRequirements = std::move(waitSemaphores);
+        return {};
+    }
+
+    // Releases ownership of the texture back to the external importer: records a
+    // queue-family release barrier (optionally transitioning to desiredLayout), submits
+    // pending commands signaling mSignalSemaphore, reports the layouts used, and destroys
+    // the texture so it cannot be used again from Dawn.
+    MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
+                                              VkSemaphore* signalSemaphore,
+                                              VkImageLayout* releasedOldLayout,
+                                              VkImageLayout* releasedNewLayout) {
+        Device* device = ToBackend(GetDevice());
+
+        DAWN_INVALID_IF(mExternalState == ExternalState::Released,
+                        "Can't export a signal semaphore from signaled texture %s.", this);
+
+        DAWN_INVALID_IF(
+            mExternalAllocation == VK_NULL_HANDLE,
+            "Can't export a signal semaphore from destroyed or non-external texture %s.", this);
+
+        ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
+
+        // Release the texture
+        mExternalState = ExternalState::Released;
+
+        // External textures are restricted to a single subresource (see the ASSERT below), so
+        // one usage value covers the whole image.
+        Aspect aspects = ComputeAspectsForSubresourceStorage();
+        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+        wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
+
+        VkImageMemoryBarrier barrier;
+        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+        barrier.pNext = nullptr;
+        barrier.image = GetHandle();
+        barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
+        barrier.subresourceRange.baseMipLevel = 0;
+        barrier.subresourceRange.levelCount = 1;
+        barrier.subresourceRange.baseArrayLayer = 0;
+        barrier.subresourceRange.layerCount = 1;
+
+        barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
+        barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
+                                    // specify the dst access mask on the importing queue.
+
+        barrier.oldLayout = VulkanImageLayout(this, usage);
+        if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
+            // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
+            // special value to indicate no layout transition should be done.
+            barrier.newLayout = barrier.oldLayout;
+        } else {
+            barrier.newLayout = desiredLayout;
+        }
+
+        // Queue-family ownership release: graphics queue -> external queue.
+        barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
+        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+
+        VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
+        VkPipelineStageFlags dstStages =
+            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
+                                                // the texture, so pass
+                                                // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
+                                                // the barrier happens-before any usage in the
+                                                // importing queue.
+
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                      nullptr, 0, nullptr, 1, &barrier);
+
+        // Queue submit to signal we are done with the texture
+        recordingContext->signalSemaphores.push_back(mSignalSemaphore);
+        DAWN_TRY(device->SubmitPendingCommands());
+
+        // Write out the layouts and signal semaphore
+        *releasedOldLayout = barrier.oldLayout;
+        *releasedNewLayout = barrier.newLayout;
+        *signalSemaphore = mSignalSemaphore;
+
+        // Ownership of the semaphore is transferred to the caller.
+        mSignalSemaphore = VK_NULL_HANDLE;
+
+        // Destroy the texture so it can't be used again
+        Destroy();
+        return {};
+    }
+
+    // All Vulkan objects are released through DestroyImpl/FencedDeleter, so the destructor
+    // has nothing left to do.
+    Texture::~Texture() = default;
+
+    void Texture::SetLabelHelper(const char* prefix) {
+        // Attach a debug name to the VkImage so it is identifiable in tools such as RenderDoc.
+        Device* device = ToBackend(GetDevice());
+        SetDebugName(device, VK_OBJECT_TYPE_IMAGE, reinterpret_cast<uint64_t&>(mHandle), prefix,
+                     GetLabel());
+    }
+
+    // Dawn API entry point for relabeling; internal textures use a fixed prefix.
+    void Texture::SetLabelImpl() {
+        SetLabelHelper("Dawn_InternalTexture");
+    }
+
+    // Schedules all owned Vulkan objects for fenced deletion and releases the memory
+    // allocation. Only internally-owned textures clean up here; swapchain-owned images are
+    // left untouched.
+    void Texture::DestroyImpl() {
+        if (GetTextureState() == TextureState::OwnedInternal) {
+            Device* device = ToBackend(GetDevice());
+
+            // For textures created from a VkImage, the allocation is kInvalid so the Device knows
+            // to skip the deallocation of the (absence of) VkDeviceMemory.
+            device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+
+            if (mHandle != VK_NULL_HANDLE) {
+                device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+            }
+
+            if (mExternalAllocation != VK_NULL_HANDLE) {
+                device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
+            }
+
+            mHandle = VK_NULL_HANDLE;
+            mExternalAllocation = VK_NULL_HANDLE;
+            // If a signal semaphore exists it should be requested before we delete the texture
+            ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
+        }
+        // For Vulkan, we currently run the base destruction code after the internal changes because
+        // of the dependency on the texture state which the base code overwrites too early.
+        TextureBase::DestroyImpl();
+    }
+
+    // Returns the backing VkImage (VK_NULL_HANDLE once destroyed).
+    VkImage Texture::GetHandle() const {
+        return mHandle;
+    }
+
+    // Patches the barriers produced for an externally-shared texture: on the first use after
+    // import it turns the pending barrier into a queue-family acquire (external -> graphics),
+    // adds an extra layout-transition barrier if the acquired layout differs from the desired
+    // one, and appends the import wait semaphores to the recording context.
+    void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                                  std::vector<VkImageMemoryBarrier>* barriers,
+                                                  size_t transitionBarrierStart) {
+        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+
+        // transitionBarrierStart specify the index where barriers for current transition start in
+        // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
+        // have already added into the vector during current transition.
+        ASSERT(barriers->size() - transitionBarrierStart <= 1);
+
+        if (mExternalState == ExternalState::PendingAcquire) {
+            // Ensure there is a barrier to patch even if the usage transition needed none.
+            if (barriers->size() == transitionBarrierStart) {
+                barriers->push_back(
+                    BuildMemoryBarrier(this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+                                       SubresourceRange::SingleMipAndLayer(
+                                           0, 0, ComputeAspectsForSubresourceStorage())));
+            }
+
+            VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
+            // Transfer texture from external queue to graphics queue
+            barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+            barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
+
+            // srcAccessMask means nothing when importing. Queue transfers require a barrier on
+            // both the importing and exporting queues. The exporting queue should have specified
+            // this.
+            barrier->srcAccessMask = 0;
+
+            // This should be the first barrier after import.
+            ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
+
+            // Save the desired layout. We may need to transition through an intermediate
+            // |mPendingAcquireLayout| first.
+            VkImageLayout desiredLayout = barrier->newLayout;
+
+            bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
+
+            // We don't care about the pending old layout if the texture is uninitialized. The
+            // driver is free to discard it. Likewise, we don't care about the pending new layout if
+            // the texture is uninitialized. We can skip the layout transition.
+            if (!isInitialized) {
+                barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+                barrier->newLayout = desiredLayout;
+            } else {
+                barrier->oldLayout = mPendingAcquireOldLayout;
+                barrier->newLayout = mPendingAcquireNewLayout;
+            }
+
+            // If these are unequal, we need an another barrier to transition the layout.
+            if (barrier->newLayout != desiredLayout) {
+                VkImageMemoryBarrier layoutBarrier;
+                layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+                layoutBarrier.pNext = nullptr;
+                layoutBarrier.image = GetHandle();
+                layoutBarrier.subresourceRange = barrier->subresourceRange;
+
+                // Transition from the acquired new layout to the desired layout.
+                layoutBarrier.oldLayout = barrier->newLayout;
+                layoutBarrier.newLayout = desiredLayout;
+
+                // We already transitioned these.
+                layoutBarrier.srcAccessMask = 0;
+                layoutBarrier.dstAccessMask = 0;
+                layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+                layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+
+                barriers->push_back(layoutBarrier);
+            }
+
+            mExternalState = ExternalState::Acquired;
+        }
+
+        mLastExternalState = mExternalState;
+
+        // Any semaphores handed over at import time must be waited on by the next submit.
+        recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
+                                                mWaitRequirements.begin(), mWaitRequirements.end());
+        mWaitRequirements.clear();
+    }
+
+    bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
+        // A barrier can only be elided for a repeated read-only access while the external
+        // ownership state has not changed in between.
+        const bool lastWasReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
+        return lastWasReadOnly && lastUsage == usage && mLastExternalState == mExternalState;
+    }
+
+    // Base Vulkan cannot transition the depth and stencil aspects of an image independently.
+    // To cope, the usages of both planes are folded into a single plane of a new
+    // SubresourceStorage<TextureUsage>, and the resulting barriers target DEPTH | STENCIL at
+    // once because the SubresourceRange uses Aspect::CombinedDepthStencil.
+    bool Texture::ShouldCombineDepthStencilBarriers() const {
+        return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
+    }
+
+    // Per the Vulkan spec: "If image has a single-plane color format or is not disjoint, then
+    // the aspectMask member of subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT."
+    // Multi-planar images are currently only imported in the non-disjoint way, so their
+    // barriers must be issued against the color aspect rather than the individual planes.
+    bool Texture::ShouldCombineMultiPlaneBarriers() const {
+        // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
+        ASSERT(!mSupportsDisjointVkImage);
+        return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
+    }
+
+    // Picks the aspect set under which usages are tracked in mSubresourceLastUsages,
+    // accounting for the combined depth-stencil and multi-planar workarounds above.
+    Aspect Texture::ComputeAspectsForSubresourceStorage() const {
+        if (ShouldCombineDepthStencilBarriers()) {
+            return Aspect::CombinedDepthStencil;
+        }
+        if (ShouldCombineMultiPlaneBarriers()) {
+            // Non-disjoint multi-planar images are tracked through the color aspect.
+            return Aspect::Color;
+        }
+        return GetFormat().aspects;
+    }
+
+    void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                         const TextureSubresourceUsage& textureUsages,
+                                         std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                         VkPipelineStageFlags* srcStages,
+                                         VkPipelineStageFlags* dstStages) {
+        // When no combined-aspect workaround applies, the per-aspect usages can be used as-is.
+        if (!ShouldCombineBarriers()) {
+            TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
+                                       dstStages);
+            return;
+        }
+
+        // Fold every per-aspect usage into a single-aspect storage (CombinedDepthStencil or
+        // Color) before computing barriers, ORing usages that land on the same subresource.
+        Aspect mergedAspect = ComputeAspectsForSubresourceStorage();
+        SubresourceStorage<wgpu::TextureUsage> mergedUsages(mergedAspect, GetArrayLayers(),
+                                                            GetNumMipLevels());
+        textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+            SubresourceRange remappedRange = range;
+            remappedRange.aspects = mergedAspect;
+            mergedUsages.Update(remappedRange,
+                                [&](const SubresourceRange&, wgpu::TextureUsage* mergedUsage) {
+                                    *mergedUsage |= usage;
+                                });
+        });
+
+        TransitionUsageForPassImpl(recordingContext, mergedUsages, imageBarriers, srcStages,
+                                   dstStages);
+    }
+
+    // Merges the pass's per-subresource usages into mSubresourceLastUsages, appending one
+    // barrier per subresource range whose usage actually changes, and ORs the corresponding
+    // source/destination pipeline stages into srcStages/dstStages.
+    void Texture::TransitionUsageForPassImpl(
+        CommandRecordingContext* recordingContext,
+        const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        // Remember where this transition's barriers begin so the external-usage tweak below
+        // can find (or create) the barrier it needs to patch.
+        size_t transitionBarrierStart = imageBarriers->size();
+        const Format& format = GetFormat();
+
+        wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
+        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+
+        mSubresourceLastUsages->Merge(
+            subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
+                                   const wgpu::TextureUsage& newUsage) {
+                // Skip subresources the pass doesn't touch or that can be reused directly.
+                if (newUsage == wgpu::TextureUsage::None ||
+                    CanReuseWithoutBarrier(*lastUsage, newUsage)) {
+                    return;
+                }
+
+                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
+
+                allLastUsages |= *lastUsage;
+                allUsages |= newUsage;
+
+                *lastUsage = newUsage;
+            });
+
+        if (mExternalState != ExternalState::InternalOnly) {
+            TweakTransitionForExternalUsage(recordingContext, imageBarriers,
+                                            transitionBarrierStart);
+        }
+
+        *srcStages |= VulkanPipelineStage(allLastUsages, format);
+        *dstStages |= VulkanPipelineStage(allUsages, format);
+    }
+
+    void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                     wgpu::TextureUsage usage,
+                                     const SubresourceRange& range) {
+        // Compute the barriers needed to move `range` to `usage`, then record them
+        // immediately on the pending command buffer.
+        std::vector<VkImageMemoryBarrier> memoryBarriers;
+        VkPipelineStageFlags sourceStages = 0;
+        VkPipelineStageFlags destinationStages = 0;
+
+        TransitionUsageAndGetResourceBarrier(usage, range, &memoryBarriers, &sourceStages,
+                                             &destinationStages);
+
+        // Externally-shared textures may need a queue-family acquire and wait semaphores.
+        if (mExternalState != ExternalState::InternalOnly) {
+            TweakTransitionForExternalUsage(recordingContext, &memoryBarriers, 0);
+        }
+
+        if (memoryBarriers.empty()) {
+            return;
+        }
+        ASSERT(sourceStages != 0 && destinationStages != 0);
+        ToBackend(GetDevice())
+            ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, sourceStages,
+                                    destinationStages, 0, 0, nullptr, 0, nullptr,
+                                    memoryBarriers.size(), memoryBarriers.data());
+    }
+
+    void Texture::TransitionUsageAndGetResourceBarrier(
+        wgpu::TextureUsage usage,
+        const SubresourceRange& range,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        // Remap the requested aspects onto the combined aspect when the depth-stencil or
+        // multi-planar workaround is active, then defer to the shared implementation.
+        SubresourceRange adjustedRange = range;
+        if (ShouldCombineBarriers()) {
+            adjustedRange.aspects = ComputeAspectsForSubresourceStorage();
+        }
+        TransitionUsageAndGetResourceBarrierImpl(usage, adjustedRange, imageBarriers, srcStages,
+                                                 dstStages);
+    }
+
+    // Updates mSubresourceLastUsages for `range` to `usage`, appending one barrier per
+    // subresource range whose previous usage requires one, and ORs the pipeline stages of
+    // the old and new usages into srcStages/dstStages.
+    void Texture::TransitionUsageAndGetResourceBarrierImpl(
+        wgpu::TextureUsage usage,
+        const SubresourceRange& range,
+        std::vector<VkImageMemoryBarrier>* imageBarriers,
+        VkPipelineStageFlags* srcStages,
+        VkPipelineStageFlags* dstStages) {
+        ASSERT(imageBarriers != nullptr);
+        const Format& format = GetFormat();
+
+        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+        mSubresourceLastUsages->Update(
+            range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
+                // Back-to-back read-only accesses in the same state need no barrier.
+                if (CanReuseWithoutBarrier(*lastUsage, usage)) {
+                    return;
+                }
+
+                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
+
+                allLastUsages |= *lastUsage;
+                *lastUsage = usage;
+            });
+
+        *srcStages |= VulkanPipelineStage(allLastUsages, format);
+        *dstStages |= VulkanPipelineStage(usage, format);
+    }
+
+    // Clears the given subresources to all-zero or all-one values. Compressed formats are
+    // cleared via a buffer-to-image copy (clear commands don't support them); all other
+    // formats use vkCmdClearColorImage / vkCmdClearDepthStencilImage. When clearValue is
+    // Zero this doubles as the lazy-clear path and marks the range initialized.
+    MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
+                                     const SubresourceRange& range,
+                                     TextureBase::ClearValue clearValue) {
+        Device* device = ToBackend(GetDevice());
+
+        // Per-component clear values for each base type (NonZero clears use 1 for testing).
+        const bool isZero = clearValue == TextureBase::ClearValue::Zero;
+        uint32_t uClearColor = isZero ? 0 : 1;
+        int32_t sClearColor = isZero ? 0 : 1;
+        float fClearColor = isZero ? 0.f : 1.f;
+
+        // Clear commands require the image to be in TRANSFER_DST_OPTIMAL.
+        TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+
+        VkImageSubresourceRange imageRange = {};
+        imageRange.levelCount = 1;
+        imageRange.layerCount = 1;
+
+        if (GetFormat().isCompressed) {
+            if (range.aspects == Aspect::None) {
+                return {};
+            }
+            // need to clear the texture with a copy from buffer
+            ASSERT(range.aspects == Aspect::Color);
+            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
+
+            // Size the staging buffer for the largest (base) mip; smaller mips reuse it.
+            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+            uint32_t bytesPerRow =
+                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+                      device->GetOptimalBytesPerRowAlignment());
+            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+                                  largestMipSize.depthOrArrayLayers;
+            DynamicUploader* uploader = device->GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                               blockInfo.byteSize));
+            memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
+
+            std::vector<VkBufferImageCopy> regions;
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                Extent3D copySize = GetMipLevelPhysicalSize(level);
+                imageRange.baseMipLevel = level;
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    TextureDataLayout dataLayout;
+                    dataLayout.offset = uploadHandle.startOffset;
+                    dataLayout.rowsPerImage = copySize.height / blockInfo.height;
+                    dataLayout.bytesPerRow = bytesPerRow;
+                    TextureCopy textureCopy;
+                    textureCopy.aspect = range.aspects;
+                    textureCopy.mipLevel = level;
+                    textureCopy.origin = {0, 0, layer};
+                    textureCopy.texture = this;
+
+                    regions.push_back(
+                        ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
+                }
+            }
+            // One batched copy covers every level/layer that needed clearing.
+            device->fn.CmdCopyBufferToImage(
+                recordingContext->commandBuffer,
+                ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
+                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
+        } else {
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                imageRange.baseMipLevel = level;
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    // Collect the aspects of this subresource that actually need clearing.
+                    Aspect aspects = Aspect::None;
+                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                        if (clearValue == TextureBase::ClearValue::Zero &&
+                            IsSubresourceContentInitialized(
+                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                            // Skip lazy clears if already initialized.
+                            continue;
+                        }
+                        aspects |= aspect;
+                    }
+
+                    if (aspects == Aspect::None) {
+                        continue;
+                    }
+
+                    imageRange.aspectMask = VulkanAspectMask(aspects);
+                    imageRange.baseArrayLayer = layer;
+
+                    if (aspects &
+                        (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
+                        VkClearDepthStencilValue clearDepthStencilValue[1];
+                        clearDepthStencilValue[0].depth = fClearColor;
+                        clearDepthStencilValue[0].stencil = uClearColor;
+                        device->fn.CmdClearDepthStencilImage(
+                            recordingContext->commandBuffer, GetHandle(),
+                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
+                            &imageRange);
+                    } else {
+                        ASSERT(aspects == Aspect::Color);
+                        // Fill all four components with the value matching the format's base type.
+                        VkClearColorValue clearColorValue;
+                        switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                            case wgpu::TextureComponentType::Float:
+                                clearColorValue.float32[0] = fClearColor;
+                                clearColorValue.float32[1] = fClearColor;
+                                clearColorValue.float32[2] = fClearColor;
+                                clearColorValue.float32[3] = fClearColor;
+                                break;
+                            case wgpu::TextureComponentType::Sint:
+                                clearColorValue.int32[0] = sClearColor;
+                                clearColorValue.int32[1] = sClearColor;
+                                clearColorValue.int32[2] = sClearColor;
+                                clearColorValue.int32[3] = sClearColor;
+                                break;
+                            case wgpu::TextureComponentType::Uint:
+                                clearColorValue.uint32[0] = uClearColor;
+                                clearColorValue.uint32[1] = uClearColor;
+                                clearColorValue.uint32[2] = uClearColor;
+                                clearColorValue.uint32[3] = uClearColor;
+                                break;
+                            case wgpu::TextureComponentType::DepthComparison:
+                                UNREACHABLE();
+                        }
+                        device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
+                                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                      &clearColorValue, 1, &imageRange);
+                    }
+                }
+            }
+        }
+
+        if (clearValue == TextureBase::ClearValue::Zero) {
+            SetIsSubresourceContentInitialized(true, range);
+            device->IncrementLazyClearCountForTesting();
+        }
+        return {};
+    }
+
+    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                      const SubresourceRange& range) {
+        // Lazy clears only happen when the corresponding toggle is enabled.
+        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+            return;
+        }
+        if (IsSubresourceContentInitialized(range)) {
+            return;
+        }
+        // If subresource has not been initialized, clear it to black as it could contain dirty
+        // bits from recycled memory
+        GetDevice()->ConsumedError(
+            ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+    }
+
+    // Reports the VkImageLayout the swapchain texture currently is in, derived from the last
+    // tracked usage of its single color subresource.
+    VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
+        ASSERT(GetFormat().aspects == Aspect::Color);
+        wgpu::TextureUsage lastUsage = mSubresourceLastUsages->Get(Aspect::Color, 0, 0);
+        return VulkanImageLayout(this, lastUsage);
+    }
+
+    // static
+    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                        const TextureViewDescriptor* descriptor) {
+        // Two-phase construction: allocate the view, then run the fallible initialization.
+        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+        DAWN_TRY(view->Initialize(descriptor));
+        return view;
+    }
+
+    // Creates the VkImageView for this view. Returns early (leaving mHandle null) when a
+    // view is unnecessary or impossible: copy-only usage, or an already-destroyed texture.
+    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+        if ((GetTexture()->GetUsage() &
+             ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
+            // If the texture view has no other usage than CopySrc and CopyDst, then it can't
+            // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
+            // validation errors warn if you create such a vkImageView, so return early.
+            return {};
+        }
+
+        // Texture could be destroyed by the time we make a view.
+        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+            return {};
+        }
+
+        Device* device = ToBackend(GetTexture()->GetDevice());
+
+        VkImageViewCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.image = ToBackend(GetTexture())->GetHandle();
+        createInfo.viewType = VulkanImageViewType(descriptor->dimension);
+        createInfo.format = VulkanImageFormat(device, descriptor->format);
+        // Identity swizzle; WebGPU has no component-swizzle concept.
+        createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+                                                   VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
+
+        const SubresourceRange& subresources = GetSubresourceRange();
+        createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
+        createInfo.subresourceRange.levelCount = subresources.levelCount;
+        createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
+        createInfo.subresourceRange.layerCount = subresources.layerCount;
+        createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
+
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+            "CreateImageView"));
+
+        SetLabelImpl();
+
+        return {};
+    }
+
+    // The VkImageView is released through DestroyImpl/FencedDeleter; nothing to do here.
+    TextureView::~TextureView() = default;
+
+    void TextureView::DestroyImpl() {
+        if (mHandle == VK_NULL_HANDLE) {
+            return;
+        }
+        // Defer destruction of the view until the GPU has finished using it.
+        Device* device = ToBackend(GetTexture()->GetDevice());
+        device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
+    }
+
+    // Returns the backing VkImageView (VK_NULL_HANDLE for copy-only or destroyed views).
+    VkImageView TextureView::GetHandle() const {
+        return mHandle;
+    }
+
+    void TextureView::SetLabelImpl() {
+        // Attach a debug name to the VkImageView for GPU debugging tools.
+        Device* device = ToBackend(GetDevice());
+        SetDebugName(device, VK_OBJECT_TYPE_IMAGE_VIEW, reinterpret_cast<uint64_t&>(mHandle),
+                     "Dawn_InternalTextureView", GetLabel());
+    }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h
new file mode 100644
index 00000000000..2452ade02cc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/TextureVk.h
@@ -0,0 +1,197 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_TEXTUREVK_H_
+#define DAWNNATIVE_VULKAN_TEXTUREVK_H_
+
+#include "dawn/native/Texture.h"
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/PassResourceUsage.h"
+#include "dawn/native/ResourceMemoryAllocation.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native::vulkan {
+
+ struct CommandRecordingContext;
+ class Device;
+ class Texture;
+
+ VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
+ VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
+ VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
+ VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
+
+ MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
+ const TextureDescriptor* descriptor);
+
+ bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo);
+
+    // Vulkan implementation of a WebGPU texture. Wraps a VkImage that is either allocated
+    // internally, imported from external memory, or owned by a swapchain, and tracks the
+    // per-subresource usage state needed to emit VkImageMemoryBarriers.
+    class Texture final : public TextureBase {
+      public:
+        // Used to create a regular texture from a descriptor.
+        static ResultOrError<Ref<Texture>> Create(Device* device,
+                                                  const TextureDescriptor* descriptor,
+                                                  VkImageUsageFlags extraUsages = 0);
+
+        // Creates a texture and initializes it with a VkImage that references an external memory
+        // object. Before the texture can be used, the VkDeviceMemory associated with the external
+        // image must be bound via Texture::BindExternalMemory.
+        static ResultOrError<Texture*> CreateFromExternal(
+            Device* device,
+            const ExternalImageDescriptorVk* descriptor,
+            const TextureDescriptor* textureDescriptor,
+            external_memory::Service* externalMemoryService);
+
+        // Creates a texture that wraps a swapchain-allocated VkImage.
+        static Ref<Texture> CreateForSwapChain(Device* device,
+                                               const TextureDescriptor* descriptor,
+                                               VkImage nativeImage);
+
+        VkImage GetHandle() const;
+
+        // Transitions the texture to be used as `usage`, recording any necessary barrier in
+        // `commands`.
+        // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+        void TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                wgpu::TextureUsage usage,
+                                const SubresourceRange& range);
+        void TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                    const TextureSubresourceUsage& textureUsages,
+                                    std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                    VkPipelineStageFlags* srcStages,
+                                    VkPipelineStageFlags* dstStages);
+
+        // Lazily clears the range to zero if it was never written (and the toggle is on).
+        void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                 const SubresourceRange& range);
+
+        VkImageLayout GetCurrentLayoutForSwapChain() const;
+
+        // Binds externally allocated memory to the VkImage and on success, takes ownership of
+        // semaphores.
+        MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                      VkSemaphore signalSemaphore,
+                                      VkDeviceMemory externalMemoryAllocation,
+                                      std::vector<VkSemaphore> waitSemaphores);
+
+        // Releases the texture back to the external importer; see TextureVk.cpp for details.
+        MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
+                                         VkSemaphore* signalSemaphore,
+                                         VkImageLayout* releasedOldLayout,
+                                         VkImageLayout* releasedNewLayout);
+
+        // Sets the VkImage debug name to "<prefix>_<label>".
+        void SetLabelHelper(const char* prefix);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+      private:
+        ~Texture() override;
+        Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+
+        MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
+        MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+                                          external_memory::Service* externalMemoryService);
+        void InitializeForSwapChain(VkImage nativeImage);
+
+        void DestroyImpl() override;
+        MaybeError ClearTexture(CommandRecordingContext* recordingContext,
+                                const SubresourceRange& range,
+                                TextureBase::ClearValue);
+
+        // Implementation details of the barrier computations for the texture.
+        void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+                                                  const SubresourceRange& range,
+                                                  std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                                  VkPipelineStageFlags* srcStages,
+                                                  VkPipelineStageFlags* dstStages);
+        void TransitionUsageForPassImpl(
+            CommandRecordingContext* recordingContext,
+            const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+            std::vector<VkImageMemoryBarrier>* imageBarriers,
+            VkPipelineStageFlags* srcStages,
+            VkPipelineStageFlags* dstStages);
+        void TransitionUsageAndGetResourceBarrierImpl(
+            wgpu::TextureUsage usage,
+            const SubresourceRange& range,
+            std::vector<VkImageMemoryBarrier>* imageBarriers,
+            VkPipelineStageFlags* srcStages,
+            VkPipelineStageFlags* dstStages);
+        void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                             std::vector<VkImageMemoryBarrier>* barriers,
+                                             size_t transitionBarrierStart);
+        bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
+
+        // In base Vulkan, Depth and stencil can only be transitioned together. This function
+        // indicates whether we should combine depth and stencil barriers to accommodate this
+        // limitation.
+        bool ShouldCombineDepthStencilBarriers() const;
+
+        // This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
+        // VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
+        bool ShouldCombineMultiPlaneBarriers() const;
+
+        bool ShouldCombineBarriers() const {
+            return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
+        }
+
+        // Compute the Aspects of the SubresourceStoage for this texture depending on whether we're
+        // doing the workaround for combined depth and stencil barriers, or combining multi-plane
+        // barriers.
+        Aspect ComputeAspectsForSubresourceStorage() const;
+
+        // VkImage plus, for internal textures, its backing memory. mExternalAllocation is only
+        // non-null for textures imported via BindExternalMemory.
+        VkImage mHandle = VK_NULL_HANDLE;
+        ResourceMemoryAllocation mMemoryAllocation;
+        VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
+
+        // State machine for externally-shared textures: imported textures start PendingAcquire,
+        // become Acquired on first use, and Released after ExportExternalTexture.
+        enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
+        ExternalState mExternalState = ExternalState::InternalOnly;
+        ExternalState mLastExternalState = ExternalState::InternalOnly;
+
+        // Layouts recorded by the exporting side, consumed by the acquire barrier.
+        VkImageLayout mPendingAcquireOldLayout;
+        VkImageLayout mPendingAcquireNewLayout;
+
+        // Semaphore signaled on export, and semaphores to wait on before first use.
+        VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
+        std::vector<VkSemaphore> mWaitRequirements;
+
+        // Note that in early Vulkan versions it is not possible to transition depth and stencil
+        // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
+        // storage.
+        std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
+
+        // Reported by the external-memory service; disjoint images are currently unsupported.
+        bool mSupportsDisjointVkImage = false;
+    };
+
+    // Vulkan implementation of a WebGPU texture view, wrapping a VkImageView. The handle may
+    // stay null when the texture's usage makes a view unnecessary (see Initialize).
+    class TextureView final : public TextureViewBase {
+      public:
+        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                      const TextureViewDescriptor* descriptor);
+        VkImageView GetHandle() const;
+
+      private:
+        ~TextureView() override;
+        void DestroyImpl() override;
+        using TextureViewBase::TextureViewBase;
+        // Creates the VkImageView; may legitimately leave mHandle null.
+        MaybeError Initialize(const TextureViewDescriptor* descriptor);
+
+        // Dawn API
+        void SetLabelImpl() override;
+
+        VkImageView mHandle = VK_NULL_HANDLE;
+    };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_TEXTUREVK_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp
new file mode 100644
index 00000000000..c5290d91141
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.cpp
@@ -0,0 +1,273 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/UtilsVulkan.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/EnumMaskIterator.h"
+#include "dawn/native/Format.h"
+#include "dawn/native/Pipeline.h"
+#include "dawn/native/ShaderModule.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/Forward.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+namespace dawn::native::vulkan {
+
+ VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
+ switch (op) {
+ case wgpu::CompareFunction::Never:
+ return VK_COMPARE_OP_NEVER;
+ case wgpu::CompareFunction::Less:
+ return VK_COMPARE_OP_LESS;
+ case wgpu::CompareFunction::LessEqual:
+ return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case wgpu::CompareFunction::Greater:
+ return VK_COMPARE_OP_GREATER;
+ case wgpu::CompareFunction::GreaterEqual:
+ return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case wgpu::CompareFunction::Equal:
+ return VK_COMPARE_OP_EQUAL;
+ case wgpu::CompareFunction::NotEqual:
+ return VK_COMPARE_OP_NOT_EQUAL;
+ case wgpu::CompareFunction::Always:
+ return VK_COMPARE_OP_ALWAYS;
+
+ case wgpu::CompareFunction::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ // Convert Dawn texture aspects to Vulkan texture aspect flags
+ VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
+ VkImageAspectFlags flags = 0;
+ for (Aspect aspect : IterateEnumMask(aspects)) {
+ switch (aspect) {
+ case Aspect::Color:
+ flags |= VK_IMAGE_ASPECT_COLOR_BIT;
+ break;
+ case Aspect::Depth:
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ break;
+ case Aspect::Stencil:
+ flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ break;
+
+ case Aspect::CombinedDepthStencil:
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+ break;
+
+ case Aspect::Plane0:
+ flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
+ break;
+ case Aspect::Plane1:
+ flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
+ break;
+
+ case Aspect::None:
+ UNREACHABLE();
+ }
+ }
+ return flags;
+ }
+
+ // Vulkan SPEC requires the source/destination region specified by each element of
+ // pRegions must be a region that is contained within srcImage/dstImage. Here the size of
+ // the image refers to the virtual size, while Dawn validates texture copy extent with the
+ // physical size, so we need to re-calculate the texture copy extent to ensure it should fit
+ // in the virtual size of the subresource.
+ Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+ Extent3D validTextureCopyExtent = copySize;
+ const TextureBase* texture = textureCopy.texture.Get();
+ Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+ ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+ ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+ if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+ }
+ if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+ ASSERT(texture->GetFormat().isCompressed);
+ validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+ }
+
+ return validTextureCopyExtent;
+ }
+
+ VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
+ TextureDataLayout passDataLayout;
+ passDataLayout.offset = bufferCopy.offset;
+ passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+ passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+ return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+ }
+
    // Builds the VkBufferImageCopy describing a buffer<->texture transfer of |copySize|
    // texels at |textureCopy|, with buffer data laid out according to |dataLayout|.
    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
                                                   const TextureCopy& textureCopy,
                                                   const Extent3D& copySize) {
        const Texture* texture = ToBackend(textureCopy.texture.Get());

        VkBufferImageCopy region;

        region.bufferOffset = dataLayout.offset;
        // In Vulkan the row length is in texels while it is in bytes for Dawn
        const TexelBlockInfo& blockInfo =
            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
        ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
        region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
        region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;

        region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
        region.imageSubresource.mipLevel = textureCopy.mipLevel;

        switch (textureCopy.texture->GetDimension()) {
            case wgpu::TextureDimension::e1D:
                // 1D textures have a single layer and a height/depth of 1.
                ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
                region.imageOffset.x = textureCopy.origin.x;
                region.imageOffset.y = 0;
                region.imageOffset.z = 0;
                region.imageSubresource.baseArrayLayer = 0;
                region.imageSubresource.layerCount = 1;

                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
                region.imageExtent.width = copySize.width;
                region.imageExtent.height = 1;
                region.imageExtent.depth = 1;
                break;

            case wgpu::TextureDimension::e2D: {
                // For 2D textures, origin.z / depthOrArrayLayers select array layers.
                region.imageOffset.x = textureCopy.origin.x;
                region.imageOffset.y = textureCopy.origin.y;
                region.imageOffset.z = 0;
                region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
                region.imageSubresource.layerCount = copySize.depthOrArrayLayers;

                // Clamp to the virtual subresource size for compressed formats (see
                // ComputeTextureCopyExtent).
                Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
                region.imageExtent.width = imageExtent.width;
                region.imageExtent.height = imageExtent.height;
                region.imageExtent.depth = 1;
                break;
            }

            case wgpu::TextureDimension::e3D: {
                // For 3D textures, depthOrArrayLayers is the copy depth; there is one layer.
                region.imageOffset.x = textureCopy.origin.x;
                region.imageOffset.y = textureCopy.origin.y;
                region.imageOffset.z = textureCopy.origin.z;
                region.imageSubresource.baseArrayLayer = 0;
                region.imageSubresource.layerCount = 1;

                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
                region.imageExtent.width = copySize.width;
                region.imageExtent.height = copySize.height;
                region.imageExtent.depth = copySize.depthOrArrayLayers;
                break;
            }
        }

        return region;
    }
+
+ void SetDebugName(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label) {
+ if (!objectHandle) {
+ return;
+ }
+
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ VkDebugUtilsObjectNameInfoEXT objectNameInfo;
+ objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ objectNameInfo.pNext = nullptr;
+ objectNameInfo.objectType = objectType;
+ objectNameInfo.objectHandle = objectHandle;
+
+ if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+ objectNameInfo.pObjectName = prefix;
+ device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+ return;
+ }
+
+ std::string objectName = prefix;
+ objectName += "_";
+ objectName += label;
+ objectNameInfo.pObjectName = objectName.c_str();
+ device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+ }
+ }
+
    // Fills |specializationInfo| with the pipeline-overridable constants of
    // |programmableStage|, returning it, or returns nullptr when the stage has none.
    // |specializationDataEntries| and |specializationMapEntries| back the pointers stored
    // in |specializationInfo| and must outlive the vkCreate*Pipelines call consuming it.
    VkSpecializationInfo* GetVkSpecializationInfo(
        const ProgrammableStage& programmableStage,
        VkSpecializationInfo* specializationInfo,
        std::vector<OverridableConstantScalar>* specializationDataEntries,
        std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
        ASSERT(specializationInfo);
        ASSERT(specializationDataEntries);
        ASSERT(specializationMapEntries);

        if (programmableStage.constants.size() == 0) {
            return nullptr;
        }

        const EntryPointMetadata& entryPointMetaData =
            programmableStage.module->GetEntryPoint(programmableStage.entryPoint);

        for (const auto& pipelineConstant : programmableStage.constants) {
            const std::string& identifier = pipelineConstant.first;
            double value = pipelineConstant.second;

            // This is already validated so `identifier` must exist
            const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);

            // Each map entry's byte offset is the entry's position in the data vector.
            // NOTE(review): this assumes both vectors start empty — confirm with callers.
            specializationMapEntries->push_back(
                VkSpecializationMapEntry{moduleConstant.id,
                                         static_cast<uint32_t>(specializationDataEntries->size() *
                                                               sizeof(OverridableConstantScalar)),
                                         sizeof(OverridableConstantScalar)});

            // Convert the double-typed pipeline constant to the type declared in the shader.
            OverridableConstantScalar entry{};
            switch (moduleConstant.type) {
                case EntryPointMetadata::OverridableConstant::Type::Boolean:
                    entry.b = static_cast<int32_t>(value);
                    break;
                case EntryPointMetadata::OverridableConstant::Type::Float32:
                    entry.f32 = static_cast<float>(value);
                    break;
                case EntryPointMetadata::OverridableConstant::Type::Int32:
                    entry.i32 = static_cast<int32_t>(value);
                    break;
                case EntryPointMetadata::OverridableConstant::Type::Uint32:
                    entry.u32 = static_cast<uint32_t>(value);
                    break;
                default:
                    UNREACHABLE();
            }
            specializationDataEntries->push_back(entry);
        }

        specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
        specializationInfo->pMapEntries = specializationMapEntries->data();
        specializationInfo->dataSize =
            specializationDataEntries->size() * sizeof(OverridableConstantScalar);
        specializationInfo->pData = specializationDataEntries->data();

        return specializationInfo;
    }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h
new file mode 100644
index 00000000000..b13e03c1960
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/UtilsVulkan.h
@@ -0,0 +1,121 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_UTILSVULKAN_H_
+#define DAWNNATIVE_VULKAN_UTILSVULKAN_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Commands.h"
+#include "dawn/native/dawn_platform.h"
+
+namespace dawn::native {
+ struct ProgrammableStage;
+ union OverridableConstantScalar;
+} // namespace dawn::native
+
+namespace dawn::native::vulkan {
+
+ class Device;
+
+ // A Helper type used to build a pNext chain of extension structs.
+ // Usage is:
+ // 1) Create instance, passing the address of the first struct in the chain. This requires
+ // pNext to be nullptr. If you already have a chain you need to pass a pointer to the tail
+ // of it.
+ //
+ // 2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
+ //
+ // 3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
+ // with a given VkStructureType value while appending it to the chain.
+ //
+ // Examples:
+    //    VkPhysicalDeviceFeatures2 features2 = {
+ // .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+ // .pNext = nullptr,
+ // };
+ //
+ // PNextChainBuilder featuresChain(&features2);
+ //
+ // featuresChain.Add(&featuresExtensions.subgroupSizeControl,
+ // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+ //
+ struct PNextChainBuilder {
+ // Constructor takes the address of a Vulkan structure instance, and
+ // walks its pNext chain to record the current location of its tail.
+ //
+ // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
+ // which is why the VkBaseOutStructure* casts below are necessary.
+ template <typename VK_STRUCT_TYPE>
+ explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
+ : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
+ ASSERT(head->pNext == nullptr);
+ }
+
+ // Add one item to the chain. |vk_struct| must be a Vulkan structure
+ // that is already initialized.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct) {
+ // Sanity checks to ensure proper type safety.
+ static_assert(
+ offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+ offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+ "Argument type is not a proper Vulkan structure type");
+ vkStruct->pNext = nullptr;
+
+ mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+ mCurrent = mCurrent->pNext;
+ }
+
+ // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
+ template <typename VK_STRUCT_TYPE>
+ void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
+ vkStruct->sType = sType;
+ Add(vkStruct);
+ }
+
+ private:
+ VkBaseOutStructure* mCurrent;
+ };
+
+ VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
+
+ VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
+
+ Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
+
+ VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+ VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
+
+ void SetDebugName(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label = "");
+
+ // Returns nullptr or &specializationInfo
+    // specializationInfo, specializationDataEntries, and specializationMapEntries need to
+    // stay alive at least until the VkSpecializationInfo is passed into Vulkan Create*Pipelines
+ VkSpecializationInfo* GetVkSpecializationInfo(
+ const ProgrammableStage& programmableStage,
+ VkSpecializationInfo* specializationInfo,
+ std::vector<OverridableConstantScalar>* specializationDataEntries,
+ std::vector<VkSpecializationMapEntry>* specializationMapEntries);
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_UTILSVULKAN_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp
new file mode 100644
index 00000000000..e8f630a824e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanBackend.cpp
@@ -0,0 +1,129 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// VulkanBackend.cpp: contains the definition of symbols exported by VulkanBackend.h so that they
+// can be compiled twice: once export (shared library), once not exported (static library)
+
+// Include vulkan_platform.h before VulkanBackend.h includes vulkan.h so that we use our version
+// of the non-dispatchable handles.
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/VulkanBackend.h"
+
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/NativeSwapChainImplVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+
+namespace dawn::native::vulkan {
+
+ VkInstance GetInstance(WGPUDevice device) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ return backendDevice->GetVkInstance();
+ }
+
+ DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
+ const char* pName) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
+ }
+
+ // Explicitly export this function because it uses the "native" type for surfaces while the
+ // header as seen in this file uses the wrapped type.
+ DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+ CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
+ Device* backendDevice = ToBackend(FromAPI(device));
+ VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
+
+ DawnSwapChainImplementation impl;
+ impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+ impl.textureUsage = WGPUTextureUsage_Present;
+
+ return impl;
+ }
+
+ WGPUTextureFormat GetNativeSwapChainPreferredFormat(
+ const DawnSwapChainImplementation* swapChain) {
+ NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+ }
+
    // Tags adapter discovery with the Vulkan backend type so the instance enumerates
    // Vulkan physical devices.
    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
        : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {
    }
+
#if defined(DAWN_PLATFORM_LINUX)
    // Each descriptor/export-info constructor tags itself with its ExternalImageType so
    // WrapVulkanImage/ExportVulkanImage can dispatch on GetType().
    ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
        : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {
    }

    ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
        : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {
    }

    ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
        : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {
    }

    ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
        : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
    }
#endif  // DAWN_PLATFORM_LINUX
+
    // Imports an externally-created Vulkan image as a WGPUTexture. Returns nullptr for
    // unsupported descriptor types and on platforms other than Linux.
    WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
#if defined(DAWN_PLATFORM_LINUX)
        switch (descriptor->GetType()) {
            // Both FD-based external image types share the same wrapping path.
            case ExternalImageType::OpaqueFD:
            case ExternalImageType::DmaBuf: {
                Device* backendDevice = ToBackend(FromAPI(device));
                const ExternalImageDescriptorFD* fdDescriptor =
                    static_cast<const ExternalImageDescriptorFD*>(descriptor);

                return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
                    fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
            }
            default:
                return nullptr;
        }
#else
        return nullptr;
#endif  // DAWN_PLATFORM_LINUX
    }
+
    // Transitions |texture| to |desiredLayout| and exports its synchronization state into
    // |info| for use by external Vulkan code. Returns false for a null texture, for
    // unsupported export-info types, and on platforms other than Linux.
    bool ExportVulkanImage(WGPUTexture texture,
                           VkImageLayout desiredLayout,
                           ExternalImageExportInfoVk* info) {
        if (texture == nullptr) {
            return false;
        }
#if defined(DAWN_PLATFORM_LINUX)
        switch (info->GetType()) {
            // Both FD-based external image types share the same export path.
            case ExternalImageType::OpaqueFD:
            case ExternalImageType::DmaBuf: {
                Texture* backendTexture = ToBackend(FromAPI(texture));
                Device* device = ToBackend(backendTexture->GetDevice());
                ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);

                return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
                                                              &fdInfo->semaphoreHandles);
            }
            default:
                return false;
        }
#else
        return false;
#endif  // DAWN_PLATFORM_LINUX
    }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp
new file mode 100644
index 00000000000..49416b9c1d9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.cpp
@@ -0,0 +1,109 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <string>
+
+namespace dawn::native::vulkan {
+
    // Returns a human-readable name for |result|, including Dawn's fake testing values;
    // unknown codes map to "<Unknown VkResult>".
    const char* VkResultAsString(::VkResult result) {
        // Convert to a int32_t to silence an MSVC warning that the fake errors don't appear in
        // the original VkResult enum.
        int32_t code = static_cast<int32_t>(result);

        switch (code) {
            case VK_SUCCESS:
                return "VK_SUCCESS";
            case VK_NOT_READY:
                return "VK_NOT_READY";
            case VK_TIMEOUT:
                return "VK_TIMEOUT";
            case VK_EVENT_SET:
                return "VK_EVENT_SET";
            case VK_EVENT_RESET:
                return "VK_EVENT_RESET";
            case VK_INCOMPLETE:
                return "VK_INCOMPLETE";
            case VK_ERROR_OUT_OF_HOST_MEMORY:
                return "VK_ERROR_OUT_OF_HOST_MEMORY";
            case VK_ERROR_OUT_OF_DEVICE_MEMORY:
                return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
            case VK_ERROR_INITIALIZATION_FAILED:
                return "VK_ERROR_INITIALIZATION_FAILED";
            case VK_ERROR_DEVICE_LOST:
                return "VK_ERROR_DEVICE_LOST";
            case VK_ERROR_MEMORY_MAP_FAILED:
                return "VK_ERROR_MEMORY_MAP_FAILED";
            case VK_ERROR_LAYER_NOT_PRESENT:
                return "VK_ERROR_LAYER_NOT_PRESENT";
            case VK_ERROR_EXTENSION_NOT_PRESENT:
                return "VK_ERROR_EXTENSION_NOT_PRESENT";
            case VK_ERROR_FEATURE_NOT_PRESENT:
                return "VK_ERROR_FEATURE_NOT_PRESENT";
            case VK_ERROR_INCOMPATIBLE_DRIVER:
                return "VK_ERROR_INCOMPATIBLE_DRIVER";
            case VK_ERROR_TOO_MANY_OBJECTS:
                return "VK_ERROR_TOO_MANY_OBJECTS";
            case VK_ERROR_FORMAT_NOT_SUPPORTED:
                return "VK_ERROR_FORMAT_NOT_SUPPORTED";
            case VK_ERROR_FRAGMENTED_POOL:
                return "VK_ERROR_FRAGMENTED_POOL";

            case VK_ERROR_SURFACE_LOST_KHR:
                return "VK_ERROR_SURFACE_LOST_KHR";
            case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
                return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";

            // Dawn-only values injected by the error-injection machinery in tests.
            case VK_FAKE_DEVICE_OOM_FOR_TESTING:
                return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
            case VK_FAKE_ERROR_FOR_TESTING:
                return "VK_FAKE_ERROR_FOR_TESTING";
            default:
                return "<Unknown VkResult>";
        }
    }
+
+ MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
+ if (DAWN_LIKELY(result == VK_SUCCESS)) {
+ return {};
+ }
+
+ std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+
+ if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
+ }
+ }
+
+ MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
+ if (DAWN_LIKELY(result == VK_SUCCESS)) {
+ return {};
+ }
+
+ std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+
+ if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
+ result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+ return DAWN_OUT_OF_MEMORY_ERROR(message);
+ } else if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
+ }
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h
new file mode 100644
index 00000000000..e17e73b7ae4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanError.h
@@ -0,0 +1,50 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANERROR_H_
+#define DAWNNATIVE_VULKAN_VULKANERROR_H_
+
+#include "dawn/native/ErrorInjector.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+
+constexpr VkResult VK_FAKE_ERROR_FOR_TESTING = VK_RESULT_MAX_ENUM;
+constexpr VkResult VK_FAKE_DEVICE_OOM_FOR_TESTING = static_cast<VkResult>(VK_RESULT_MAX_ENUM - 1);
+
+namespace dawn::native::vulkan {
+
+ // Returns a string version of the result.
+ const char* VkResultAsString(::VkResult result);
+
+ MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
+ MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
+
+// Returns success only if result is VK_SUCCESS; otherwise returns an error carrying the
+// context and the stringified result value. Can be used like this:
+//
+// DAWN_TRY(CheckVkSuccess(vkDoSomething, "doing something"));
+#define CheckVkSuccess(resultIn, contextIn) \
+ ::dawn::native::vulkan::CheckVkSuccessImpl( \
+ ::dawn::native::vulkan::VkResult::WrapUnsafe( \
+ INJECT_ERROR_OR_RUN(resultIn, VK_FAKE_ERROR_FOR_TESTING)), \
+ contextIn)
+
+#define CheckVkOOMThenSuccess(resultIn, contextIn) \
+ ::dawn::native::vulkan::CheckVkOOMThenSuccessImpl( \
+ ::dawn::native::vulkan::VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN( \
+ resultIn, VK_FAKE_DEVICE_OOM_FOR_TESTING, VK_FAKE_ERROR_FOR_TESTING)), \
+ contextIn)
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_VULKANERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp
new file mode 100644
index 00000000000..429b36aa398
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.cpp
@@ -0,0 +1,326 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanExtensions.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/vulkan_platform.h"
+
+#include <array>
+#include <limits>
+
+namespace dawn::native::vulkan {
+
+ static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
+ static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
+ static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
+
+ // A static array for InstanceExtInfo that can be indexed with InstanceExts.
+ // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
+ // assert will fire if it isn't in the correct order.
+ static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
+ static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
+ //
+ {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+
+ {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
+ {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
+ {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
+ {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
+ {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
+ {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
+ {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
+
+ {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
+ {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
+ //
+ }};
+
+ const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sInstanceExtInfos.size());
+ ASSERT(sInstanceExtInfos[index].index == ext);
+ return sInstanceExtInfos[index];
+ }
+
+ std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
+ std::unordered_map<std::string, InstanceExt> result;
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ result[info.name] = info.index;
+ }
+ return result;
+ }
+
+ InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
+ // We need to check that all transitive dependencies of extensions are advertised.
+ // To do that in a single pass and no data structures, the extensions are topologically
+ // sorted in the definition of InstanceExt.
+ // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
+ // dependency check will first assert all its dependents have been visited.
+ InstanceExtSet visitedSet;
+ InstanceExtSet trimmedSet;
+
+ auto HasDep = [&](InstanceExt ext) -> bool {
+ ASSERT(visitedSet[ext]);
+ return trimmedSet[ext];
+ };
+
+ for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
+ InstanceExt ext = static_cast<InstanceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ case InstanceExt::GetPhysicalDeviceProperties2:
+ case InstanceExt::Surface:
+ case InstanceExt::DebugUtils:
+ case InstanceExt::ValidationFeatures:
+ hasDependencies = true;
+ break;
+
+ case InstanceExt::ExternalMemoryCapabilities:
+ case InstanceExt::ExternalSemaphoreCapabilities:
+ hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case InstanceExt::FuchsiaImagePipeSurface:
+ case InstanceExt::MetalSurface:
+ case InstanceExt::WaylandSurface:
+ case InstanceExt::Win32Surface:
+ case InstanceExt::XcbSurface:
+ case InstanceExt::XlibSurface:
+ hasDependencies = HasDep(InstanceExt::Surface);
+ break;
+
+ case InstanceExt::EnumCount:
+ UNREACHABLE();
+ }
+
+ trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+ visitedSet.set(ext, true);
+ }
+
+ return trimmedSet;
+ }
+
+ void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
+ for (const InstanceExtInfo& info : sInstanceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->set(info.index, true);
+ }
+ }
+ }
+
+ static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
+ static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
+ //
+ {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
+ {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
+ {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
+ VulkanVersion_1_1},
+ {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+ VulkanVersion_1_1},
+ {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
+ {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+ VulkanVersion_1_1},
+ {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
+ {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
+ {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
+ {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
+
+ {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
+ {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
+ {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
+
+ {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
+ {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
+ {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
+ {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
+
+ {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
+ {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
+ {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
+ //
+ }};
+
+ const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
+ uint32_t index = static_cast<uint32_t>(ext);
+ ASSERT(index < sDeviceExtInfos.size());
+ ASSERT(sDeviceExtInfos[index].index == ext);
+ return sDeviceExtInfos[index];
+ }
+
+ std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
+ std::unordered_map<std::string, DeviceExt> result;
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ result[info.name] = info.index;
+ }
+ return result;
+ }
+
+ DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion) {
+ // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
+ // an explanation of what happens.
+ DeviceExtSet visitedSet;
+ DeviceExtSet trimmedSet;
+
+ auto HasDep = [&](DeviceExt ext) -> bool {
+ ASSERT(visitedSet[ext]);
+ return trimmedSet[ext];
+ };
+
+ for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
+ DeviceExt ext = static_cast<DeviceExt>(i);
+
+ bool hasDependencies = false;
+ switch (ext) {
+ // Happy extensions don't need anybody else!
+ case DeviceExt::BindMemory2:
+ case DeviceExt::GetMemoryRequirements2:
+ case DeviceExt::Maintenance1:
+ case DeviceExt::ImageFormatList:
+ case DeviceExt::StorageBufferStorageClass:
+ hasDependencies = true;
+ break;
+
+ // Physical device extensions technically don't require the instance to support
+ // them but VulkanFunctions only loads the function pointers if the instance
+                // advertises the extension. So if we didn't have this check, we'd risk calling
+                // a nullptr.
+ case DeviceExt::GetPhysicalDeviceProperties2:
+ hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
+ break;
+ case DeviceExt::ExternalMemoryCapabilities:
+ hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+ case DeviceExt::ExternalSemaphoreCapabilities:
+ hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ImageDrmFormatModifier:
+ hasDependencies = HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::ImageFormatList) &&
+ HasDep(DeviceExt::SamplerYCbCrConversion);
+ break;
+
+ case DeviceExt::Swapchain:
+ hasDependencies = instanceExts[InstanceExt::Surface];
+ break;
+
+ case DeviceExt::SamplerYCbCrConversion:
+ hasDependencies = HasDep(DeviceExt::Maintenance1) &&
+ HasDep(DeviceExt::BindMemory2) &&
+ HasDep(DeviceExt::GetMemoryRequirements2) &&
+ HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::DriverProperties:
+ case DeviceExt::ShaderFloat16Int8:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+ break;
+
+ case DeviceExt::ExternalMemory:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
+ break;
+
+ case DeviceExt::ExternalSemaphore:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
+ break;
+
+ case DeviceExt::ExternalMemoryFD:
+ case DeviceExt::ExternalMemoryZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalMemory);
+ break;
+
+ case DeviceExt::ExternalMemoryDmaBuf:
+ hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
+ break;
+
+ case DeviceExt::ExternalSemaphoreFD:
+ case DeviceExt::ExternalSemaphoreZirconHandle:
+ hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
+ break;
+
+ case DeviceExt::_16BitStorage:
+ hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+ HasDep(DeviceExt::StorageBufferStorageClass);
+ break;
+
+ case DeviceExt::SubgroupSizeControl:
+ // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
+ // don't need to check for it as it also requires Vulkan 1.1 in which
+ // VK_KHR_get_physical_device_properties2 was promoted.
+ hasDependencies = icdVersion >= VulkanVersion_1_1;
+ break;
+
+ case DeviceExt::EnumCount:
+ UNREACHABLE();
+ }
+
+ trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+ visitedSet.set(ext, true);
+ }
+
+ return trimmedSet;
+ }
+
+ void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
+ for (const DeviceExtInfo& info : sDeviceExtInfos) {
+ if (info.versionPromoted <= version) {
+ extensions->set(info.index, true);
+ }
+ }
+ }
+
+ // A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
+ // GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
+ // assert will fire if it isn't in the correct order.
+ static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
+ static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
+ //
+ {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
+ {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
+ {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
+ {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
+ //
+ }};
+
+ const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
+ uint32_t index = static_cast<uint32_t>(layer);
+ ASSERT(index < sVulkanLayerInfos.size());
+ ASSERT(sVulkanLayerInfos[index].layer == layer);
+ return sVulkanLayerInfos[index];
+ }
+
+ std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
+ std::unordered_map<std::string, VulkanLayer> result;
+ for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
+ result[info.name] = info.layer;
+ }
+ return result;
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h
new file mode 100644
index 00000000000..0a2c8e8c4a5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanExtensions.h
@@ -0,0 +1,162 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+#define DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
+
+#include "dawn/common/ityp_bitset.h"
+
+#include <unordered_map>
+
+namespace dawn::native::vulkan {
+
+ // The list of known instance extensions. They must be in dependency order (this is checked
+ // inside EnsureDependencies)
+ enum class InstanceExt {
+ // Promoted to 1.1
+ GetPhysicalDeviceProperties2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+
+ // Surface extensions
+ Surface,
+ FuchsiaImagePipeSurface,
+ MetalSurface,
+ WaylandSurface,
+ Win32Surface,
+ XcbSurface,
+ XlibSurface,
+
+ // Others
+ DebugUtils,
+ ValidationFeatures,
+
+ EnumCount,
+ };
+
+ // A bitset that is indexed with InstanceExt.
+ using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
+
+ // Information about a known instance extension.
+ struct InstanceExtInfo {
+ InstanceExt index;
+ const char* name;
+ // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+ // or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+ };
+
+ // Returns the information about a known InstanceExt
+ const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
+ // Returns a map that maps a Vulkan extension name to its InstanceExt.
+ std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
+
+ // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+ void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
+ // From a set of extensions advertised as supported by the instance (or promoted), remove all
+ // extensions that don't have all their transitive dependencies in advertisedExts.
+ InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
+
+ // The list of known device extensions. They must be in dependency order (this is checked
+ // inside EnsureDependencies)
+ enum class DeviceExt {
+ // Promoted to 1.1
+ BindMemory2,
+ Maintenance1,
+ StorageBufferStorageClass,
+ GetPhysicalDeviceProperties2,
+ GetMemoryRequirements2,
+ ExternalMemoryCapabilities,
+ ExternalSemaphoreCapabilities,
+ ExternalMemory,
+ ExternalSemaphore,
+ _16BitStorage,
+ SamplerYCbCrConversion,
+
+ // Promoted to 1.2
+ DriverProperties,
+ ImageFormatList,
+ ShaderFloat16Int8,
+
+ // External* extensions
+ ExternalMemoryFD,
+ ExternalMemoryDmaBuf,
+ ExternalMemoryZirconHandle,
+ ExternalSemaphoreFD,
+ ExternalSemaphoreZirconHandle,
+
+ // Others
+ ImageDrmFormatModifier,
+ Swapchain,
+ SubgroupSizeControl,
+
+ EnumCount,
+ };
+
+ // A bitset that is indexed with DeviceExt.
+ using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
+
+ // Information about a known device extension.
+ struct DeviceExtInfo {
+ DeviceExt index;
+ const char* name;
+ // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+ // or NeverPromoted if it was never promoted.
+ uint32_t versionPromoted;
+ };
+
+ // Returns the information about a known DeviceExt
+ const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
+ // Returns a map that maps a Vulkan extension name to its DeviceExt.
+ std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
+
+ // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+ void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
+ // From a set of extensions advertised as supported by the device (or promoted), remove all
+ // extensions that don't have all their transitive dependencies in advertisedExts or in
+ // instanceExts.
+ DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+ const InstanceExtSet& instanceExts,
+ uint32_t icdVersion);
+
+ // The list of all known Vulkan layers.
+ enum class VulkanLayer {
+ Validation,
+ LunargVkTrace,
+ RenderDocCapture,
+
+        // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_imagepipe_swapchain),
+        // which adds an instance extension (VK_FUCHSIA_imagepipe_surface) to all ICDs.
+ FuchsiaImagePipeSwapchain,
+
+ EnumCount,
+ };
+
+ // A bitset that is indexed with VulkanLayer.
+ using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
+
+ // Information about a known layer
+ struct VulkanLayerInfo {
+ VulkanLayer layer;
+ const char* name;
+ };
+
+ // Returns the information about a known VulkanLayer
+ const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
+ // Returns a map that maps a Vulkan layer name to its VulkanLayer.
+ std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp
new file mode 100644
index 00000000000..782cd5a698d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.cpp
@@ -0,0 +1,332 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanFunctions.h"
+
+#include "dawn/common/DynamicLib.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+
+#define GET_GLOBAL_PROC(name) \
+ do { \
+ name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(nullptr, "vk" #name)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
+ } \
+ } while (0)
+
+ MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
+ if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
+ return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
+ }
+
+ GET_GLOBAL_PROC(CreateInstance);
+ GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
+ GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
+
+ // Is not available in Vulkan 1.0, so allow nullptr
+ EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
+ GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+
+ return {};
+ }
+
+#define GET_INSTANCE_PROC_BASE(name, procName) \
+ do { \
+ name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName); \
+ } \
+ } while (0)
+
+#define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
+#define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
+
+ MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
+ const VulkanGlobalInfo& globalInfo) {
+ // Load this proc first so that we can destroy the instance even if some other
+ // GET_INSTANCE_PROC fails
+ GET_INSTANCE_PROC(DestroyInstance);
+
+ GET_INSTANCE_PROC(CreateDevice);
+ GET_INSTANCE_PROC(DestroyDevice);
+ GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
+ GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
+ GET_INSTANCE_PROC(EnumeratePhysicalDevices);
+ GET_INSTANCE_PROC(GetDeviceProcAddr);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+
+ if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
+ GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
+ GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
+ GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
+ GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
+ GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
+ GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
+ }
+
+ // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
+ // support the vendor entrypoint in GetProcAddress.
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
+ } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
+ }
+
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+ } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
+ }
+
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
+ } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
+ }
+
+ if (globalInfo.HasExt(InstanceExt::Surface)) {
+ GET_INSTANCE_PROC(DestroySurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+ }
+
+#if defined(VK_USE_PLATFORM_FUCHSIA)
+ if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
+ GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
+ }
+#endif // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
+ GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
+ GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
+ }
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
+ GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
+ }
+ if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
+ GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
+ }
+#endif // defined(DAWN_USE_X11)
+ return {};
+ }
+
+#define GET_DEVICE_PROC(name) \
+ do { \
+ name = reinterpret_cast<decltype(name)>(GetDeviceProcAddr(device, "vk" #name)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
+ } \
+ } while (0)
+
+ MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
+ const VulkanDeviceInfo& deviceInfo) {
+ GET_DEVICE_PROC(AllocateCommandBuffers);
+ GET_DEVICE_PROC(AllocateDescriptorSets);
+ GET_DEVICE_PROC(AllocateMemory);
+ GET_DEVICE_PROC(BeginCommandBuffer);
+ GET_DEVICE_PROC(BindBufferMemory);
+ GET_DEVICE_PROC(BindImageMemory);
+ GET_DEVICE_PROC(CmdBeginQuery);
+ GET_DEVICE_PROC(CmdBeginRenderPass);
+ GET_DEVICE_PROC(CmdBindDescriptorSets);
+ GET_DEVICE_PROC(CmdBindIndexBuffer);
+ GET_DEVICE_PROC(CmdBindPipeline);
+ GET_DEVICE_PROC(CmdBindVertexBuffers);
+ GET_DEVICE_PROC(CmdBlitImage);
+ GET_DEVICE_PROC(CmdClearAttachments);
+ GET_DEVICE_PROC(CmdClearColorImage);
+ GET_DEVICE_PROC(CmdClearDepthStencilImage);
+ GET_DEVICE_PROC(CmdCopyBuffer);
+ GET_DEVICE_PROC(CmdCopyBufferToImage);
+ GET_DEVICE_PROC(CmdCopyImage);
+ GET_DEVICE_PROC(CmdCopyImageToBuffer);
+ GET_DEVICE_PROC(CmdCopyQueryPoolResults);
+ GET_DEVICE_PROC(CmdDispatch);
+ GET_DEVICE_PROC(CmdDispatchIndirect);
+ GET_DEVICE_PROC(CmdDraw);
+ GET_DEVICE_PROC(CmdDrawIndexed);
+ GET_DEVICE_PROC(CmdDrawIndexedIndirect);
+ GET_DEVICE_PROC(CmdDrawIndirect);
+ GET_DEVICE_PROC(CmdEndQuery);
+ GET_DEVICE_PROC(CmdEndRenderPass);
+ GET_DEVICE_PROC(CmdExecuteCommands);
+ GET_DEVICE_PROC(CmdFillBuffer);
+ GET_DEVICE_PROC(CmdNextSubpass);
+ GET_DEVICE_PROC(CmdPipelineBarrier);
+ GET_DEVICE_PROC(CmdPushConstants);
+ GET_DEVICE_PROC(CmdResetEvent);
+ GET_DEVICE_PROC(CmdResetQueryPool);
+ GET_DEVICE_PROC(CmdResolveImage);
+ GET_DEVICE_PROC(CmdSetBlendConstants);
+ GET_DEVICE_PROC(CmdSetDepthBias);
+ GET_DEVICE_PROC(CmdSetDepthBounds);
+ GET_DEVICE_PROC(CmdSetEvent);
+ GET_DEVICE_PROC(CmdSetLineWidth);
+ GET_DEVICE_PROC(CmdSetScissor);
+ GET_DEVICE_PROC(CmdSetStencilCompareMask);
+ GET_DEVICE_PROC(CmdSetStencilReference);
+ GET_DEVICE_PROC(CmdSetStencilWriteMask);
+ GET_DEVICE_PROC(CmdSetViewport);
+ GET_DEVICE_PROC(CmdUpdateBuffer);
+ GET_DEVICE_PROC(CmdWaitEvents);
+ GET_DEVICE_PROC(CmdWriteTimestamp);
+ GET_DEVICE_PROC(CreateBuffer);
+ GET_DEVICE_PROC(CreateBufferView);
+ GET_DEVICE_PROC(CreateCommandPool);
+ GET_DEVICE_PROC(CreateComputePipelines);
+ GET_DEVICE_PROC(CreateDescriptorPool);
+ GET_DEVICE_PROC(CreateDescriptorSetLayout);
+ GET_DEVICE_PROC(CreateEvent);
+ GET_DEVICE_PROC(CreateFence);
+ GET_DEVICE_PROC(CreateFramebuffer);
+ GET_DEVICE_PROC(CreateGraphicsPipelines);
+ GET_DEVICE_PROC(CreateImage);
+ GET_DEVICE_PROC(CreateImageView);
+ GET_DEVICE_PROC(CreatePipelineCache);
+ GET_DEVICE_PROC(CreatePipelineLayout);
+ GET_DEVICE_PROC(CreateQueryPool);
+ GET_DEVICE_PROC(CreateRenderPass);
+ GET_DEVICE_PROC(CreateSampler);
+ GET_DEVICE_PROC(CreateSemaphore);
+ GET_DEVICE_PROC(CreateShaderModule);
+ GET_DEVICE_PROC(DestroyBuffer);
+ GET_DEVICE_PROC(DestroyBufferView);
+ GET_DEVICE_PROC(DestroyCommandPool);
+ GET_DEVICE_PROC(DestroyDescriptorPool);
+ GET_DEVICE_PROC(DestroyDescriptorSetLayout);
+ GET_DEVICE_PROC(DestroyEvent);
+ GET_DEVICE_PROC(DestroyFence);
+ GET_DEVICE_PROC(DestroyFramebuffer);
+ GET_DEVICE_PROC(DestroyImage);
+ GET_DEVICE_PROC(DestroyImageView);
+ GET_DEVICE_PROC(DestroyPipeline);
+ GET_DEVICE_PROC(DestroyPipelineCache);
+ GET_DEVICE_PROC(DestroyPipelineLayout);
+ GET_DEVICE_PROC(DestroyQueryPool);
+ GET_DEVICE_PROC(DestroyRenderPass);
+ GET_DEVICE_PROC(DestroySampler);
+ GET_DEVICE_PROC(DestroySemaphore);
+ GET_DEVICE_PROC(DestroyShaderModule);
+ GET_DEVICE_PROC(DeviceWaitIdle);
+ GET_DEVICE_PROC(EndCommandBuffer);
+ GET_DEVICE_PROC(FlushMappedMemoryRanges);
+ GET_DEVICE_PROC(FreeCommandBuffers);
+ GET_DEVICE_PROC(FreeDescriptorSets);
+ GET_DEVICE_PROC(FreeMemory);
+ GET_DEVICE_PROC(GetBufferMemoryRequirements);
+ GET_DEVICE_PROC(GetDeviceMemoryCommitment);
+ GET_DEVICE_PROC(GetDeviceQueue);
+ GET_DEVICE_PROC(GetEventStatus);
+ GET_DEVICE_PROC(GetFenceStatus);
+ GET_DEVICE_PROC(GetImageMemoryRequirements);
+ GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
+ GET_DEVICE_PROC(GetImageSubresourceLayout);
+ GET_DEVICE_PROC(GetPipelineCacheData);
+ GET_DEVICE_PROC(GetQueryPoolResults);
+ GET_DEVICE_PROC(GetRenderAreaGranularity);
+ GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
+ GET_DEVICE_PROC(MapMemory);
+ GET_DEVICE_PROC(MergePipelineCaches);
+ GET_DEVICE_PROC(QueueBindSparse);
+ GET_DEVICE_PROC(QueueSubmit);
+ GET_DEVICE_PROC(QueueWaitIdle);
+ GET_DEVICE_PROC(ResetCommandBuffer);
+ GET_DEVICE_PROC(ResetCommandPool);
+ GET_DEVICE_PROC(ResetDescriptorPool);
+ GET_DEVICE_PROC(ResetEvent);
+ GET_DEVICE_PROC(ResetFences);
+ GET_DEVICE_PROC(SetEvent);
+ GET_DEVICE_PROC(UnmapMemory);
+ GET_DEVICE_PROC(UpdateDescriptorSets);
+ GET_DEVICE_PROC(WaitForFences);
+
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
+ GET_DEVICE_PROC(GetMemoryFdKHR);
+ GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
+ }
+
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+ GET_DEVICE_PROC(ImportSemaphoreFdKHR);
+ GET_DEVICE_PROC(GetSemaphoreFdKHR);
+ }
+
+ if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
+ GET_DEVICE_PROC(CreateSwapchainKHR);
+ GET_DEVICE_PROC(DestroySwapchainKHR);
+ GET_DEVICE_PROC(GetSwapchainImagesKHR);
+ GET_DEVICE_PROC(AcquireNextImageKHR);
+ GET_DEVICE_PROC(QueuePresentKHR);
+ }
+
+ if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
+ GET_DEVICE_PROC(GetBufferMemoryRequirements2);
+ GET_DEVICE_PROC(GetImageMemoryRequirements2);
+ GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
+ }
+
+#if VK_USE_PLATFORM_FUCHSIA
+ if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
+ GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
+ }
+
+ if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+ GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
+ GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
+ }
+#endif
+
+ return {};
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h
new file mode 100644
index 00000000000..07b8b39dc06
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanFunctions.h
@@ -0,0 +1,326 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
+#define DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
+
+#include "dawn/common/vulkan_platform.h"
+
+#include "dawn/native/Error.h"
+
+class DynamicLib;
+
+namespace dawn::native::vulkan {
+
+ struct VulkanGlobalInfo;
+ struct VulkanDeviceInfo;
+
+ // Stores the Vulkan entry points. Also loads them from the dynamic library
+ // and the vkGet*ProcAddress entry points.
+ struct VulkanFunctions {
+ MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
+ MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
+ MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
+
+ // ---------- Global procs
+
+ // Initial proc from which we can get all the others
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
+
+ PFN_vkCreateInstance CreateInstance = nullptr;
+ PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
+ PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
+ // DestroyInstance isn't technically a global proc but we want to be able to use it
+ // before querying the instance procs in case we need to error out during initialization.
+ PFN_vkDestroyInstance DestroyInstance = nullptr;
+
+ // Core Vulkan 1.1
+ PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
+
+ // ---------- Instance procs
+
+ // Core Vulkan 1.0
+ PFN_vkCreateDevice CreateDevice = nullptr;
+ PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
+ PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
+ PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
+ PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
+ PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
+ PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
+ PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties =
+ nullptr;
+ PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
+ PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties =
+ nullptr;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties
+ GetPhysicalDeviceSparseImageFormatProperties = nullptr;
+ // Not technically an instance proc but we want to be able to use it as soon as the
+ // device is created.
+ PFN_vkDestroyDevice DestroyDevice = nullptr;
+
+ // VK_EXT_debug_utils
+ PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
+ PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
+ PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
+ PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
+ PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
+ PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
+ PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
+ PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
+ PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
+ PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
+ PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
+
+ // VK_KHR_surface
+ PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR =
+ nullptr;
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
+ nullptr;
+
+ // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
+ // present.
+
+ // VK_KHR_external_memory_capabilities
+ PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
+ nullptr;
+
+ // VK_KHR_external_semaphore_capabilities
+ PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
+ GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
+
+ // VK_KHR_get_physical_device_properties2
+ PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
+ PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
+ nullptr;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
+ nullptr;
+ PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
+ GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
+
+#if defined(VK_USE_PLATFORM_FUCHSIA)
+ // FUCHSIA_image_pipe_surface
+ PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
+#endif // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ // EXT_metal_surface
+ PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ // KHR_win32_surface
+ PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
+ GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ // KHR_xlib_surface
+ PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
+ GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
+
+ // KHR_xcb_surface
+ PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
+ GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
+#endif // defined(DAWN_USE_X11)
+
+ // ---------- Device procs
+
+ // Core Vulkan 1.0
+ PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
+ PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
+ PFN_vkAllocateMemory AllocateMemory = nullptr;
+ PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
+ PFN_vkBindBufferMemory BindBufferMemory = nullptr;
+ PFN_vkBindImageMemory BindImageMemory = nullptr;
+ PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
+ PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
+ PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
+ PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
+ PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
+ PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
+ PFN_vkCmdBlitImage CmdBlitImage = nullptr;
+ PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
+ PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
+ PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
+ PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
+ PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
+ PFN_vkCmdCopyImage CmdCopyImage = nullptr;
+ PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
+ PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
+ PFN_vkCmdDispatch CmdDispatch = nullptr;
+ PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
+ PFN_vkCmdDraw CmdDraw = nullptr;
+ PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
+ PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
+ PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
+ PFN_vkCmdEndQuery CmdEndQuery = nullptr;
+ PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
+ PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
+ PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
+ PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
+ PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
+ PFN_vkCmdPushConstants CmdPushConstants = nullptr;
+ PFN_vkCmdResetEvent CmdResetEvent = nullptr;
+ PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
+ PFN_vkCmdResolveImage CmdResolveImage = nullptr;
+ PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
+ PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
+ PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
+ PFN_vkCmdSetEvent CmdSetEvent = nullptr;
+ PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
+ PFN_vkCmdSetScissor CmdSetScissor = nullptr;
+ PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
+ PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
+ PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
+ PFN_vkCmdSetViewport CmdSetViewport = nullptr;
+ PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
+ PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
+ PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
+ PFN_vkCreateBuffer CreateBuffer = nullptr;
+ PFN_vkCreateBufferView CreateBufferView = nullptr;
+ PFN_vkCreateCommandPool CreateCommandPool = nullptr;
+ PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
+ PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
+ PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
+ PFN_vkCreateEvent CreateEvent = nullptr;
+ PFN_vkCreateFence CreateFence = nullptr;
+ PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
+ PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
+ PFN_vkCreateImage CreateImage = nullptr;
+ PFN_vkCreateImageView CreateImageView = nullptr;
+ PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
+ PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
+ PFN_vkCreateQueryPool CreateQueryPool = nullptr;
+ PFN_vkCreateRenderPass CreateRenderPass = nullptr;
+ PFN_vkCreateSampler CreateSampler = nullptr;
+ PFN_vkCreateSemaphore CreateSemaphore = nullptr;
+ PFN_vkCreateShaderModule CreateShaderModule = nullptr;
+ PFN_vkDestroyBuffer DestroyBuffer = nullptr;
+ PFN_vkDestroyBufferView DestroyBufferView = nullptr;
+ PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
+ PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
+ PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
+ PFN_vkDestroyEvent DestroyEvent = nullptr;
+ PFN_vkDestroyFence DestroyFence = nullptr;
+ PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
+ PFN_vkDestroyImage DestroyImage = nullptr;
+ PFN_vkDestroyImageView DestroyImageView = nullptr;
+ PFN_vkDestroyPipeline DestroyPipeline = nullptr;
+ PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
+ PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
+ PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
+ PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
+ PFN_vkDestroySampler DestroySampler = nullptr;
+ PFN_vkDestroySemaphore DestroySemaphore = nullptr;
+ PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
+ PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
+ PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
+ PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
+ PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
+ PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
+ PFN_vkFreeMemory FreeMemory = nullptr;
+ PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
+ PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
+ PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
+ PFN_vkGetEventStatus GetEventStatus = nullptr;
+ PFN_vkGetFenceStatus GetFenceStatus = nullptr;
+ PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
+ PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
+ PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
+ PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
+ PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
+ PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
+ PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
+ PFN_vkMapMemory MapMemory = nullptr;
+ PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
+ PFN_vkQueueBindSparse QueueBindSparse = nullptr;
+ PFN_vkQueueSubmit QueueSubmit = nullptr;
+ PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
+ PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
+ PFN_vkResetCommandPool ResetCommandPool = nullptr;
+ PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
+ PFN_vkResetEvent ResetEvent = nullptr;
+ PFN_vkResetFences ResetFences = nullptr;
+ PFN_vkSetEvent SetEvent = nullptr;
+ PFN_vkUnmapMemory UnmapMemory = nullptr;
+ PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
+ PFN_vkWaitForFences WaitForFences = nullptr;
+
+ // VK_KHR_external_memory_fd
+ PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
+ PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
+
+ // VK_KHR_external_semaphore_fd
+ PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
+ PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
+
+ // VK_KHR_get_memory_requirements2
+ PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
+ PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
+ PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
+
+ // VK_KHR_swapchain
+ PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
+ PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
+ PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
+ PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
+ PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
+
+#if VK_USE_PLATFORM_FUCHSIA
+ // VK_FUCHSIA_external_memory
+ PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
+ PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
+ nullptr;
+
+ // VK_FUCHSIA_external_semaphore
+ PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
+ PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
+#endif
+ };
+
+ // Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
+ // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
+ // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
+ // about handling error cases.
+ class VkResult {
+ public:
+ constexpr static VkResult WrapUnsafe(::VkResult value) {
+ return VkResult(value);
+ }
+
+ constexpr operator ::VkResult() const {
+ return mValue;
+ }
+
+ private:
+ // Private. Use VkResult::WrapUnsafe instead.
+ constexpr VkResult(::VkResult value) : mValue(value) {
+ }
+
+ ::VkResult mValue;
+ };
+
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp
new file mode 100644
index 00000000000..a734a9b6f45
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.cpp
@@ -0,0 +1,334 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+
+#include <cstring>
+
+namespace dawn::native::vulkan {
+
+ namespace {
+ ResultOrError<InstanceExtSet> GatherInstanceExtensions(
+ const char* layerName,
+ const dawn::native::vulkan::VulkanFunctions& vkFunctions,
+ const std::unordered_map<std::string, InstanceExt>& knownExts) {
+ uint32_t count = 0;
+ VkResult vkResult = VkResult::WrapUnsafe(
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
+ if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
+ }
+
+ std::vector<VkExtensionProperties> extensions(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceExtensionProperties(
+ layerName, &count, extensions.data()),
+ "vkEnumerateInstanceExtensionProperties"));
+
+ InstanceExtSet result;
+ for (const VkExtensionProperties& extension : extensions) {
+ auto it = knownExts.find(extension.extensionName);
+ if (it != knownExts.end()) {
+ result.set(it->second, true);
+ }
+ }
+
+ return result;
+ }
+
+ } // namespace
+
+ bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
+ return extensions[ext];
+ }
+
+ bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
+ return extensions[ext];
+ }
+
+ ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
+ VulkanGlobalInfo info = {};
+ // Gather info on available API version
+ {
+ info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+ if (vkFunctions.EnumerateInstanceVersion != nullptr) {
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
+ "vkEnumerateInstanceVersion"));
+ }
+ }
+
+ // Gather the info about the instance layers
+ {
+ uint32_t count = 0;
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
+ // From the Vulkan spec result should be success if there are 0 layers,
+ // incomplete otherwise. This means that both values represent a success.
+            // This is the same for all Enumerate functions
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
+ }
+
+ std::vector<VkLayerProperties> layersProperties(count);
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
+ "vkEnumerateInstanceLayerProperties"));
+
+ std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
+ for (const VkLayerProperties& layer : layersProperties) {
+ auto it = knownLayers.find(layer.layerName);
+ if (it != knownLayers.end()) {
+ info.layers.set(it->second, true);
+ }
+ }
+ }
+
+ // Gather the info about the instance extensions
+ {
+ std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
+
+ DAWN_TRY_ASSIGN(info.extensions,
+ GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
+ MarkPromotedExtensions(&info.extensions, info.apiVersion);
+ info.extensions = EnsureDependencies(info.extensions);
+
+ for (VulkanLayer layer : IterateBitSet(info.layers)) {
+ DAWN_TRY_ASSIGN(info.layerExtensions[layer],
+ GatherInstanceExtensions(GetVulkanLayerInfo(layer).name,
+ vkFunctions, knownExts));
+ MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
+ info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
+ }
+ }
+
+ return std::move(info);
+ }
+
+ ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+ VkInstance instance,
+ const VulkanFunctions& vkFunctions) {
+ uint32_t count = 0;
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
+ }
+
+ std::vector<VkPhysicalDevice> physicalDevices(count);
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
+ "vkEnumeratePhysicalDevices"));
+
+ return std::move(physicalDevices);
+ }
+
+    ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+        VulkanDeviceInfo info = {};
+        VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+        const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
+        const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+        // Query the device properties first to get the ICD's `apiVersion`
+        vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
+
+        // Gather info about device memory.
+        {
+            VkPhysicalDeviceMemoryProperties memory;
+            vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
+
+            info.memoryTypes.assign(memory.memoryTypes,
+                                    memory.memoryTypes + memory.memoryTypeCount);
+            info.memoryHeaps.assign(memory.memoryHeaps,
+                                    memory.memoryHeaps + memory.memoryHeapCount);
+        }
+
+        // Gather info about device queue families
+        {
+            uint32_t count = 0;
+            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
+
+            info.queueFamilies.resize(count);
+            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
+                                                               info.queueFamilies.data());
+        }
+
+        // Gather the info about the device layers
+        {
+            uint32_t count = 0;
+            VkResult result = VkResult::WrapUnsafe(
+                vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
+            }
+
+            info.layers.resize(count);
+            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
+                                        physicalDevice, &count, info.layers.data()),
+                                    "vkEnumerateDeviceLayerProperties"));
+        }
+
+        // Gather the info about the device extensions
+        {
+            uint32_t count = 0;
+            VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
+                physicalDevice, nullptr, &count, nullptr));
+            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
+            }
+
+            std::vector<VkExtensionProperties> extensionsProperties;
+            extensionsProperties.resize(count);
+            DAWN_TRY(
+                CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+                                   physicalDevice, nullptr, &count, extensionsProperties.data()),
+                               "vkEnumerateDeviceExtensionProperties"));
+
+            std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+
+            for (const VkExtensionProperties& extension : extensionsProperties) {
+                auto it = knownExts.find(extension.extensionName);
+                if (it != knownExts.end()) {
+                    info.extensions.set(it->second, true);
+                }
+            }
+
+            MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
+            info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
+                                                 info.properties.apiVersion);
+        }
+
+        // Gather general and extension features and properties
+        //
+        // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
+        // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
+        // because these extensions (transitively) depend on it in `EnsureDependencies`
+        VkPhysicalDeviceFeatures2 features2 = {};
+        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+        features2.pNext = nullptr;
+        PNextChainBuilder featuresChain(&features2);
+
+        VkPhysicalDeviceProperties2 properties2 = {};
+        properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+        properties2.pNext = nullptr;
+        PNextChainBuilder propertiesChain(&properties2);
+
+        if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
+            featuresChain.Add(&info.shaderFloat16Int8Features,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+        }
+
+        if (info.extensions[DeviceExt::_16BitStorage]) {
+            featuresChain.Add(&info._16BitStorageFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+        }
+
+        if (info.extensions[DeviceExt::SubgroupSizeControl]) {
+            featuresChain.Add(&info.subgroupSizeControlFeatures,
+                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+            propertiesChain.Add(
+                &info.subgroupSizeControlProperties,
+                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
+        }
+
+        if (info.extensions[DeviceExt::DriverProperties]) {
+            propertiesChain.Add(&info.driverProperties,
+                                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
+        }
+
+        // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
+        // that features not covered by VkPhysicalDevice{Features,Properties} can be queried.
+        //
+        // Note that info.properties has already been filled at the start of this function to get
+        // `apiVersion`.
+        ASSERT(info.properties.apiVersion != 0);
+        if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
+            vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
+            vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
+            info.features = features2.features;
+        } else {
+            ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
+            vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
+        }
+
+        // TODO(cwallez@chromium.org): gather info about formats
+
+        return std::move(info);
+    }
+
+ ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+ VkSurfaceKHR surface) {
+ VulkanSurfaceInfo info = {};
+
+ VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+ const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+ // Get the surface capabilities
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
+ physicalDevice, surface, &info.capabilities),
+ "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
+
+ // Query which queue families support presenting this surface
+ {
+ size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
+ info.supportedQueueFamilies.resize(nQueueFamilies, false);
+
+ for (uint32_t i = 0; i < nQueueFamilies; ++i) {
+ VkBool32 supported = VK_FALSE;
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, i, surface, &supported),
+ "vkGetPhysicalDeviceSurfaceSupportKHR"));
+
+ info.supportedQueueFamilies[i] = (supported == VK_TRUE);
+ }
+ }
+
+ // Gather supported formats
+ {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
+ }
+
+ info.formats.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, info.formats.data()),
+ "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+ }
+
+        // Gather supported present modes
+ {
+ uint32_t count = 0;
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
+ }
+
+ info.presentModes.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, info.presentModes.data()),
+ "vkGetPhysicalDeviceSurfacePresentModesKHR"));
+ }
+
+ return std::move(info);
+ }
+
+} // namespace dawn::native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h
new file mode 100644
index 00000000000..2e0c2aa8275
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/VulkanInfo.h
@@ -0,0 +1,89 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_VULKANINFO_H_
+#define DAWNNATIVE_VULKAN_VULKANINFO_H_
+
+#include "dawn/common/ityp_array.h"
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/vulkan/VulkanExtensions.h"
+
+#include <vector>
+
+namespace dawn::native::vulkan {
+
+ class Adapter;
+ class Backend;
+ struct VulkanFunctions;
+
+ // Global information - gathered before the instance is created
+ struct VulkanGlobalKnobs {
+ VulkanLayerSet layers;
+ ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
+ layerExtensions;
+
+ // During information gathering `extensions` only contains the instance's extensions but
+ // during the instance creation logic it becomes the OR of the instance's extensions and
+ // the selected layers' extensions.
+ InstanceExtSet extensions;
+ bool HasExt(InstanceExt ext) const;
+ };
+
+ struct VulkanGlobalInfo : VulkanGlobalKnobs {
+ uint32_t apiVersion;
+ };
+
+ // Device information - gathered before the device is created.
+ struct VulkanDeviceKnobs {
+ VkPhysicalDeviceFeatures features;
+ VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
+ VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
+ VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
+
+ bool HasExt(DeviceExt ext) const;
+ DeviceExtSet extensions;
+ };
+
+ struct VulkanDeviceInfo : VulkanDeviceKnobs {
+ VkPhysicalDeviceProperties properties;
+ VkPhysicalDeviceDriverProperties driverProperties;
+ VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
+
+ std::vector<VkQueueFamilyProperties> queueFamilies;
+
+ std::vector<VkMemoryType> memoryTypes;
+ std::vector<VkMemoryHeap> memoryHeaps;
+
+ std::vector<VkLayerProperties> layers;
+ // TODO(cwallez@chromium.org): layer instance extensions
+ };
+
+ struct VulkanSurfaceInfo {
+ VkSurfaceCapabilitiesKHR capabilities;
+ std::vector<VkSurfaceFormatKHR> formats;
+ std::vector<VkPresentModeKHR> presentModes;
+ std::vector<bool> supportedQueueFamilies;
+ };
+
+ ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
+ ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+ VkInstance instance,
+ const VulkanFunctions& vkFunctions);
+ ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+ ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+ VkSurfaceKHR surface);
+} // namespace dawn::native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_VULKANINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h
new file mode 100644
index 00000000000..034bada4cfd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryService.h
@@ -0,0 +1,78 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/VulkanBackend.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+
+namespace dawn::native::vulkan {
+ class Device;
+ struct VulkanDeviceInfo;
+} // namespace dawn::native::vulkan
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    struct MemoryImportParams {  // Parameters fed to vkAllocateMemory when importing (see ImportMemory implementations).
+        VkDeviceSize allocationSize;  // VkMemoryAllocateInfo::allocationSize.
+        uint32_t memoryTypeIndex;  // VkMemoryAllocateInfo::memoryTypeIndex.
+    };
+
+    class Service {  // Per-device helper for importing external memory into Vulkan; one backend impl is compiled in.
+      public:
+        explicit Service(Device* device);
+        ~Service();
+
+        static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
+
+        // True if the device reports it supports importing external memory.
+        bool SupportsImportMemory(VkFormat format,
+                                  VkImageType type,
+                                  VkImageTiling tiling,
+                                  VkImageUsageFlags usage,
+                                  VkImageCreateFlags flags);
+
+        // True if the device reports it supports creating VkImages from external memory.
+        bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                 VkFormat format,
+                                 VkImageUsageFlags usage,
+                                 bool* supportsDisjoint);
+
+        // Returns the parameters required for importing memory
+        ResultOrError<MemoryImportParams> GetMemoryImportParams(
+            const ExternalImageDescriptor* descriptor,
+            VkImage image);
+
+        // Given an external handle pointing to memory, import it into a VkDeviceMemory
+        ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
+                                                   const MemoryImportParams& importParams,
+                                                   VkImage image);
+
+        // Create a VkImage for the given handle type
+        ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
+                                           const VkImageCreateInfo& baseCreateInfo);
+
+      private:
+        Device* mDevice = nullptr;  // Borrowed; presumably the Device outlives this Service -- confirm ownership.
+
+        // True if early checks pass that determine if the service is supported
+        bool mSupported = false;
+    };
+
+}} // namespace dawn::native::vulkan::external_memory
+
+#endif // DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
new file mode 100644
index 00000000000..c304a922ff2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -0,0 +1,357 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn/native/vulkan/UtilsVulkan.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+ namespace {
+
+        bool GetFormatModifierProps(const VulkanFunctions& fn,
+                                    VkPhysicalDevice physicalDevice,
+                                    VkFormat format,
+                                    uint64_t modifier,
+                                    VkDrmFormatModifierPropertiesEXT* formatModifierProps) {  // Finds the properties for |modifier|; false if the format does not list it.
+            std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
+            VkFormatProperties2 formatProps = {};
+            formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+            PNextChainBuilder formatPropsChain(&formatProps);
+
+            VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
+            formatModifierPropsList.drmFormatModifierCount = 0;
+            formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
+            formatPropsChain.Add(&formatModifierPropsList,
+                                 VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
+
+            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);  // First call: query the modifier count only.
+
+            uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
+            formatModifierPropsVector.resize(modifierCount);
+            formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
+
+            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);  // Second call: fill |modifierCount| entries.
+            for (const auto& props : formatModifierPropsVector) {
+                if (props.drmFormatModifier == modifier) {
+                    *formatModifierProps = props;
+                    return true;
+                }
+            }
+            return false;  // |modifier| not advertised for |format|.
+        }
+
+ // Some modifiers use multiple planes (for example, see the comment for
+ // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
+        ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
+                                                      VkPhysicalDevice physicalDevice,
+                                                      VkFormat format,
+                                                      uint64_t modifier) {  // Memory-plane count for |modifier|, or a validation error if unsupported.
+            VkDrmFormatModifierPropertiesEXT props;
+            if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
+                return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
+            }
+            return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
+        }
+
+        bool IsMultiPlanarVkFormat(VkFormat format) {  // True for YCbCr formats with 2 or 3 memory planes (per the _2PLANE/_3PLANE names).
+            switch (format) {
+                case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+                case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+                case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+                case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+                case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
+                case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+                case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+                case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+                case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+                case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
+                    return true;
+
+                default:
+                    return false;
+            }
+        }
+
+        bool SupportsDisjoint(const VulkanFunctions& fn,
+                              VkPhysicalDevice physicalDevice,
+                              VkFormat format,
+                              uint64_t modifier) {  // Disjoint binding only applies to multi-planar formats; single-plane is always false.
+            if (IsMultiPlanarVkFormat(format)) {
+                VkDrmFormatModifierPropertiesEXT props;
+                return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
+                        (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
+            }
+            return false;
+        }
+
+ } // anonymous namespace
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {  // Support is decided once at construction.
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&  // dma-buf path needs both FD import and DRM modifiers.
+               deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {  // NOTE(review): type/tiling/usage/flags are unused in the dma-buf path.
+        return mSupported && (!IsMultiPlanarVkFormat(format) ||
+                              (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&  // Multi-planar import limited to NV12 + VK_KHR_image_format_list.
+                               mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {  // Validates format+modifier for dma-buf image creation via vkGetPhysicalDeviceImageFormatProperties2.
+        *supportsDisjoint = false;  // Default; set properly below once the modifier is validated.
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+        if (descriptor->GetType() != ExternalImageType::DmaBuf) {
+            return false;
+        }
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+
+        // Verify plane count for the modifier.
+        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+        uint32_t planeCount = 0;
+        if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
+                                                         dmaBufDescriptor->drmModifier),
+                                   &planeCount)) {  // Swallows the error and reports "unsupported".
+            return false;
+        }
+        if (planeCount == 0) {
+            return false;
+        }
+        // Only support the NV12 multi-planar format for now.
+        if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
+            return false;
+        }
+        *supportsDisjoint =
+            SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
+
+        // Verify that the format modifier of the external memory and the requested Vulkan format
+        // are actually supported together in a dma-buf import.
+        VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
+        imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+        imageFormatInfo.format = format;
+        imageFormatInfo.type = VK_IMAGE_TYPE_2D;  // Hard-coded: only 2D images are checked for dma-buf import here.
+        imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+        imageFormatInfo.usage = usage;
+        imageFormatInfo.flags = 0;
+        PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
+
+        VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
+        externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+        imageFormatInfoChain.Add(&externalImageFormatInfo,
+                                 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
+
+        VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
+        drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+        drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        imageFormatInfoChain.Add(
+            &drmModifierInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
+
+        // For mutable vkimage of multi-planar format, we also need to make sure the each
+        // plane's view format can be supported.
+        std::array<VkFormat, 2> viewFormats;
+        VkImageFormatListCreateInfo imageFormatListInfo = {};
+
+        if (planeCount > 1) {
+            ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);  // Guaranteed by the NV12-only check above.
+            viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};  // Per-plane view formats for NV12.
+            imageFormatListInfo.viewFormatCount = 2;
+            imageFormatListInfo.pViewFormats = viewFormats.data();
+            imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            imageFormatInfoChain.Add(&imageFormatListInfo,
+                                     VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+        }
+
+        VkImageFormatProperties2 imageFormatProps = {};
+        imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+        PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
+
+        VkExternalImageFormatProperties externalImageFormatProps = {};
+        imageFormatPropsChain.Add(&externalImageFormatProps,
+                                  VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
+
+        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            physicalDevice, &imageFormatInfo, &imageFormatProps));
+        if (result != VK_SUCCESS) {  // Driver rejects the combination (e.g. VK_ERROR_FORMAT_NOT_SUPPORTED).
+            return false;
+        }
+        VkExternalMemoryFeatureFlags featureFlags =
+            externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
+        return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {  // Picks an allocation size and memory type compatible with both |image| and the dma-buf fd.
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                        "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
+
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+        VkDevice device = mDevice->GetVkDevice();
+
+        // Get the valid memory types for the VkImage.
+        VkMemoryRequirements memoryRequirements;
+        mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
+
+        VkMemoryFdPropertiesKHR fdProperties;
+        fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+        fdProperties.pNext = nullptr;
+
+        // Get the valid memory types that the external memory can be imported as.
+        mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+                                             dmaBufDescriptor->memoryFD, &fdProperties);  // NOTE(review): VkResult is ignored -- confirm intended.
+        // Choose the best memory type that satisfies both the image's constraint and the
+        // import's constraint.
+        memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;  // Intersect image and fd constraints.
+        int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
+            memoryRequirements, MemoryKind::Opaque);
+        DAWN_INVALID_IF(memoryTypeIndex == -1,
+                        "Unable to find an appropriate memory type for import.");
+
+        MemoryImportParams params = {memoryRequirements.size,
+                                     static_cast<uint32_t>(memoryTypeIndex)};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {  // Imports a dma-buf fd as a dedicated allocation for |image|.
+        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+        VkMemoryAllocateInfo memoryAllocateInfo = {};
+        memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        memoryAllocateInfo.allocationSize = importParams.allocationSize;
+        memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+        PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
+
+        VkImportMemoryFdInfoKHR importMemoryFdInfo;  // NOTE(review): not zero-initialized, unlike other structs in this file; presumably Add() sets sType/pNext -- confirm.
+        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,  // NOTE(review): trailing ',' is the comma operator -- should be ';' (behavior unchanged).
+        importMemoryFdInfo.fd = handle;
+        memoryAllocateInfoChain.Add(&importMemoryFdInfo,
+                                    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
+
+        VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;  // NOTE(review): also not zero-initialized -- confirm Add() covers sType/pNext.
+        memoryDedicatedAllocateInfo.image = image;
+        memoryDedicatedAllocateInfo.buffer = VkBuffer{};  // Dedicated to the image, not a buffer.
+        memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
+                                    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(
+            CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
+                                                      nullptr, &*allocatedMemory),  // '&*' unwraps the typed handle wrapper to the raw pointer the C API expects.
+                           "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {  // Creates a VkImage with DRM-format-modifier tiling for a dma-buf import.
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                        "ExternalImageDescriptor is not a dma-buf descriptor.");
+
+        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+        VkDevice device = mDevice->GetVkDevice();
+
+        uint32_t planeCount;
+        DAWN_TRY_ASSIGN(planeCount,
+                        GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
+                                              dmaBufDescriptor->drmModifier));
+
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+        createInfo.flags = 0;  // NOTE(review): overwrites (not ORs) baseCreateInfo.flags -- confirm intended.
+        createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+        PNextChainBuilder createInfoChain(&createInfo);
+
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
+        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+        createInfoChain.Add(&externalMemoryImageCreateInfo,
+                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+        // For single plane formats.
+        VkSubresourceLayout planeLayout = {};
+        planeLayout.offset = 0;
+        planeLayout.size = 0;  // VK_EXT_image_drm_format_modifier mandates size = 0.
+        planeLayout.rowPitch = dmaBufDescriptor->stride;
+        planeLayout.arrayPitch = 0;  // Not an array texture
+        planeLayout.depthPitch = 0;  // Not a depth texture
+
+        VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
+        explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+        explicitCreateInfo.drmFormatModifierPlaneCount = 1;  // Explicit path is only taken when planeCount == 1 (see branch below).
+        explicitCreateInfo.pPlaneLayouts = &planeLayout;
+
+        // For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
+        // due to the lack of knowledge about the required 'offset'. Alternatively
+        // VkImageDrmFormatModifierListCreateInfoEXT can be used to create image with the DRM format
+        // modifier.
+        VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
+        listCreateInfo.drmFormatModifierCount = 1;
+        listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
+
+        if (planeCount > 1) {
+            // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
+            // VkImageView can be plane's format which might differ from the image's format.
+            createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+            createInfoChain.Add(&listCreateInfo,
+                                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
+        } else {
+            createInfoChain.Add(
+                &explicitCreateInfo,
+                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
+        }
+
+        // Create a new VkImage with tiling equal to the DRM format modifier.
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
+                                "CreateImage"));
+        return image;
+    }
+
+}} // namespace dawn::native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
new file mode 100644
index 00000000000..7b3c2399162
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
@@ -0,0 +1,65 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device) : mDevice(device) {  // Null backend: external memory is never supported.
+        DAWN_UNUSED(mDevice);  // Silence unused-member warnings in this stub.
+        DAWN_UNUSED(mSupported);
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return false;  // Stub: always unsupported.
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {  // Stub: always unsupported.
+        return false;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {  // Stub: always unsupported.
+        *supportsDisjoint = false;
+        return false;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {  // Stub: unreachable in practice since Supports* always return false.
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {  // Stub.
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {  // Stub.
+        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+    }
+
+}} // namespace dawn::native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
new file mode 100644
index 00000000000..24a830b4080
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -0,0 +1,156 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {  // Support is decided once at construction.
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);  // Opaque-fd path only needs VK_KHR_external_memory_fd.
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {  // Asks the driver whether an opaque-fd import of this image config is importable.
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;  // pNext chain built by hand in this file (no PNextChainBuilder).
+        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+        externalFormatInfo.pNext = nullptr;
+        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+
+        VkPhysicalDeviceImageFormatInfo2 formatInfo;
+        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+        formatInfo.pNext = &externalFormatInfo;
+        formatInfo.format = format;
+        formatInfo.type = type;
+        formatInfo.tiling = tiling;
+        formatInfo.usage = usage;
+        formatInfo.flags = flags;
+
+        VkExternalImageFormatProperties externalFormatProperties;
+        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+        externalFormatProperties.pNext = nullptr;
+
+        VkImageFormatProperties2 formatProperties;
+        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+        formatProperties.pNext = &externalFormatProperties;
+
+        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
+
+        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+        VkFlags memoryFlags =
+            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {  // Opaque-fd path never supports disjoint planes; only the extension check matters.
+        *supportsDisjoint = false;
+        return mSupported;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {  // Caller-supplied size/type are trusted here; |image| is unused in this path.
+        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
+                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+
+        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                     opaqueFDDescriptor->memoryTypeIndex};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {  // Imports an opaque fd; validates the caller's size against the image's requirement.
+        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+        VkMemoryRequirements requirements;
+        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+        DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+                        "Requested allocation size (%u) is smaller than the image requires (%u).",
+                        importParams.allocationSize, requirements.size);
+
+        VkImportMemoryFdInfoKHR importMemoryFdInfo;
+        importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+        importMemoryFdInfo.pNext = nullptr;
+        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+        importMemoryFdInfo.fd = handle;  // Ownership of the fd transfers to the driver on success (per VK_KHR_external_memory_fd) -- confirm callers rely on this.
+
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &importMemoryFdInfo;
+        allocateInfo.allocationSize = importParams.allocationSize;
+        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                           nullptr, &*allocatedMemory),
+                                "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {  // Creates an aliasable optimal-tiling image bound to opaque-fd memory.
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+        externalMemoryImageCreateInfo.pNext = nullptr;
+        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.pNext = &externalMemoryImageCreateInfo;  // NOTE(review): overwrites any pNext on baseCreateInfo.
+        createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;  // NOTE(review): overwrites (not ORs) baseCreateInfo.flags -- confirm intended.
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+        ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+            "CreateImage"));
+        return image;
+    }
+
+}} // namespace dawn::native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
new file mode 100644
index 00000000000..b61b0c515bd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -0,0 +1,158 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/TextureVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn::native { namespace vulkan::external_memory {
+
+    Service::Service(Device* device)
+        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {  // Support is decided once at construction.
+    }
+
+    Service::~Service() = default;
+
+    // static
+    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+        return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);  // Fuchsia VMO-handle import extension.
+    }
+
+    bool Service::SupportsImportMemory(VkFormat format,
+                                       VkImageType type,
+                                       VkImageTiling tiling,
+                                       VkImageUsageFlags usage,
+                                       VkImageCreateFlags flags) {  // Asks the driver whether a Zircon-VMO import of this image config is importable.
+        // Early out before we try using extension functions
+        if (!mSupported) {
+            return false;
+        }
+
+        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+        externalFormatInfo.pNext = nullptr;
+        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+        VkPhysicalDeviceImageFormatInfo2 formatInfo;
+        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+        formatInfo.pNext = &externalFormatInfo;
+        formatInfo.format = format;
+        formatInfo.type = type;
+        formatInfo.tiling = tiling;
+        formatInfo.usage = usage;
+        formatInfo.flags = flags;
+
+        VkExternalImageFormatProperties externalFormatProperties;
+        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+        externalFormatProperties.pNext = nullptr;
+
+        VkImageFormatProperties2 formatProperties;
+        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+        formatProperties.pNext = &externalFormatProperties;
+
+        VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);  // NOTE(review): the dma-buf and opaque-fd services wrap this in VkResult::WrapUnsafe -- inconsistent here; confirm.
+
+        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+        VkFlags memoryFlags =
+            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+    }
+
+    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                      VkFormat format,
+                                      VkImageUsageFlags usage,
+                                      bool* supportsDisjoint) {  // Zircon path never supports disjoint planes; only the extension check matters.
+        *supportsDisjoint = false;
+        return mSupported;
+    }
+
+    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image) {  // Caller-supplied size/type are trusted; |image| is unused in this path.
+        DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
+                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");  // NOTE(review): uses '->type' where sibling services use GetType(), and checks OpaqueFD in the Zircon service -- confirm both are intended.
+
+        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                     opaqueFDDescriptor->memoryTypeIndex};
+        return params;
+    }
+
+    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                        const MemoryImportParams& importParams,
+                                                        VkImage image) {  // Imports a Zircon VMO handle; validates the caller's size against the image's requirement.
+        DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
+
+        VkMemoryRequirements requirements;
+        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+        DAWN_INVALID_IF(
+            requirements.size > importParams.allocationSize,
+            "Requested allocation size (%u) is smaller than the required image size (%u).",
+            importParams.allocationSize, requirements.size);
+
+        VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
+        importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
+        importMemoryHandleInfo.pNext = nullptr;
+        importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+        importMemoryHandleInfo.handle = handle;
+
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &importMemoryHandleInfo;
+        allocateInfo.allocationSize = importParams.allocationSize;
+        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                           nullptr, &*allocatedMemory),
+                                "vkAllocateMemory"));
+        return allocatedMemory;
+    }
+
+    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                                const VkImageCreateInfo& baseCreateInfo) {  // Creates an aliasable optimal-tiling image bound to VMO memory.
+        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+        externalMemoryImageCreateInfo.pNext = nullptr;
+        externalMemoryImageCreateInfo.handleTypes =
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+
+        VkImageCreateInfo createInfo = baseCreateInfo;
+        createInfo.pNext = &externalMemoryImageCreateInfo;  // NOTE(review): overwrites any pNext on baseCreateInfo.
+        createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;  // NOTE(review): overwrites (not ORs) baseCreateInfo.flags -- confirm intended.
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+        ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+        VkImage image;
+        DAWN_TRY(CheckVkSuccess(
+            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+            "CreateImage"));
+        return image;
+    }
+
+}} // namespace dawn::native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
new file mode 100644
index 00000000000..c1f69f1dae1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
@@ -0,0 +1,60 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
+#define DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
+
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/native/Error.h"
+#include "dawn/native/vulkan/ExternalHandle.h"
+#include "dawn/native/vulkan/VulkanFunctions.h"
+#include "dawn/native/vulkan/VulkanInfo.h"
+
+namespace dawn::native::vulkan {
+ class Device;
+} // namespace dawn::native::vulkan
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+ class Service {
+ public:
+ explicit Service(Device* device);
+ ~Service();
+
+ static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn);
+
+ // True if the device reports it supports this feature
+ bool Supported();
+
+ // Given an external handle, import it into a VkSemaphore
+ ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
+
+ // Create a VkSemaphore that is exportable into an external handle later
+ ResultOrError<VkSemaphore> CreateExportableSemaphore();
+
+ // Export a VkSemaphore into an external handle
+ ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
+
+ private:
+ Device* mDevice = nullptr;
+
+ // True if early checks pass that determine if the service is supported
+ bool mSupported = false;
+ };
+
+}} // namespace dawn::native::vulkan::external_semaphore
+
+#endif // DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
new file mode 100644
index 00000000000..7e2b619f5ca
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
@@ -0,0 +1,137 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+static constexpr VkExternalSemaphoreHandleTypeFlagBits kHandleType =
+#if defined(DAWN_USE_SYNC_FDS)
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+#else
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+#endif // defined(DAWN_USE_SYNC_FDS)
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+ Service::Service(Device* device)
+ : mDevice(device),
+ mSupported(CheckSupport(device->GetDeviceInfo(),
+ ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+ device->fn)) {
+ }
+
+ Service::~Service() = default;
+
+ // static
+ bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+ return false;
+ }
+
+ VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.handleType = kHandleType;
+
+ VkExternalSemaphorePropertiesKHR semaphoreProperties;
+ semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+ semaphoreProperties.pNext = nullptr;
+
+ fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+ &semaphoreProperties);
+
+ VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+ return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+ }
+
+ bool Service::Supported() {
+ return mSupported;
+ }
+
+ ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ VkSemaphoreCreateInfo info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ info.pNext = nullptr;
+ info.flags = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+ "vkCreateSemaphore"));
+
+ VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
+ importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+ importSemaphoreFdInfo.pNext = nullptr;
+ importSemaphoreFdInfo.semaphore = semaphore;
+ importSemaphoreFdInfo.flags = 0;
+ importSemaphoreFdInfo.handleType = kHandleType;
+ importSemaphoreFdInfo.fd = handle;
+
+ MaybeError status = CheckVkSuccess(
+ mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
+ "vkImportSemaphoreFdKHR");
+
+ if (status.IsError()) {
+ mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+ DAWN_TRY(std::move(status));
+ }
+
+ return semaphore;
+ }
+
+ ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+ exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+ exportSemaphoreInfo.pNext = nullptr;
+ exportSemaphoreInfo.handleTypes = kHandleType;
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo;
+ semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+ semaphoreCreateInfo.flags = 0;
+
+ VkSemaphore signalSemaphore;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+ nullptr, &*signalSemaphore),
+ "vkCreateSemaphore"));
+ return signalSemaphore;
+ }
+
+ ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
+ semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+ semaphoreGetFdInfo.pNext = nullptr;
+ semaphoreGetFdInfo.semaphore = semaphore;
+ semaphoreGetFdInfo.handleType = kHandleType;
+
+ int fd = -1;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
+ "vkGetSemaphoreFdKHR"));
+
+ ASSERT(fd >= 0);
+ return fd;
+ }
+
+}} // namespace dawn::native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
new file mode 100644
index 00000000000..3146e3771ad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
@@ -0,0 +1,50 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+ Service::Service(Device* device) : mDevice(device) {
+ DAWN_UNUSED(mDevice);
+ DAWN_UNUSED(mSupported);
+ }
+
+ Service::~Service() = default;
+
+ // static
+ bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ return false;
+ }
+
+ bool Service::Supported() {
+ return false;
+ }
+
+ ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+ }
+
+ ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+ }
+
+ ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+ }
+
+}} // namespace dawn::native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
new file mode 100644
index 00000000000..03fa79c65d2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -0,0 +1,135 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/vulkan/AdapterVk.h"
+#include "dawn/native/vulkan/BackendVk.h"
+#include "dawn/native/vulkan/DeviceVk.h"
+#include "dawn/native/vulkan/VulkanError.h"
+#include "dawn/native/vulkan/external_semaphore/SemaphoreService.h"
+
+namespace dawn::native { namespace vulkan::external_semaphore {
+
+ Service::Service(Device* device)
+ : mDevice(device),
+ mSupported(CheckSupport(device->GetDeviceInfo(),
+ ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+ device->fn)) {
+ }
+
+ Service::~Service() = default;
+
+ // static
+ bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+ VkPhysicalDevice physicalDevice,
+ const VulkanFunctions& fn) {
+ if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+ return false;
+ }
+
+ VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+ semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+ semaphoreInfo.pNext = nullptr;
+ semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkExternalSemaphorePropertiesKHR semaphoreProperties;
+ semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+ semaphoreProperties.pNext = nullptr;
+
+ fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+ &semaphoreProperties);
+
+ VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+
+ return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+ }
+
+ bool Service::Supported() {
+ return mSupported;
+ }
+
+ ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+ DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID,
+ "Importing a semaphore with an invalid handle.");
+
+ VkSemaphore semaphore = VK_NULL_HANDLE;
+ VkSemaphoreCreateInfo info;
+ info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ info.pNext = nullptr;
+ info.flags = 0;
+
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+ "vkCreateSemaphore"));
+
+ VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
+ importSemaphoreHandleInfo.sType =
+ VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
+ importSemaphoreHandleInfo.pNext = nullptr;
+ importSemaphoreHandleInfo.semaphore = semaphore;
+ importSemaphoreHandleInfo.flags = 0;
+ importSemaphoreHandleInfo.handleType =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+ importSemaphoreHandleInfo.handle = handle;
+
+ MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
+ mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
+ "vkImportSemaphoreZirconHandleFUCHSIA");
+
+ if (status.IsError()) {
+ mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+ DAWN_TRY(std::move(status));
+ }
+
+ return semaphore;
+ }
+
+ ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+ VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+ exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+ exportSemaphoreInfo.pNext = nullptr;
+ exportSemaphoreInfo.handleTypes =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ VkSemaphoreCreateInfo semaphoreCreateInfo;
+ semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+ semaphoreCreateInfo.flags = 0;
+
+ VkSemaphore signalSemaphore;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+ nullptr, &*signalSemaphore),
+ "vkCreateSemaphore"));
+ return signalSemaphore;
+ }
+
+ ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+ VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
+ semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
+ semaphoreGetHandleInfo.pNext = nullptr;
+ semaphoreGetHandleInfo.semaphore = semaphore;
+ semaphoreGetHandleInfo.handleType =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+
+ zx_handle_t handle = ZX_HANDLE_INVALID;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
+ mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
+ "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
+
+ ASSERT(handle != ZX_HANDLE_INVALID);
+ return handle;
+ }
+
+}} // namespace dawn::native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp
new file mode 100644
index 00000000000..79e32421df7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.cpp
@@ -0,0 +1,132 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/native/webgpu_absl_format.h"
+
+#include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
+#include "dawn/native/Texture.h"
+
+namespace dawn::native {
+
+ //
+ // Structs
+ //
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Color* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append(absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b,
+ value->a));
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Extent3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]",
+ value->width, value->height, value->depthOrArrayLayers));
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Origin3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
+ return {true};
+ }
+
+ //
+ // Objects
+ //
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const DeviceBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append("[Device");
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ s->Append("]");
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const ApiObjectBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append("[");
+ if (value->IsError()) {
+ s->Append("Invalid ");
+ }
+ s->Append(ObjectTypeAsString(value->GetType()));
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ s->Append("]");
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const TextureViewBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ if (value == nullptr) {
+ s->Append("[null]");
+ return {true};
+ }
+ s->Append("[");
+ if (value->IsError()) {
+ s->Append("Invalid ");
+ }
+ s->Append(ObjectTypeAsString(value->GetType()));
+ const std::string& label = value->GetLabel();
+ if (!label.empty()) {
+ s->Append(absl::StrFormat(" \"%s\"", label));
+ }
+ const std::string& textureLabel = value->GetTexture()->GetLabel();
+ if (!textureLabel.empty()) {
+ s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
+ }
+ s->Append("]");
+ return {true};
+ }
+
+} // namespace dawn::native
diff --git a/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h
new file mode 100644
index 00000000000..4dc51175984
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/native/webgpu_absl_format.h
@@ -0,0 +1,72 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_WEBGPUABSLFORMAT_H_
+#define DAWNNATIVE_WEBGPUABSLFORMAT_H_
+
+#include "absl/strings/str_format.h"
+#include "dawn/native/dawn_platform.h"
+#include "dawn/native/webgpu_absl_format_autogen.h"
+
+namespace dawn::native {
+
+ //
+ // Structs
+ //
+
+ struct Color;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Color* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ struct Extent3D;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Extent3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ struct Origin3D;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const Origin3D* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ //
+ // Objects
+ //
+
+ class DeviceBase;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const DeviceBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ class ApiObjectBase;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const ApiObjectBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+ // Special case for TextureViews, since frequently the texture will be the
+ // thing that's labeled.
+ class TextureViewBase;
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ const TextureViewBase* value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s);
+
+} // namespace dawn::native
+
+#endif // DAWNNATIVE_WEBGPUABSLFORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt
index 0cdeaa26efc..0cdeaa26efc 100644
--- a/chromium/third_party/dawn/src/dawn_node/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/node/CMakeLists.txt
diff --git a/chromium/third_party/dawn/src/dawn/node/Module.cpp b/chromium/third_party/dawn/src/dawn/node/Module.cpp
new file mode 100644
index 00000000000..f87631b0e55
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/Module.cpp
@@ -0,0 +1,65 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_proc.h"
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/binding/GPU.h"
+
+namespace {
+ Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
+ const auto& env = info.Env();
+
+ std::tuple<std::vector<std::string>> args;
+ auto res = wgpu::interop::FromJS(info, args);
+ if (res != wgpu::interop::Success) {
+ Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
+ return env.Undefined();
+ }
+
+ wgpu::binding::Flags flags;
+
+ // Parse out the key=value flags out of the input args array
+ for (const auto& arg : std::get<0>(args)) {
+ const size_t sep_index = arg.find("=");
+ if (sep_index == std::string::npos) {
+ Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
+ .ThrowAsJavaScriptException();
+ return env.Undefined();
+ }
+ flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
+ }
+
+ // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
+ return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+ }
+
+} // namespace
+
+// Initialize() initializes the Dawn node module, registering all the WebGPU
+// types into the global object, and adding the 'create' function on the exported
+// object.
+Napi::Object Initialize(Napi::Env env, Napi::Object exports) {
+ // Begin by setting the Dawn procedure function pointers.
+ dawnProcSetProcs(&dawn::native::GetProcs());
+
+ // Register all the interop types
+ wgpu::interop::Initialize(env);
+
+ // Export function that creates and returns the wgpu::interop::GPU interface
+ exports.Set(Napi::String::New(env, "create"), Napi::Function::New<CreateGPU>(env));
+
+ return exports;
+}
+
+NODE_API_MODULE(addon, Initialize)
diff --git a/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp b/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp
new file mode 100644
index 00000000000..a557eca4d50
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/NapiSymbols.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/utils/Debug.h"
+
+// To reduce the build dependencies for compiling the dawn.node targets, we do
+// not use cmake-js for building, but instead just depend on node_api_headers.
+// As the name suggests, node_api_headers contains just the *headers* of Napi,
+// and does not provide a library to link against.
+// Fortunately node_api_headers provides a list of Napi symbols exported by Node,
+// which we can use to produce weak-symbol stubs.
+
+#ifdef _WIN32
+# error "NapiSymbols.cpp is not used on Windows"
+#endif
+
+#define NAPI_SYMBOL(NAME) \
+ __attribute__((weak)) void NAME() { \
+ UNREACHABLE( \
+ "#NAME is a weak stub, and should have been runtime replaced by the node " \
+ "implementation"); \
+ }
+
+extern "C" {
+// List of Napi symbols generated from the node_api_headers/symbols.js file
+#include "NapiSymbols.h"
+}
diff --git a/chromium/third_party/dawn/src/dawn_node/OWNERS b/chromium/third_party/dawn/src/dawn/node/OWNERS
index d19725d4a1e..d19725d4a1e 100644
--- a/chromium/third_party/dawn/src/dawn_node/OWNERS
+++ b/chromium/third_party/dawn/src/dawn/node/OWNERS
diff --git a/chromium/third_party/dawn/src/dawn/node/README.md b/chromium/third_party/dawn/src/dawn/node/README.md
new file mode 100644
index 00000000000..d1b7e4ab061
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/README.md
@@ -0,0 +1,135 @@
+# Dawn bindings for NodeJS
+
+Note: This code is currently WIP. There are a number of [known issues](#known-issues).
+
+## Building
+
+## System requirements
+
+- [CMake 3.10](https://cmake.org/download/) or greater
+- [Go 1.13](https://golang.org/dl/) or greater
+
+## Install `depot_tools`
+
+Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
+
+[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+### Fetch dependencies
+
+First, the steps are similar to [`doc/building.md`](../../docs/dawn/building.md), but instead of the `Get the code` step, run:
+
+```sh
+# Clone the repo as "dawn"
+git clone https://dawn.googlesource.com/dawn dawn && cd dawn
+
+# Bootstrap the NodeJS binding gclient configuration
+cp scripts/standalone-with-node.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+Optionally, on Linux install X11-xcb support:
+
+```sh
+sudo apt-get install libx11-xcb-dev
+```
+
+If you don't have those supporting libraries, then you must use the
+`-DDAWN_USE_X11=OFF` flag on CMake.
+
+### Build
+
+Currently, the node bindings can only be built with CMake:
+
+```sh
+mkdir <build-output-path>
+cd <build-output-path>
+cmake <dawn-root-path> -GNinja -DDAWN_BUILD_NODE_BINDINGS=1 -DDAWN_ENABLE_PIC=1 -DDAWN_USE_X11=OFF
+ninja dawn.node
+```
+
+### Running WebGPU CTS
+
+1. [Build](#build) the `dawn.node` NodeJS module.
+2. Checkout the [WebGPU CTS repo](https://github.com/gpuweb/cts)
+ - Run `npm install` from inside the CTS directory to install its dependencies
+
+```sh
+./src/dawn/node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> [WebGPU CTS query]
+```
+
+If this fails with the error message `TypeError: expander is not a function or its return value is not iterable`, try appending `--build=false` to the start of the `run-cts` command line flags.
+
+To test against SwiftShader instead of the default Vulkan device, prefix `./src/dawn/node/tools/run-cts` with `VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json` and add `--flag=dawn-backend=vulkan` to the run-cts command line flags. For example:
+
+```sh
+VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json ./src/dawn/node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> --flag=dawn-backend=vulkan [WebGPU CTS query]
+```
+
+The `--flag` parameter must be passed in multiple times, once for each flag being set. Here are some common arguments:
+* `dawn-backend=<null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles>`
+* `dlldir=<path>` - used to add an extra DLL search path on Windows, primarily to load the right d3dcompiler_47.dll
+* `enable-dawn-features=<features>` - enable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Toggles.cpp), e.g. `dump_shaders`
+* `disable-dawn-features=<features>` - disable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn/native/Toggles.cpp)
+
+For example, on Windows, to use the d3dcompiler_47.dll from a Chromium checkout, and to dump shader output, we could run the following using Git Bash:
+
+```sh
+./src/dawn/node/tools/run-cts --verbose --dawn-node=/c/src/dawn/build/Debug/dawn.node --cts=/c/src/gpuweb-cts --flag=dlldir="C:\src\chromium\src\out\Release" --flag=enable-dawn-features=dump_shaders 'webgpu:shader,execution,builtin,abs:integer_builtin_functions,abs_unsigned:storageClass="storage";storageMode="read_write";containerType="vector";isAtomic=false;baseType="u32";type="vec2%3Cu32%3E"'
+```
+
+Note that we pass `--verbose` above so that all test output, including the dumped shader, is written to stdout.
+
+### Testing against a `run-cts` expectations file
+
+You can write out an expectations file with the `--output <path>` command line flag, and then compare this snapshot to a later run with `--expect <path>`.
+
+## Debugging TypeScript with VSCode
+
+Open or create the `.vscode/launch.json` file, and add:
+
+```json
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Debug with node",
+ "type": "node",
+ "request": "launch",
+ "outFiles": [ "./**/*.js" ],
+ "args": [
+ "-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
+ "--", "dummy-arg",
+ "--gpu-provider",
+ "[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
+ "[test-query]", // REPLACE: [test-query]
+ ],
+ "cwd": "[cts-root]" // REPLACE: [cts-root]
+ }
+ ]
+}
+```
+
+Replacing:
+
+- `[cts-root]` with the path to the CTS root directory. If you are editing the `.vscode/launch.json` from within the CTS workspace, then you may use `${workspaceFolder}`.
+- `[path-to-dawn.node]` this is the path to the `dawn.node` module built by the [build step](#Build)
+- `test-query` with the test query string. Example: `webgpu:shader,execution,builtin,abs:*`
+
+
+## Known issues
+
+- Many WebGPU CTS tests are currently known to fail
+- Dawn uses special token values for some parameters / fields. These are currently passed straight through to dawn from the JavaScript. discussions: [1](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn/node/binding/Converter.cpp#167), [2](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn/node/binding/Converter.cpp#928), [3](https://dawn-review.googlesource.com/c/dawn/+/64909/4/src/dawn/node/binding/GPUTexture.cpp#42)
+- Backend validation is currently always set to 'full' to aid in debugging. This can be extremely slow. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn/node/binding/GPU.cpp#25)
+- Attempting to call `new T` in JavaScript, where `T` is an IDL interface type, should result in a TypeError "Illegal constructor". [discussion](https://dawn-review.googlesource.com/c/dawn/+/64902/9/src/dawn/node/interop/WebGPU.cpp.tmpl#293)
+- `GPUDevice` currently maintains a list of "lost promises". This should return the same promise. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64906/6/src/dawn/node/binding/GPUDevice.h#107)
+
+## Remaining work
+
+- Investigate CTS failures that are not expected to fail.
+- Generated includes live in `src/` for `dawn/node`, but outside for Dawn. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64903/9/src/dawn/node/interop/CMakeLists.txt#56)
+- Hook up to presubmit bots (CQ / Kokoro)
+- `binding::GPU` will require significant rework [once Dawn implements the device / adapter creation path properly](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn/node/binding/GPU.cpp).
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp
new file mode 100644
index 00000000000..a978fa8e3b2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.cpp
@@ -0,0 +1,60 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/AsyncRunner.h"
+
+#include <cassert>
+#include <limits>
+
+namespace wgpu::binding {
+
+ AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
+ }
+
+ void AsyncRunner::Begin() {
+ assert(count_ != std::numeric_limits<decltype(count_)>::max());
+ if (count_++ == 0) {
+ QueueTick();
+ }
+ }
+
+ void AsyncRunner::End() {
+ assert(count_ > 0);
+ count_--;
+ }
+
+ void AsyncRunner::QueueTick() {
+ // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
+ // called.
+ if (tick_queued_) {
+ return;
+ }
+ tick_queued_ = true;
+ env_.Global()
+ .Get("setImmediate")
+ .As<Napi::Function>()
+ .Call({
+ // TODO(crbug.com/dawn/1127): Create once, reuse.
+ Napi::Function::New(env_,
+ [this](const Napi::CallbackInfo&) {
+ tick_queued_ = false;
+ if (count_ > 0) {
+ device_.Tick();
+ QueueTick();
+ }
+ }),
+ });
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h
new file mode 100644
index 00000000000..9ed6e5c5e03
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/AsyncRunner.h
@@ -0,0 +1,77 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+#define DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+
+#include <stdint.h>
+#include <memory>
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+
+namespace wgpu::binding {
+
+ // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
+ // tasks in flight.
+ class AsyncRunner {
+ public:
+ AsyncRunner(Napi::Env env, wgpu::Device device);
+
+ // Begin() should be called when a new asynchronous task is started.
+ // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
+ // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
+ // thread is idle. This will be repeatedly called until the number of executing asynchronous
+ // tasks reaches 0 again.
+ void Begin();
+
+ // End() should be called once the asynchronous task has finished.
+ // Every call to Begin() should eventually result in a call to End().
+ void End();
+
+ private:
+ void QueueTick();
+ Napi::Env env_;
+ wgpu::Device const device_;
+ uint64_t count_ = 0;
+ bool tick_queued_ = false;
+ };
+
+ // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
+ // AsyncRunner::End() on destruction.
+ class AsyncTask {
+ public:
+ inline AsyncTask(AsyncTask&&) = default;
+
+ // Constructor.
+ // Calls AsyncRunner::Begin()
+ inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
+ runner_->Begin();
+ };
+
+ // Destructor.
+ // Calls AsyncRunner::End()
+ inline ~AsyncTask() {
+ runner_->End();
+ }
+
+ private:
+ AsyncTask(const AsyncTask&) = delete;
+ AsyncTask& operator=(const AsyncTask&) = delete;
+ std::shared_ptr<AsyncRunner> runner_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_ASYNC_RUNNER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt
new file mode 100644
index 00000000000..1113a5df4f6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/CMakeLists.txt
@@ -0,0 +1,82 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_node_binding STATIC
+ "AsyncRunner.cpp"
+ "AsyncRunner.h"
+ "Converter.cpp"
+ "Converter.h"
+ "Errors.cpp"
+ "Errors.h"
+ "Flags.cpp"
+ "Flags.h"
+ "GPU.cpp"
+ "GPU.h"
+ "GPUAdapter.cpp"
+ "GPUAdapter.h"
+ "GPUBindGroup.cpp"
+ "GPUBindGroup.h"
+ "GPUBindGroupLayout.cpp"
+ "GPUBindGroupLayout.h"
+ "GPUBuffer.cpp"
+ "GPUBuffer.h"
+ "GPUCommandBuffer.cpp"
+ "GPUCommandBuffer.h"
+ "GPUCommandEncoder.cpp"
+ "GPUCommandEncoder.h"
+ "GPUComputePassEncoder.cpp"
+ "GPUComputePassEncoder.h"
+ "GPUComputePipeline.cpp"
+ "GPUComputePipeline.h"
+ "GPUDevice.cpp"
+ "GPUDevice.h"
+ "GPUPipelineLayout.cpp"
+ "GPUPipelineLayout.h"
+ "GPUQuerySet.cpp"
+ "GPUQuerySet.h"
+ "GPUQueue.cpp"
+ "GPUQueue.h"
+ "GPURenderBundle.cpp"
+ "GPURenderBundle.h"
+ "GPURenderBundleEncoder.cpp"
+ "GPURenderBundleEncoder.h"
+ "GPURenderPassEncoder.cpp"
+ "GPURenderPassEncoder.h"
+ "GPURenderPipeline.cpp"
+ "GPURenderPipeline.h"
+ "GPUSampler.cpp"
+ "GPUSampler.h"
+ "GPUShaderModule.cpp"
+ "GPUShaderModule.h"
+ "GPUSupportedLimits.cpp"
+ "GPUSupportedLimits.h"
+ "GPUTexture.cpp"
+ "GPUTexture.h"
+ "GPUTextureView.cpp"
+ "GPUTextureView.h"
+)
+
+target_include_directories(dawn_node_binding
+ PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${NODE_API_HEADERS_DIR}/include"
+ "${NODE_ADDON_API_DIR}"
+ "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_binding
+ PRIVATE
+ dawncpp
+ dawn_node_interop
+)
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp
new file mode 100644
index 00000000000..7387ce120bf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Converter.cpp
@@ -0,0 +1,1241 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Converter.h"
+
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+#include "src/dawn/node/binding/GPUSampler.h"
+#include "src/dawn/node/binding/GPUShaderModule.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/binding/GPUTextureView.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ Converter::~Converter() {
+ for (auto& free : free_) {
+ free();
+ }
+ }
+
+ bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
+ out.depthOrArrayLayers = dict->depthOrArrayLayers;
+ out.width = dict->width;
+ out.height = dict->height;
+ return true;
+ }
+ if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
+ switch (vec->size()) {
+ default:
+ case 3:
+ out.depthOrArrayLayers = (*vec)[2];
+ case 2: // fallthrough
+ out.height = (*vec)[1];
+ case 1: // fallthrough
+ out.width = (*vec)[0];
+ return true;
+ case 0:
+ break;
+ }
+ }
+ Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
+ out = {};
+ out.x = in.x;
+ out.y = in.y;
+ out.z = in.z;
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
+ out.r = dict->r;
+ out.g = dict->g;
+ out.b = dict->b;
+ out.a = dict->a;
+ return true;
+ }
+ if (auto* vec = std::get_if<std::vector<double>>(&in)) {
+ switch (vec->size()) {
+ default:
+ case 4:
+ out.a = (*vec)[3];
+ case 3: // fallthrough
+ out.b = (*vec)[2];
+ case 2: // fallthrough
+ out.g = (*vec)[1];
+ case 1: // fallthrough
+ out.r = (*vec)[0];
+ return true;
+ case 0:
+ break;
+ }
+ }
+ Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::Origin3D& out,
+ const std::vector<interop::GPUIntegerCoordinate>& in) {
+ out = {};
+ switch (in.size()) {
+ default:
+ case 3:
+ out.z = in[2];
+ case 2: // fallthrough
+ out.y = in[1];
+ case 1: // fallthrough
+ out.x = in[0];
+ case 0:
+ break;
+ }
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
+ out = wgpu::TextureAspect::All;
+ switch (in) {
+ case interop::GPUTextureAspect::kAll:
+ out = wgpu::TextureAspect::All;
+ return true;
+ case interop::GPUTextureAspect::kStencilOnly:
+ out = wgpu::TextureAspect::StencilOnly;
+ return true;
+ case interop::GPUTextureAspect::kDepthOnly:
+ out = wgpu::TextureAspect::DepthOnly;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
+ out = {};
+ return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
+ Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+ }
+
+ bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
+ out = {};
+ out.buffer = *in.buffer.As<GPUBuffer>();
+ return Convert(out.layout.offset, in.offset) &&
+ Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
+ Convert(out.layout.rowsPerImage, in.rowsPerImage);
+ }
+
+ bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
+ out = {};
+ if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
+ std::visit(
+ [&](auto&& v) {
+ auto arr = v.ArrayBuffer();
+ out.data = arr.Data();
+ out.size = arr.ByteLength();
+ },
+ *view);
+ return true;
+ }
+ if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
+ out.data = arr->Data();
+ out.size = arr->ByteLength();
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
+ out = {};
+ return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
+ Convert(out.rowsPerImage, in.rowsPerImage);
+ }
+
+ bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
+ out = wgpu::TextureFormat::Undefined;
+ switch (in) {
+ case interop::GPUTextureFormat::kR8Unorm:
+ out = wgpu::TextureFormat::R8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Snorm:
+ out = wgpu::TextureFormat::R8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Uint:
+ out = wgpu::TextureFormat::R8Uint;
+ return true;
+ case interop::GPUTextureFormat::kR8Sint:
+ out = wgpu::TextureFormat::R8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Uint:
+ out = wgpu::TextureFormat::R16Uint;
+ return true;
+ case interop::GPUTextureFormat::kR16Sint:
+ out = wgpu::TextureFormat::R16Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Float:
+ out = wgpu::TextureFormat::R16Float;
+ return true;
+ case interop::GPUTextureFormat::kRg8Unorm:
+ out = wgpu::TextureFormat::RG8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Snorm:
+ out = wgpu::TextureFormat::RG8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Uint:
+ out = wgpu::TextureFormat::RG8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg8Sint:
+ out = wgpu::TextureFormat::RG8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Uint:
+ out = wgpu::TextureFormat::R32Uint;
+ return true;
+ case interop::GPUTextureFormat::kR32Sint:
+ out = wgpu::TextureFormat::R32Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Float:
+ out = wgpu::TextureFormat::R32Float;
+ return true;
+ case interop::GPUTextureFormat::kRg16Uint:
+ out = wgpu::TextureFormat::RG16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Sint:
+ out = wgpu::TextureFormat::RG16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Float:
+ out = wgpu::TextureFormat::RG16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Unorm:
+ out = wgpu::TextureFormat::RGBA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8UnormSrgb:
+ out = wgpu::TextureFormat::RGBA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Snorm:
+ out = wgpu::TextureFormat::RGBA8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Uint:
+ out = wgpu::TextureFormat::RGBA8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Sint:
+ out = wgpu::TextureFormat::RGBA8Sint;
+ return true;
+ case interop::GPUTextureFormat::kBgra8Unorm:
+ out = wgpu::TextureFormat::BGRA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kBgra8UnormSrgb:
+ out = wgpu::TextureFormat::BGRA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgb9E5Ufloat:
+ out = wgpu::TextureFormat::RGB9E5Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRgb10A2Unorm:
+ out = wgpu::TextureFormat::RGB10A2Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg11B10Ufloat:
+ out = wgpu::TextureFormat::RG11B10Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRg32Uint:
+ out = wgpu::TextureFormat::RG32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Sint:
+ out = wgpu::TextureFormat::RG32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Float:
+ out = wgpu::TextureFormat::RG32Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Uint:
+ out = wgpu::TextureFormat::RGBA16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Sint:
+ out = wgpu::TextureFormat::RGBA16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Float:
+ out = wgpu::TextureFormat::RGBA16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Uint:
+ out = wgpu::TextureFormat::RGBA32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Sint:
+ out = wgpu::TextureFormat::RGBA32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Float:
+ out = wgpu::TextureFormat::RGBA32Float;
+ return true;
+ case interop::GPUTextureFormat::kStencil8:
+ out = wgpu::TextureFormat::Stencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth16Unorm:
+ out = wgpu::TextureFormat::Depth16Unorm;
+ return true;
+ case interop::GPUTextureFormat::kDepth24Plus:
+ out = wgpu::TextureFormat::Depth24Plus;
+ return true;
+ case interop::GPUTextureFormat::kDepth24PlusStencil8:
+ out = wgpu::TextureFormat::Depth24PlusStencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth32Float:
+ out = wgpu::TextureFormat::Depth32Float;
+ return true;
+ case interop::GPUTextureFormat::kDepth24UnormStencil8:
+ out = wgpu::TextureFormat::Depth24UnormStencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth32FloatStencil8:
+ out = wgpu::TextureFormat::Depth32FloatStencil8;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnorm:
+ out = wgpu::TextureFormat::BC1RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnorm:
+ out = wgpu::TextureFormat::BC2RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnorm:
+ out = wgpu::TextureFormat::BC3RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc4RUnorm:
+ out = wgpu::TextureFormat::BC4RUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc4RSnorm:
+ out = wgpu::TextureFormat::BC4RSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgUnorm:
+ out = wgpu::TextureFormat::BC5RGUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgSnorm:
+ out = wgpu::TextureFormat::BC5RGSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbUfloat:
+ out = wgpu::TextureFormat::BC6HRGBUfloat;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbFloat:
+ out = wgpu::TextureFormat::BC6HRGBFloat;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnorm:
+ out = wgpu::TextureFormat::BC7RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
+ out = wgpu::TextureFormat::ETC2RGB8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
+ out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
+ out = wgpu::TextureFormat::ETC2RGBA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
+ out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kEacR11Unorm:
+ out = wgpu::TextureFormat::EACR11Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEacR11Snorm:
+ out = wgpu::TextureFormat::EACR11Snorm;
+ return true;
+ case interop::GPUTextureFormat::kEacRg11Unorm:
+ out = wgpu::TextureFormat::EACRG11Unorm;
+ return true;
+ case interop::GPUTextureFormat::kEacRg11Snorm:
+ out = wgpu::TextureFormat::EACRG11Snorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc4X4Unorm:
+ out = wgpu::TextureFormat::ASTC4x4Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
+ out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X4Unorm:
+ out = wgpu::TextureFormat::ASTC5x4Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
+ out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X5Unorm:
+ out = wgpu::TextureFormat::ASTC5x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X5Unorm:
+ out = wgpu::TextureFormat::ASTC6x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X6Unorm:
+ out = wgpu::TextureFormat::ASTC6x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X5Unorm:
+ out = wgpu::TextureFormat::ASTC8x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X6Unorm:
+ out = wgpu::TextureFormat::ASTC8x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X8Unorm:
+ out = wgpu::TextureFormat::ASTC8x8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
+ out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X5Unorm:
+ out = wgpu::TextureFormat::ASTC10x5Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X6Unorm:
+ out = wgpu::TextureFormat::ASTC10x6Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X8Unorm:
+ out = wgpu::TextureFormat::ASTC10x8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X10Unorm:
+ out = wgpu::TextureFormat::ASTC10x10Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
+ out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X10Unorm:
+ out = wgpu::TextureFormat::ASTC12x10Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
+ out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X12Unorm:
+ out = wgpu::TextureFormat::ASTC12x12Unorm;
+ return true;
+ case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
+ out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
+ out = static_cast<wgpu::TextureUsage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
+ out = static_cast<wgpu::ColorWriteMask>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
+ out = static_cast<wgpu::BufferUsage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
+ out = static_cast<wgpu::MapMode>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
+ out = static_cast<wgpu::ShaderStage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
+ out = wgpu::TextureDimension::e1D;
+ switch (in) {
+ case interop::GPUTextureDimension::k1D:
+ out = wgpu::TextureDimension::e1D;
+ return true;
+ case interop::GPUTextureDimension::k2D:
+ out = wgpu::TextureDimension::e2D;
+ return true;
+ case interop::GPUTextureDimension::k3D:
+ out = wgpu::TextureDimension::e3D;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureViewDimension& out,
+ const interop::GPUTextureViewDimension& in) {
+ out = wgpu::TextureViewDimension::Undefined;
+ switch (in) {
+ case interop::GPUTextureViewDimension::k1D:
+ out = wgpu::TextureViewDimension::e1D;
+ return true;
+ case interop::GPUTextureViewDimension::k2D:
+ out = wgpu::TextureViewDimension::e2D;
+ return true;
+ case interop::GPUTextureViewDimension::k2DArray:
+ out = wgpu::TextureViewDimension::e2DArray;
+ return true;
+ case interop::GPUTextureViewDimension::kCube:
+ out = wgpu::TextureViewDimension::Cube;
+ return true;
+ case interop::GPUTextureViewDimension::kCubeArray:
+ out = wgpu::TextureViewDimension::CubeArray;
+ return true;
+ case interop::GPUTextureViewDimension::k3D:
+ out = wgpu::TextureViewDimension::e3D;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
+ const interop::GPUProgrammableStage& in) {
+ out = {};
+ out.entryPoint = in.entryPoint.c_str();
+ out.module = *in.module.As<GPUShaderModule>();
+ return Convert(out.constants, out.constantCount, in.constants);
+ }
+
+ bool Converter::Convert(wgpu::ConstantEntry& out,
+ const std::string& in_name,
+ wgpu::interop::GPUPipelineConstantValue in_value) {
+ out.key = in_name.c_str();
+ out.value = in_value;
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
+ out = {};
+ return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
+ Convert(out.srcFactor, in.srcFactor);
+ }
+
+ bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
+ out = wgpu::BlendFactor::Zero;
+ switch (in) {
+ case interop::GPUBlendFactor::kZero:
+ out = wgpu::BlendFactor::Zero;
+ return true;
+ case interop::GPUBlendFactor::kOne:
+ out = wgpu::BlendFactor::One;
+ return true;
+ case interop::GPUBlendFactor::kSrc:
+ out = wgpu::BlendFactor::Src;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrc:
+ out = wgpu::BlendFactor::OneMinusSrc;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlpha:
+ out = wgpu::BlendFactor::SrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrcAlpha:
+ out = wgpu::BlendFactor::OneMinusSrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kDst:
+ out = wgpu::BlendFactor::Dst;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDst:
+ out = wgpu::BlendFactor::OneMinusDst;
+ return true;
+ case interop::GPUBlendFactor::kDstAlpha:
+ out = wgpu::BlendFactor::DstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDstAlpha:
+ out = wgpu::BlendFactor::OneMinusDstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlphaSaturated:
+ out = wgpu::BlendFactor::SrcAlphaSaturated;
+ return true;
+ case interop::GPUBlendFactor::kConstant:
+ out = wgpu::BlendFactor::Constant;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusConstant:
+ out = wgpu::BlendFactor::OneMinusConstant;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
+ out = wgpu::BlendOperation::Add;
+ switch (in) {
+ case interop::GPUBlendOperation::kAdd:
+ out = wgpu::BlendOperation::Add;
+ return true;
+ case interop::GPUBlendOperation::kSubtract:
+ out = wgpu::BlendOperation::Subtract;
+ return true;
+ case interop::GPUBlendOperation::kReverseSubtract:
+ out = wgpu::BlendOperation::ReverseSubtract;
+ return true;
+ case interop::GPUBlendOperation::kMin:
+ out = wgpu::BlendOperation::Min;
+ return true;
+ case interop::GPUBlendOperation::kMax:
+ out = wgpu::BlendOperation::Max;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
+ out = {};
+ return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
+ }
+
+ bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
+ out = {};
+ return Convert(out.topology, in.topology) &&
+ Convert(out.stripIndexFormat, in.stripIndexFormat) &&
+ Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
+ }
+
+ bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
+ out = {};
+ return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
+ Convert(out.writeMask, in.writeMask);
+ }
+
+ bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
+ out = {};
+ return Convert(out.format, in.format) &&
+ Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
+ Convert(out.depthCompare, in.depthCompare) &&
+ Convert(out.stencilFront, in.stencilFront) &&
+ Convert(out.stencilBack, in.stencilBack) &&
+ Convert(out.stencilReadMask, in.stencilReadMask) &&
+ Convert(out.stencilWriteMask, in.stencilWriteMask) &&
+ Convert(out.depthBias, in.depthBias) &&
+ Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
+ Convert(out.depthBiasClamp, in.depthBiasClamp);
+ }
+
+ bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
+ out = {};
+ return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
+ Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
+ }
+
+ bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
+ out = {};
+ return Convert(out.targets, out.targetCount, in.targets) && //
+ Convert(out.module, in.module) && //
+ Convert(out.entryPoint, in.entryPoint) && //
+ Convert(out.constants, out.constantCount, in.constants);
+ }
+
+ bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
+ out = wgpu::PrimitiveTopology::LineList;
+ switch (in) {
+ case interop::GPUPrimitiveTopology::kPointList:
+ out = wgpu::PrimitiveTopology::PointList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineList:
+ out = wgpu::PrimitiveTopology::LineList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineStrip:
+ out = wgpu::PrimitiveTopology::LineStrip;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleList:
+ out = wgpu::PrimitiveTopology::TriangleList;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleStrip:
+ out = wgpu::PrimitiveTopology::TriangleStrip;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
+ out = wgpu::FrontFace::CW;
+ switch (in) {
+ case interop::GPUFrontFace::kCw:
+ out = wgpu::FrontFace::CW;
+ return true;
+ case interop::GPUFrontFace::kCcw:
+ out = wgpu::FrontFace::CCW;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
+ out = wgpu::CullMode::None;
+ switch (in) {
+ case interop::GPUCullMode::kNone:
+ out = wgpu::CullMode::None;
+ return true;
+ case interop::GPUCullMode::kFront:
+ out = wgpu::CullMode::Front;
+ return true;
+ case interop::GPUCullMode::kBack:
+ out = wgpu::CullMode::Back;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
+ return false;
+ }
+
+    bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {  // interop compare function -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::CompareFunction::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUCompareFunction::kNever:
+                out = wgpu::CompareFunction::Never;
+                return true;
+            case interop::GPUCompareFunction::kLess:
+                out = wgpu::CompareFunction::Less;
+                return true;
+            case interop::GPUCompareFunction::kLessEqual:
+                out = wgpu::CompareFunction::LessEqual;
+                return true;
+            case interop::GPUCompareFunction::kGreater:
+                out = wgpu::CompareFunction::Greater;
+                return true;
+            case interop::GPUCompareFunction::kGreaterEqual:
+                out = wgpu::CompareFunction::GreaterEqual;
+                return true;
+            case interop::GPUCompareFunction::kEqual:
+                out = wgpu::CompareFunction::Equal;
+                return true;
+            case interop::GPUCompareFunction::kNotEqual:
+                out = wgpu::CompareFunction::NotEqual;
+                return true;
+            case interop::GPUCompareFunction::kAlways:
+                out = wgpu::CompareFunction::Always;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {  // interop index format -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::IndexFormat::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUIndexFormat::kUint16:
+                out = wgpu::IndexFormat::Uint16;
+                return true;
+            case interop::GPUIndexFormat::kUint32:
+                out = wgpu::IndexFormat::Uint32;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {  // interop stencil op -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::StencilOperation::Zero;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUStencilOperation::kKeep:
+                out = wgpu::StencilOperation::Keep;
+                return true;
+            case interop::GPUStencilOperation::kZero:
+                out = wgpu::StencilOperation::Zero;
+                return true;
+            case interop::GPUStencilOperation::kReplace:
+                out = wgpu::StencilOperation::Replace;
+                return true;
+            case interop::GPUStencilOperation::kInvert:
+                out = wgpu::StencilOperation::Invert;
+                return true;
+            case interop::GPUStencilOperation::kIncrementClamp:
+                out = wgpu::StencilOperation::IncrementClamp;
+                return true;
+            case interop::GPUStencilOperation::kDecrementClamp:
+                out = wgpu::StencilOperation::DecrementClamp;
+                return true;
+            case interop::GPUStencilOperation::kIncrementWrap:
+                out = wgpu::StencilOperation::IncrementWrap;
+                return true;
+            case interop::GPUStencilOperation::kDecrementWrap:
+                out = wgpu::StencilOperation::DecrementWrap;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {  // Field-wise conversion; short-circuits on the first failing field (which has already thrown).
+        return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
+               Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+    }
+
+    bool Converter::Convert(wgpu::VertexBufferLayout& out,
+                            const interop::GPUVertexBufferLayout& in) {  // Converts the attribute array (Converter-owned heap allocation), stride and step mode.
+        out = {};
+        return Convert(out.attributes, out.attributeCount, in.attributes) &&
+               Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+    }
+
+    bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {  // Field-wise conversion; buffer and constant arrays are Converter-owned heap allocations.
+        out = {};
+        return Convert(out.module, in.module) &&
+               Convert(out.buffers, out.bufferCount, in.buffers) &&
+               Convert(out.entryPoint, in.entryPoint) &&
+               Convert(out.constants, out.constantCount, in.constants);
+    }
+
+    bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {  // interop step mode -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::VertexStepMode::Instance;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUVertexStepMode::kInstance:
+                out = wgpu::VertexStepMode::Instance;
+                return true;
+            case interop::GPUVertexStepMode::kVertex:
+                out = wgpu::VertexStepMode::Vertex;
+                return true;
+            default:  // Unlike the sibling converters this switch has an explicit default; both styles reach the error below.
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {  // Field-wise conversion; short-circuits on the first failing field.
+        return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
+               Convert(out.shaderLocation, in.shaderLocation);
+    }
+
+    bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {  // interop vertex format -> Dawn enum (1:1 spelling change, e.g. kUint8X2 -> Uint8x2).
+        out = wgpu::VertexFormat::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUVertexFormat::kUint8X2:
+                out = wgpu::VertexFormat::Uint8x2;
+                return true;
+            case interop::GPUVertexFormat::kUint8X4:
+                out = wgpu::VertexFormat::Uint8x4;
+                return true;
+            case interop::GPUVertexFormat::kSint8X2:
+                out = wgpu::VertexFormat::Sint8x2;
+                return true;
+            case interop::GPUVertexFormat::kSint8X4:
+                out = wgpu::VertexFormat::Sint8x4;
+                return true;
+            case interop::GPUVertexFormat::kUnorm8X2:
+                out = wgpu::VertexFormat::Unorm8x2;
+                return true;
+            case interop::GPUVertexFormat::kUnorm8X4:
+                out = wgpu::VertexFormat::Unorm8x4;
+                return true;
+            case interop::GPUVertexFormat::kSnorm8X2:
+                out = wgpu::VertexFormat::Snorm8x2;
+                return true;
+            case interop::GPUVertexFormat::kSnorm8X4:
+                out = wgpu::VertexFormat::Snorm8x4;
+                return true;
+            case interop::GPUVertexFormat::kUint16X2:
+                out = wgpu::VertexFormat::Uint16x2;
+                return true;
+            case interop::GPUVertexFormat::kUint16X4:
+                out = wgpu::VertexFormat::Uint16x4;
+                return true;
+            case interop::GPUVertexFormat::kSint16X2:
+                out = wgpu::VertexFormat::Sint16x2;
+                return true;
+            case interop::GPUVertexFormat::kSint16X4:
+                out = wgpu::VertexFormat::Sint16x4;
+                return true;
+            case interop::GPUVertexFormat::kUnorm16X2:
+                out = wgpu::VertexFormat::Unorm16x2;
+                return true;
+            case interop::GPUVertexFormat::kUnorm16X4:
+                out = wgpu::VertexFormat::Unorm16x4;
+                return true;
+            case interop::GPUVertexFormat::kSnorm16X2:
+                out = wgpu::VertexFormat::Snorm16x2;
+                return true;
+            case interop::GPUVertexFormat::kSnorm16X4:
+                out = wgpu::VertexFormat::Snorm16x4;
+                return true;
+            case interop::GPUVertexFormat::kFloat16X2:
+                out = wgpu::VertexFormat::Float16x2;
+                return true;
+            case interop::GPUVertexFormat::kFloat16X4:
+                out = wgpu::VertexFormat::Float16x4;
+                return true;
+            case interop::GPUVertexFormat::kFloat32:
+                out = wgpu::VertexFormat::Float32;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X2:
+                out = wgpu::VertexFormat::Float32x2;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X3:
+                out = wgpu::VertexFormat::Float32x3;
+                return true;
+            case interop::GPUVertexFormat::kFloat32X4:
+                out = wgpu::VertexFormat::Float32x4;
+                return true;
+            case interop::GPUVertexFormat::kUint32:
+                out = wgpu::VertexFormat::Uint32;
+                return true;
+            case interop::GPUVertexFormat::kUint32X2:
+                out = wgpu::VertexFormat::Uint32x2;
+                return true;
+            case interop::GPUVertexFormat::kUint32X3:
+                out = wgpu::VertexFormat::Uint32x3;
+                return true;
+            case interop::GPUVertexFormat::kUint32X4:
+                out = wgpu::VertexFormat::Uint32x4;
+                return true;
+            case interop::GPUVertexFormat::kSint32:
+                out = wgpu::VertexFormat::Sint32;
+                return true;
+            case interop::GPUVertexFormat::kSint32X2:
+                out = wgpu::VertexFormat::Sint32x2;
+                return true;
+            case interop::GPUVertexFormat::kSint32X3:
+                out = wgpu::VertexFormat::Sint32x3;
+                return true;
+            case interop::GPUVertexFormat::kSint32X4:
+                out = wgpu::VertexFormat::Sint32x4;
+                return true;
+            default:  // Formats added to the IDL later fall through to the JS error below.
+                break;
+        }
+        Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
+                            const interop::GPURenderPassColorAttachment& in) {
+        out = {};
+        if (auto* op = std::get_if<interop::GPULoadOp>(&in.loadValue)) {  // loadValue is a variant: either an explicit load op ...
+            if (!Convert(out.loadOp, *op)) {
+                return false;
+            }
+        } else if (auto* color = std::get_if<interop::GPUColor>(&in.loadValue)) {  // ... or a clear color, which implies LoadOp::Clear.
+            out.loadOp = wgpu::LoadOp::Clear;
+            if (!Convert(out.clearColor, *color)) {
+                return false;
+            }
+        } else {
+            Napi::Error::New(env, "invalid value for GPURenderPassColorAttachment.loadValue")
+                .ThrowAsJavaScriptException();
+            return false;
+        }
+
+        return Convert(out.view, in.view) && Convert(out.resolveTarget, in.resolveTarget) &&
+               Convert(out.storeOp, in.storeOp);
+    }
+
+    bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                            const interop::GPURenderPassDepthStencilAttachment& in) {
+        out = {};
+        if (auto* op = std::get_if<interop::GPULoadOp>(&in.depthLoadValue)) {
+            if (!Convert(out.depthLoadOp, *op)) {
+                return false;
+            }
+        } else if (auto* value = std::get_if<float>(&in.depthLoadValue)) {
+            out.depthLoadOp = wgpu::LoadOp::Clear;  // Fix: was out.stencilLoadOp, which clobbered the stencil op and left depthLoadOp unset.
+            if (!Convert(out.clearDepth, *value)) {
+                return false;
+            }
+        } else {
+            Napi::Error::New(env,
+                "invalid value for GPURenderPassDepthStencilAttachment.depthLoadValue")
+                .ThrowAsJavaScriptException();
+            return false;
+        }
+
+        if (auto* op = std::get_if<interop::GPULoadOp>(&in.stencilLoadValue)) {
+            if (!Convert(out.stencilLoadOp, *op)) {
+                return false;
+            }
+        } else if (auto* value = std::get_if<interop::GPUStencilValue>(&in.stencilLoadValue)) {
+            out.stencilLoadOp = wgpu::LoadOp::Clear;  // Mirror the depth branch: a raw clear value implies LoadOp::Clear.
+            if (!Convert(out.clearStencil, *value)) {
+                return false;
+            }
+        } else {
+            Napi::Error::New(env,
+                             "invalid value for GPURenderPassDepthStencilAttachment.stencilLoadValue")
+                .ThrowAsJavaScriptException();
+            return false;
+        }
+
+        return Convert(out.view, in.view) && Convert(out.depthStoreOp, in.depthStoreOp) &&
+               Convert(out.depthReadOnly, in.depthReadOnly) &&
+               Convert(out.stencilStoreOp, in.stencilStoreOp) &&
+               Convert(out.stencilReadOnly, in.stencilReadOnly);
+    }
+
+    bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {  // Only 'load' exists as an interop load op; clears are expressed via the clear-value alternative of the loadValue variants.
+        out = wgpu::LoadOp::Clear;  // Defensive default; overwritten on the single handled case.
+        switch (in) {
+            case interop::GPULoadOp::kLoad:
+                out = wgpu::LoadOp::Load;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {  // interop store op -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::StoreOp::Store;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUStoreOp::kStore:
+                out = wgpu::StoreOp::Store;
+                return true;
+            case interop::GPUStoreOp::kDiscard:
+                out = wgpu::StoreOp::Discard;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {  // Dispatches on the resource variant: sampler, texture view, buffer binding, or external texture.
+        out = {};
+        if (!Convert(out.binding, in.binding)) {
+            return false;
+        }
+
+        if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
+            return Convert(out.sampler, *res);
+        }
+        if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
+            return Convert(out.textureView, *res);
+        }
+        if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
+            auto buffer = res->buffer.As<GPUBuffer>();
+            out.size = wgpu::kWholeSize;  // Default when res->size is absent (the optional Convert() leaves 'out.size' untouched).
+            if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
+                return false;
+            }
+            out.buffer = *buffer;
+            return true;
+        }
+        if (auto* res =
+                std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
+            // TODO(crbug.com/dawn/1129): External textures. NOTE(review): presumably UNIMPLEMENTED() aborts, so the fall-through below is unreachable — confirm macro semantics.
+            UNIMPLEMENTED();
+        }
+        Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
+                            const interop::GPUBindGroupLayoutEntry& in) {  // Field-wise conversion; each sub-layout is optional on the interop side.
+        // TODO(crbug.com/dawn/1129): External textures
+        return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
+               Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
+               Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+    }
+
+    bool Converter::Convert(wgpu::BufferBindingLayout& out,
+                            const interop::GPUBufferBindingLayout& in) {  // Field-wise conversion; short-circuits on the first failing field.
+        return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
+               Convert(out.minBindingSize, in.minBindingSize);
+    }
+
+    bool Converter::Convert(wgpu::SamplerBindingLayout& out,
+                            const interop::GPUSamplerBindingLayout& in) {  // Single-field conversion of the sampler binding type.
+        return Convert(out.type, in.type);
+    }
+
+    bool Converter::Convert(wgpu::TextureBindingLayout& out,
+                            const interop::GPUTextureBindingLayout& in) {  // Field-wise conversion; short-circuits on the first failing field.
+        return Convert(out.sampleType, in.sampleType) &&
+               Convert(out.viewDimension, in.viewDimension) &&
+               Convert(out.multisampled, in.multisampled);
+    }
+
+    bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
+                            const interop::GPUStorageTextureBindingLayout& in) {  // Field-wise conversion; short-circuits on the first failing field.
+        return Convert(out.access, in.access) && Convert(out.format, in.format) &&
+               Convert(out.viewDimension, in.viewDimension);
+    }
+
+    bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {  // interop buffer binding type -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::BufferBindingType::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUBufferBindingType::kUniform:
+                out = wgpu::BufferBindingType::Uniform;
+                return true;
+            case interop::GPUBufferBindingType::kStorage:
+                out = wgpu::BufferBindingType::Storage;
+                return true;
+            case interop::GPUBufferBindingType::kReadOnlyStorage:
+                out = wgpu::BufferBindingType::ReadOnlyStorage;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUBufferBindingType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {  // interop sample type -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::TextureSampleType::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUTextureSampleType::kFloat:
+                out = wgpu::TextureSampleType::Float;
+                return true;
+            case interop::GPUTextureSampleType::kUnfilterableFloat:
+                out = wgpu::TextureSampleType::UnfilterableFloat;
+                return true;
+            case interop::GPUTextureSampleType::kDepth:
+                out = wgpu::TextureSampleType::Depth;
+                return true;
+            case interop::GPUTextureSampleType::kSint:
+                out = wgpu::TextureSampleType::Sint;
+                return true;
+            case interop::GPUTextureSampleType::kUint:
+                out = wgpu::TextureSampleType::Uint;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUTextureSampleType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::SamplerBindingType& out,
+                            const interop::GPUSamplerBindingType& in) {  // interop sampler binding type -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::SamplerBindingType::Undefined;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUSamplerBindingType::kFiltering:
+                out = wgpu::SamplerBindingType::Filtering;
+                return true;
+            case interop::GPUSamplerBindingType::kNonFiltering:
+                out = wgpu::SamplerBindingType::NonFiltering;
+                return true;
+            case interop::GPUSamplerBindingType::kComparison:
+                out = wgpu::SamplerBindingType::Comparison;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::StorageTextureAccess& out,
+                            const interop::GPUStorageTextureAccess& in) {  // Only write-only access is exposed by the interop enum here.
+        out = wgpu::StorageTextureAccess::Undefined;  // Defensive default; overwritten on the single handled case.
+        switch (in) {
+            case interop::GPUStorageTextureAccess::kWriteOnly:
+                out = wgpu::StorageTextureAccess::WriteOnly;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
+            .ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {  // interop query type -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::QueryType::Occlusion;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUQueryType::kOcclusion:
+                out = wgpu::QueryType::Occlusion;
+                return true;
+            case interop::GPUQueryType::kTimestamp:
+                out = wgpu::QueryType::Timestamp;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {  // interop address mode -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::AddressMode::Repeat;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUAddressMode::kClampToEdge:
+                out = wgpu::AddressMode::ClampToEdge;
+                return true;
+            case interop::GPUAddressMode::kRepeat:
+                out = wgpu::AddressMode::Repeat;
+                return true;
+            case interop::GPUAddressMode::kMirrorRepeat:
+                out = wgpu::AddressMode::MirrorRepeat;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {  // interop filter mode -> Dawn enum; throws a JS error and returns false on an unrecognized value.
+        out = wgpu::FilterMode::Nearest;  // Defensive default; overwritten on every handled case.
+        switch (in) {
+            case interop::GPUFilterMode::kNearest:
+                out = wgpu::FilterMode::Nearest;
+                return true;
+            case interop::GPUFilterMode::kLinear:
+                out = wgpu::FilterMode::Linear;
+                return true;
+        }
+        Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+        return false;
+    }
+
+    bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
+                            const interop::GPUComputePipelineDescriptor& in) {  // Field-wise conversion; short-circuits on the first failing field.
+        return Convert(out.label, in.label) &&  //
+               Convert(out.layout, in.layout) &&  //
+               Convert(out.compute, in.compute);
+    }
+
+    bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
+                            const interop::GPURenderPipelineDescriptor& in) {
+        // Fix: removed an unused local wgpu::RenderPipelineDescriptor; conversion writes directly into 'out'.
+        return Convert(out.label, in.label) &&  //
+               Convert(out.layout, in.layout) &&  //
+               Convert(out.vertex, in.vertex) &&  //
+               Convert(out.primitive, in.primitive) &&  //
+               Convert(out.depthStencil, in.depthStencil) &&  //
+               Convert(out.multisample, in.multisample) &&  //
+               Convert(out.fragment, in.fragment);
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Converter.h b/chromium/third_party/dawn/src/dawn/node/binding/Converter.h
new file mode 100644
index 00000000000..cfe38e681cb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Converter.h
@@ -0,0 +1,395 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_CONVERTER_H_
+#define DAWN_NODE_BINDING_CONVERTER_H_
+
+#include <functional>
+#include <type_traits>
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
+    // binding implementation type.
+    template <typename T>
+    struct ImplOfTraits {};  // Primary template is intentionally empty; only DECLARE_IMPL() specializations provide ::type.
+
+    // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
+    // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
+#define DECLARE_IMPL(NAME)                     \
+    class NAME;                                \
+    template <>                                \
+    struct ImplOfTraits<interop::NAME> {       \
+        using type = binding::NAME;            \
+    }
+
+    // Declare the interop interface to binding implementations
+    DECLARE_IMPL(GPUBindGroup);
+    DECLARE_IMPL(GPUBindGroupLayout);
+    DECLARE_IMPL(GPUBuffer);
+    DECLARE_IMPL(GPUPipelineLayout);
+    DECLARE_IMPL(GPUQuerySet);
+    DECLARE_IMPL(GPURenderBundle);
+    DECLARE_IMPL(GPURenderPipeline);
+    DECLARE_IMPL(GPUSampler);
+    DECLARE_IMPL(GPUShaderModule);
+    DECLARE_IMPL(GPUTexture);
+    DECLARE_IMPL(GPUTextureView);
+#undef DECLARE_IMPL  // Keep the helper macro scoped to this header.
+
+    // Helper for obtaining the binding implementation type from the interop interface type
+    template <typename T>
+    using ImplOf = typename ImplOfTraits<T>::type;
+
+ // Converter is a utility class for converting IDL generated interop types into Dawn types.
+ // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
+ // heap allocations for conversions of vector or optional types. These pointers are
+ // automatically freed when the Converter is destructed.
+    class Converter {
+      public:
+        Converter(Napi::Env e) : env(e) {  // NOTE(review): consider marking explicit — TODO confirm no caller relies on implicit Env->Converter conversion.
+        }
+        ~Converter();
+
+        // Conversion function. Converts the interop type IN to the Dawn type OUT.
+        // Returns true on success, false on failure.
+        template <typename OUT, typename IN>
+        [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
+            return Convert(std::forward<OUT>(out), std::forward<IN>(in));
+        }
+
+        // Vector conversion function. Converts the vector of interop type IN to a pointer of
+        // elements of Dawn type OUT, which is assigned to 'out_els'.
+        // out_count is assigned the number of elements in 'in'.
+        // Returns true on success, false on failure.
+        // The pointer assigned to 'out_els' is valid until the Converter is destructed.
+        template <typename OUT, typename IN>
+        [[nodiscard]] inline bool operator()(OUT*& out_els,
+                                             uint32_t& out_count,
+                                             const std::vector<IN>& in) {
+            return Convert(out_els, out_count, in);
+        }
+
+        // Returns the Env that this Converter was constructed with.
+        inline Napi::Env Env() const {
+            return env;
+        }
+
+        // BufferSource is the converted type of interop::BufferSource.
+        struct BufferSource {
+            void* data;
+            size_t size;
+        };
+
+      private:
+        // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
+        [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
+
+        [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
+
+        [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
+
+        [[nodiscard]] bool Convert(wgpu::Origin3D& out,
+                                   const std::vector<interop::GPUIntegerCoordinate>& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
+
+        [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
+                                   const interop::GPUImageCopyTexture& in);
+
+        [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
+                                   const interop::GPUImageCopyBuffer& in);
+
+        [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
+                                   const interop::GPUImageDataLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
+                                   const interop::GPUTextureUsageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
+                                   const interop::GPUColorWriteFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
+                                   const interop::GPUTextureDimension& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
+                                   const interop::GPUTextureViewDimension& in);
+
+        [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
+                                   const interop::GPUProgrammableStage& in);
+
+        [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
+                                   const std::string& in_name,
+                                   interop::GPUPipelineConstantValue in_value);  // Consistency: was 'wgpu::interop::' — normalized to match every sibling declaration (same type).
+
+        [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
+
+        [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
+
+        [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
+
+        [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
+                                   const interop::GPUColorTargetState& in);
+
+        [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
+                                   const interop::GPUDepthStencilState& in);
+
+        [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
+                                   const interop::GPUMultisampleState& in);
+
+        [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
+
+        [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
+                                   const interop::GPUPrimitiveTopology& in);
+
+        [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
+
+        [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
+                                   const interop::GPUCompareFunction& in);
+
+        [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
+                                   const interop::GPUStencilOperation& in);
+
+        [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
+                                   const interop::GPUStencilFaceState& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
+                                   const interop::GPUVertexBufferLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
+                                   const interop::GPUVertexAttribute& in);
+
+        [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
+                                   const interop::GPURenderPassColorAttachment& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                                   const interop::GPURenderPassDepthStencilAttachment& in);
+
+        [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
+
+        [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
+
+        [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
+
+        [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
+                                   const interop::GPUBindGroupLayoutEntry& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
+                                   const interop::GPUBufferBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
+                                   const interop::GPUSamplerBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
+                                   const interop::GPUTextureBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
+                                   const interop::GPUStorageTextureBindingLayout& in);
+
+        [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
+                                   const interop::GPUBufferBindingType& in);
+
+        [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
+                                   const interop::GPUSamplerBindingType& in);
+
+        [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
+                                   const interop::GPUTextureSampleType& in);
+
+        [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
+                                   const interop::GPUStorageTextureAccess& in);
+
+        [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
+
+        [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
+
+        [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
+                                   const interop::GPUComputePipelineDescriptor& in);
+
+        [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
+                                   const interop::GPURenderPipelineDescriptor& in);
+
+        // std::string to C string
+        inline bool Convert(const char*& out, const std::string& in) {
+            out = in.c_str();  // Pointer is only valid while 'in' is alive; callers pass strings that outlive the Converter use.
+            return true;
+        }
+
+        // Pass-through (no conversion)
+        template <typename T>
+        inline bool Convert(T& out, const T& in) {
+            out = in;
+            return true;
+        }
+
+        // Integral number conversion, with dynamic limit checking
+        template <typename OUT,
+                  typename IN,
+                  typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
+        inline bool Convert(OUT& out, const IN& in) {
+            out = static_cast<OUT>(in);
+            if (static_cast<IN>(out) != in) {  // NOTE(review): misses negative IN -> unsigned OUT of equal width (e.g. int32 -1 -> uint32) — TODO confirm upstream intent.
+                Napi::Error::New(env, "Integer value (" + std::to_string(in) +
+                                          ") cannot be converted to the Dawn data type without "
+                                          "truncation of the value")
+                    .ThrowAsJavaScriptException();
+                return false;
+            }
+            return true;
+        }
+
+        template <typename OUT, typename... IN_TYPES>
+        inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
+            return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+        }
+
+        // If the std::optional does not have a value, then Convert() simply returns true and 'out'
+        // is not assigned a new value.
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT& out, const std::optional<IN>& in) {
+            if (in.has_value()) {
+                return Convert(out, in.value());
+            }
+            return true;
+        }
+
+        // std::optional -> T*
+        // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
+        // whether 'in' has a value.
+        template <typename OUT,
+                  typename IN,
+                  typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
+        inline bool Convert(OUT*& out, const std::optional<IN>& in) {
+            if (in.has_value()) {
+                auto* el = Allocate<std::remove_const_t<OUT>>();  // Converter-owned; freed in the destructor.
+                if (!Convert(*el, in.value())) {
+                    return false;
+                }
+                out = el;
+            } else {
+                out = nullptr;
+            }
+            return true;
+        }
+
+        // interop::Interface -> Dawn object
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
+            using Impl = ImplOf<IN>;
+            out = *in.template As<Impl>();
+            if (!out) {
+                LOG("Dawn object has been destroyed. This should not happen");
+                return false;
+            }
+            return true;
+        }
+
+        // vector -> raw pointer + count
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
+            if (in.size() == 0) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+            for (size_t i = 0; i < in.size(); i++) {
+                if (!Convert(els[i], in[i])) {
+                    return false;
+                }
+            }
+            out_els = els;
+            return Convert(out_count, in.size());  // Checked narrowing: size_t -> uint32_t may throw and fail.
+        }
+
+        // unordered_map -> raw pointer + count
+        template <typename OUT, typename IN_KEY, typename IN_VALUE>
+        inline bool Convert(OUT*& out_els,
+                            uint32_t& out_count,
+                            const std::unordered_map<IN_KEY, IN_VALUE>& in) {
+            if (in.size() == 0) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+            size_t i = 0;
+            for (auto& [key, value] : in) {
+                if (!Convert(els[i++], key, value)) {
+                    return false;
+                }
+            }
+            out_els = els;
+            return Convert(out_count, in.size());
+        }
+
+        // std::optional<T> -> raw pointer + count
+        template <typename OUT, typename IN>
+        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
+            if (!in.has_value()) {
+                out_els = nullptr;
+                out_count = 0;
+                return true;
+            }
+            return Convert(out_els, out_count, in.value());
+        }
+
+        Napi::Env env;
+
+        // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
+        // the first element. The array is freed when the Converter is destructed.
+        template <typename T>
+        T* Allocate(size_t n = 1) {
+            auto* ptr = new T[n]{};  // Value-initialized so Dawn descriptors start zeroed.
+            free_.emplace_back([ptr] { delete[] ptr; });
+            return ptr;
+        }
+
+        std::vector<std::function<void()>> free_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_CONVERTER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp
new file mode 100644
index 00000000000..36cf2032be2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Errors.cpp
@@ -0,0 +1,179 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Errors.h"
+
+namespace wgpu::binding {
+
+ namespace {
+ constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
+ constexpr char kWrongDocumentError[] = "WrongDocumentError";
+ constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
+ constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
+ constexpr char kNotFoundError[] = "NotFoundError";
+ constexpr char kNotSupportedError[] = "NotSupportedError";
+ constexpr char kInUseAttributeError[] = "InUseAttributeError";
+ constexpr char kInvalidStateError[] = "InvalidStateError";
+ constexpr char kSyntaxError[] = "SyntaxError";
+ constexpr char kInvalidModificationError[] = "InvalidModificationError";
+ constexpr char kNamespaceError[] = "NamespaceError";
+ constexpr char kSecurityError[] = "SecurityError";
+ constexpr char kNetworkError[] = "NetworkError";
+ constexpr char kAbortError[] = "AbortError";
+ constexpr char kURLMismatchError[] = "URLMismatchError";
+ constexpr char kQuotaExceededError[] = "QuotaExceededError";
+ constexpr char kTimeoutError[] = "TimeoutError";
+ constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
+ constexpr char kDataCloneError[] = "DataCloneError";
+ constexpr char kEncodingError[] = "EncodingError";
+ constexpr char kNotReadableError[] = "NotReadableError";
+ constexpr char kUnknownError[] = "UnknownError";
+ constexpr char kConstraintError[] = "ConstraintError";
+ constexpr char kDataError[] = "DataError";
+ constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
+ constexpr char kReadOnlyError[] = "ReadOnlyError";
+ constexpr char kVersionError[] = "VersionError";
+ constexpr char kOperationError[] = "OperationError";
+ constexpr char kNotAllowedError[] = "NotAllowedError";
+
+ static Napi::Error New(Napi::Env env,
+ std::string name,
+ std::string message = {},
+ unsigned short code = 0) {
+ auto err = Napi::Error::New(env);
+ err.Set("name", name);
+ err.Set("message", message.empty() ? name : message);
+ err.Set("code", static_cast<double>(code));
+ return err;
+ }
+
+ } // namespace
+
+ Napi::Error Errors::HierarchyRequestError(Napi::Env env) {
+ return New(env, kHierarchyRequestError);
+ }
+
+ Napi::Error Errors::WrongDocumentError(Napi::Env env) {
+ return New(env, kWrongDocumentError);
+ }
+
+ Napi::Error Errors::InvalidCharacterError(Napi::Env env) {
+ return New(env, kInvalidCharacterError);
+ }
+
+ Napi::Error Errors::NoModificationAllowedError(Napi::Env env) {
+ return New(env, kNoModificationAllowedError);
+ }
+
+ Napi::Error Errors::NotFoundError(Napi::Env env) {
+ return New(env, kNotFoundError);
+ }
+
+ Napi::Error Errors::NotSupportedError(Napi::Env env) {
+ return New(env, kNotSupportedError);
+ }
+
+ Napi::Error Errors::InUseAttributeError(Napi::Env env) {
+ return New(env, kInUseAttributeError);
+ }
+
+ Napi::Error Errors::InvalidStateError(Napi::Env env) {
+ return New(env, kInvalidStateError);
+ }
+
+ Napi::Error Errors::SyntaxError(Napi::Env env) {
+ return New(env, kSyntaxError);
+ }
+
+ Napi::Error Errors::InvalidModificationError(Napi::Env env) {
+ return New(env, kInvalidModificationError);
+ }
+
+ Napi::Error Errors::NamespaceError(Napi::Env env) {
+ return New(env, kNamespaceError);
+ }
+
+ Napi::Error Errors::SecurityError(Napi::Env env) {
+ return New(env, kSecurityError);
+ }
+
+ Napi::Error Errors::NetworkError(Napi::Env env) {
+ return New(env, kNetworkError);
+ }
+
+ Napi::Error Errors::AbortError(Napi::Env env) {
+ return New(env, kAbortError);
+ }
+
+ Napi::Error Errors::URLMismatchError(Napi::Env env) {
+ return New(env, kURLMismatchError);
+ }
+
+ Napi::Error Errors::QuotaExceededError(Napi::Env env) {
+ return New(env, kQuotaExceededError);
+ }
+
+ Napi::Error Errors::TimeoutError(Napi::Env env) {
+ return New(env, kTimeoutError);
+ }
+
+ Napi::Error Errors::InvalidNodeTypeError(Napi::Env env) {
+ return New(env, kInvalidNodeTypeError);
+ }
+
+ Napi::Error Errors::DataCloneError(Napi::Env env) {
+ return New(env, kDataCloneError);
+ }
+
+ Napi::Error Errors::EncodingError(Napi::Env env) {
+ return New(env, kEncodingError);
+ }
+
+ Napi::Error Errors::NotReadableError(Napi::Env env) {
+ return New(env, kNotReadableError);
+ }
+
+ Napi::Error Errors::UnknownError(Napi::Env env) {
+ return New(env, kUnknownError);
+ }
+
+ Napi::Error Errors::ConstraintError(Napi::Env env) {
+ return New(env, kConstraintError);
+ }
+
+ Napi::Error Errors::DataError(Napi::Env env) {
+ return New(env, kDataError);
+ }
+
+ Napi::Error Errors::TransactionInactiveError(Napi::Env env) {
+ return New(env, kTransactionInactiveError);
+ }
+
+ Napi::Error Errors::ReadOnlyError(Napi::Env env) {
+ return New(env, kReadOnlyError);
+ }
+
+ Napi::Error Errors::VersionError(Napi::Env env) {
+ return New(env, kVersionError);
+ }
+
+ Napi::Error Errors::OperationError(Napi::Env env) {
+ return New(env, kOperationError);
+ }
+
+ Napi::Error Errors::NotAllowedError(Napi::Env env) {
+ return New(env, kNotAllowedError);
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Errors.h b/chromium/third_party/dawn/src/dawn/node/binding/Errors.h
new file mode 100644
index 00000000000..97a357793ab
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Errors.h
@@ -0,0 +1,60 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ERRORS_H_
+#define DAWN_NODE_BINDING_ERRORS_H_
+
+#include "napi.h"
+
+namespace wgpu::binding {
+
+ // Errors contains static helper methods for creating DOMException error
+ // messages as documented at:
+ // https://heycam.github.io/webidl/#idl-DOMException-error-names
+ class Errors {
+ public:
+ static Napi::Error HierarchyRequestError(Napi::Env);
+ static Napi::Error WrongDocumentError(Napi::Env);
+ static Napi::Error InvalidCharacterError(Napi::Env);
+ static Napi::Error NoModificationAllowedError(Napi::Env);
+ static Napi::Error NotFoundError(Napi::Env);
+ static Napi::Error NotSupportedError(Napi::Env);
+ static Napi::Error InUseAttributeError(Napi::Env);
+ static Napi::Error InvalidStateError(Napi::Env);
+ static Napi::Error SyntaxError(Napi::Env);
+ static Napi::Error InvalidModificationError(Napi::Env);
+ static Napi::Error NamespaceError(Napi::Env);
+ static Napi::Error SecurityError(Napi::Env);
+ static Napi::Error NetworkError(Napi::Env);
+ static Napi::Error AbortError(Napi::Env);
+ static Napi::Error URLMismatchError(Napi::Env);
+ static Napi::Error QuotaExceededError(Napi::Env);
+ static Napi::Error TimeoutError(Napi::Env);
+ static Napi::Error InvalidNodeTypeError(Napi::Env);
+ static Napi::Error DataCloneError(Napi::Env);
+ static Napi::Error EncodingError(Napi::Env);
+ static Napi::Error NotReadableError(Napi::Env);
+ static Napi::Error UnknownError(Napi::Env);
+ static Napi::Error ConstraintError(Napi::Env);
+ static Napi::Error DataError(Napi::Env);
+ static Napi::Error TransactionInactiveError(Napi::Env);
+ static Napi::Error ReadOnlyError(Napi::Env);
+ static Napi::Error VersionError(Napi::Env);
+ static Napi::Error OperationError(Napi::Env);
+ static Napi::Error NotAllowedError(Napi::Env);
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_ERRORS_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp b/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp
new file mode 100644
index 00000000000..40b0560eeea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Flags.cpp
@@ -0,0 +1,29 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/Flags.h"
+
+namespace wgpu::binding {
+ void Flags::Set(const std::string& key, const std::string& value) {
+ flags_[key] = value;
+ }
+
+ std::optional<std::string> Flags::Get(const std::string& key) const {
+ auto iter = flags_.find(key);
+ if (iter != flags_.end()) {
+ return iter->second;
+ }
+ return {};
+ }
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/Flags.h b/chromium/third_party/dawn/src/dawn/node/binding/Flags.h
new file mode 100644
index 00000000000..89b7b43fb9a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/Flags.h
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_FLAGS_H_
+#define DAWN_NODE_BINDING_FLAGS_H_
+
+#include <optional>
+#include <string>
+#include <unordered_map>
+
+namespace wgpu::binding {
+ // Flags maintains a key-value mapping of input flags passed into the module's create()
+ // function, used to configure dawn_node.
+ class Flags {
+ public:
+ void Set(const std::string& key, const std::string& value);
+ std::optional<std::string> Get(const std::string& key) const;
+
+ private:
+ std::unordered_map<std::string, std::string> flags_;
+ };
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_FLAGS_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp
new file mode 100644
index 00000000000..43472ec5461
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPU.cpp
@@ -0,0 +1,165 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPU.h"
+
+#include "src/dawn/node/binding/GPUAdapter.h"
+
+#include <cstdlib>
+
+#if defined(_WIN32)
+# include <Windows.h>
+#endif
+
+namespace {
+ std::string GetEnvVar(const char* varName) {
+#if defined(_WIN32)
+ // Use _dupenv_s to avoid unsafe warnings about std::getenv
+ char* value = nullptr;
+ _dupenv_s(&value, nullptr, varName);
+ if (value) {
+ std::string result = value;
+ free(value);
+ return result;
+ }
+ return "";
+#else
+ if (auto* val = std::getenv(varName)) {
+ return val;
+ }
+ return "";
+#endif
+ }
+
+ void SetDllDir(const char* dir) {
+ (void)dir;
+#if defined(_WIN32)
+ ::SetDllDirectory(dir);
+#endif
+ }
+
+} // namespace
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPU
+ ////////////////////////////////////////////////////////////////////////////////
+ GPU::GPU(Flags flags) : flags_(std::move(flags)) {
+        // TODO(dawn): Disable backend validation in release builds; it is enabled
+ instance_.EnableBackendValidation(true);
+ instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
+
+ // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
+ if (auto dir = flags_.Get("dlldir")) {
+ SetDllDir(dir->c_str());
+ }
+ instance_.DiscoverDefaultAdapters();
+ }
+
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) {
+ auto promise = interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(
+ env, PROMISE_INFO);
+
+ if (options.forceFallbackAdapter) {
+ // Software adapters are not currently supported.
+ promise.Resolve({});
+ return promise;
+ }
+
+ auto adapters = instance_.GetAdapters();
+ if (adapters.empty()) {
+ promise.Resolve({});
+ return promise;
+ }
+
+#if defined(_WIN32)
+ constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
+#elif defined(__linux__)
+ constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
+#elif defined(__APPLE__)
+ constexpr auto defaultBackendType = wgpu::BackendType::Metal;
+#else
+# error "Unsupported platform"
+#endif
+
+ auto targetBackendType = defaultBackendType;
+ std::string forceBackend;
+
+ // Check for override from env var
+ if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
+ forceBackend = envVar;
+ }
+
+ // Check for override from flag
+ if (auto f = flags_.Get("dawn-backend")) {
+ forceBackend = *f;
+ }
+
+ std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
+ [](char c) { return std::tolower(c); });
+
+ // Default to first adapter if a backend is not specified
+ size_t adapterIndex = 0;
+
+ if (!forceBackend.empty()) {
+ if (forceBackend == "null") {
+ targetBackendType = wgpu::BackendType::Null;
+ } else if (forceBackend == "webgpu") {
+ targetBackendType = wgpu::BackendType::WebGPU;
+ } else if (forceBackend == "d3d11") {
+ targetBackendType = wgpu::BackendType::D3D11;
+ } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
+ targetBackendType = wgpu::BackendType::D3D12;
+ } else if (forceBackend == "metal") {
+ targetBackendType = wgpu::BackendType::Metal;
+ } else if (forceBackend == "vulkan" || forceBackend == "vk") {
+ targetBackendType = wgpu::BackendType::Vulkan;
+ } else if (forceBackend == "opengl" || forceBackend == "gl") {
+ targetBackendType = wgpu::BackendType::OpenGL;
+ } else if (forceBackend == "opengles" || forceBackend == "gles") {
+ targetBackendType = wgpu::BackendType::OpenGLES;
+ } else {
+ promise.Reject("unknown backend '" + forceBackend + "'");
+ return promise;
+ }
+ }
+
+ bool found = false;
+ for (size_t i = 0; i < adapters.size(); ++i) {
+ wgpu::AdapterProperties props;
+ adapters[i].GetProperties(&props);
+ if (props.backendType == targetBackendType) {
+ adapterIndex = i;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ if (!forceBackend.empty()) {
+ promise.Reject("backend '" + forceBackend + "' not found");
+ } else {
+ promise.Reject("no suitable backends found");
+ }
+ return promise;
+ }
+
+ auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
+ promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+ return promise;
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPU.h b/chromium/third_party/dawn/src/dawn/node/binding/GPU.h
new file mode 100644
index 00000000000..de6b140ee09
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPU.h
@@ -0,0 +1,42 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPU_H_
+#define DAWN_NODE_BINDING_GPU_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+ // GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
+ class GPU final : public interop::GPU {
+ public:
+ GPU(Flags flags);
+
+ // interop::GPU interface compliance
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) override;
+
+ private:
+ const Flags flags_;
+ dawn::native::Instance instance_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPU_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp
new file mode 100644
index 00000000000..dadd300bdd4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.cpp
@@ -0,0 +1,252 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUAdapter.h"
+
+#include <unordered_set>
+
+#include "src/dawn/node/binding/Flags.h"
+#include "src/dawn/node/binding/GPUDevice.h"
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+
+namespace {
+ // TODO(amaiorano): Move to utility header
+ std::vector<std::string> Split(const std::string& s, char delim) {
+ if (s.empty())
+ return {};
+
+ std::vector<std::string> result;
+ const size_t lastIndex = s.length() - 1;
+ size_t startIndex = 0;
+ size_t i = startIndex;
+
+ while (i <= lastIndex) {
+ if (s[i] == delim) {
+ auto token = s.substr(startIndex, i - startIndex);
+ if (!token.empty()) // Discard empty tokens
+ result.push_back(token);
+ startIndex = i + 1;
+ } else if (i == lastIndex) {
+ auto token = s.substr(startIndex, i - startIndex + 1);
+ if (!token.empty()) // Discard empty tokens
+ result.push_back(token);
+ }
+ ++i;
+ }
+ return result;
+ }
+} // namespace
+
+namespace wgpu::binding {
+
+ namespace {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::binding::<anon>::Features
+ // Implements interop::GPUSupportedFeatures
+ ////////////////////////////////////////////////////////////////////////////////
+ class Features : public interop::GPUSupportedFeatures {
+ public:
+ Features(WGPUDeviceProperties properties) {
+ if (properties.depth24UnormStencil8) {
+ enabled_.emplace(interop::GPUFeatureName::kDepth24UnormStencil8);
+ }
+ if (properties.depth32FloatStencil8) {
+ enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
+ }
+ if (properties.timestampQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+ }
+ if (properties.textureCompressionBC) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
+ }
+ if (properties.textureCompressionETC2) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
+ }
+ if (properties.textureCompressionASTC) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
+ }
+ if (properties.timestampQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+ }
+
+ // TODO(dawn:1123) add support for these extensions when possible.
+ // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
+ // wgpu::interop::GPUFeatureName::kDepthClipControl
+ }
+
+ bool has(interop::GPUFeatureName feature) {
+ return enabled_.count(feature) != 0;
+ }
+
+ // interop::GPUSupportedFeatures compliance
+ bool has(Napi::Env, std::string name) override {
+ interop::GPUFeatureName feature;
+ if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
+ return has(feature);
+ }
+ return false;
+ }
+ std::vector<std::string> keys(Napi::Env) override {
+ std::vector<std::string> out;
+ out.reserve(enabled_.size());
+ for (auto feature : enabled_) {
+ out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
+ }
+ return out;
+ }
+
+ private:
+ std::unordered_set<interop::GPUFeatureName> enabled_;
+ };
+
+ } // namespace
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUAdapter
+ // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags)
+ : adapter_(a), flags_(flags) {
+ }
+
+ std::string GPUAdapter::getName(Napi::Env) {
+ return "dawn-adapter";
+ }
+
+ interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
+ return interop::GPUSupportedFeatures::Create<Features>(env,
+ adapter_.GetAdapterProperties());
+ }
+
+ interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
+ WGPUSupportedLimits limits{};
+ if (!adapter_.GetLimits(&limits)) {
+ Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
+ }
+
+ wgpu::SupportedLimits wgpuLimits{};
+
+#define COPY_LIMIT(LIMIT) wgpuLimits.limits.LIMIT = limits.limits.LIMIT
+ COPY_LIMIT(maxTextureDimension1D);
+ COPY_LIMIT(maxTextureDimension2D);
+ COPY_LIMIT(maxTextureDimension3D);
+ COPY_LIMIT(maxTextureArrayLayers);
+ COPY_LIMIT(maxBindGroups);
+ COPY_LIMIT(maxDynamicUniformBuffersPerPipelineLayout);
+ COPY_LIMIT(maxDynamicStorageBuffersPerPipelineLayout);
+ COPY_LIMIT(maxSampledTexturesPerShaderStage);
+ COPY_LIMIT(maxSamplersPerShaderStage);
+ COPY_LIMIT(maxStorageBuffersPerShaderStage);
+ COPY_LIMIT(maxStorageTexturesPerShaderStage);
+ COPY_LIMIT(maxUniformBuffersPerShaderStage);
+ COPY_LIMIT(maxUniformBufferBindingSize);
+ COPY_LIMIT(maxStorageBufferBindingSize);
+ COPY_LIMIT(minUniformBufferOffsetAlignment);
+ COPY_LIMIT(minStorageBufferOffsetAlignment);
+ COPY_LIMIT(maxVertexBuffers);
+ COPY_LIMIT(maxVertexAttributes);
+ COPY_LIMIT(maxVertexBufferArrayStride);
+ COPY_LIMIT(maxInterStageShaderComponents);
+ COPY_LIMIT(maxComputeWorkgroupStorageSize);
+ COPY_LIMIT(maxComputeInvocationsPerWorkgroup);
+ COPY_LIMIT(maxComputeWorkgroupSizeX);
+ COPY_LIMIT(maxComputeWorkgroupSizeY);
+ COPY_LIMIT(maxComputeWorkgroupSizeZ);
+ COPY_LIMIT(maxComputeWorkgroupsPerDimension);
+#undef COPY_LIMIT
+
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
+ }
+
+ bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) {
+ wgpu::DeviceDescriptor desc{}; // TODO(crbug.com/dawn/1133): Fill in.
+ interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
+
+ std::vector<wgpu::FeatureName> requiredFeatures;
+ // See src/dawn/native/Features.cpp for enum <-> string mappings.
+ for (auto required : descriptor.requiredFeatures) {
+ switch (required) {
+ case interop::GPUFeatureName::kTextureCompressionBc:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
+ continue;
+ case interop::GPUFeatureName::kTextureCompressionEtc2:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
+ continue;
+ case interop::GPUFeatureName::kTextureCompressionAstc:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
+ continue;
+ case interop::GPUFeatureName::kTimestampQuery:
+ requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
+ continue;
+ case interop::GPUFeatureName::kDepth24UnormStencil8:
+ requiredFeatures.emplace_back(wgpu::FeatureName::Depth24UnormStencil8);
+ continue;
+ case interop::GPUFeatureName::kDepth32FloatStencil8:
+ requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
+ continue;
+ case interop::GPUFeatureName::kDepthClipControl:
+ case interop::GPUFeatureName::kIndirectFirstInstance:
+ // TODO(dawn:1123) Add support for these extensions when possible.
+ continue;
+ }
+ UNIMPLEMENTED("required: ", required);
+ }
+
+        // Propagate enabled/disabled dawn features
+        // Note: DawnTogglesDeviceDescriptor::forceEnabledToggles and forceDisabledToggles are
+        // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
+        // call by storing them on the stack.
+ std::vector<std::string> enabledToggles;
+ std::vector<std::string> disabledToggles;
+ std::vector<const char*> forceEnabledToggles;
+ std::vector<const char*> forceDisabledToggles;
+ if (auto values = flags_.Get("enable-dawn-features")) {
+ enabledToggles = Split(*values, ',');
+ for (auto& t : enabledToggles) {
+ forceEnabledToggles.emplace_back(t.c_str());
+ }
+ }
+ if (auto values = flags_.Get("disable-dawn-features")) {
+ disabledToggles = Split(*values, ',');
+ for (auto& t : disabledToggles) {
+ forceDisabledToggles.emplace_back(t.c_str());
+ }
+ }
+
+ desc.requiredFeaturesCount = requiredFeatures.size();
+ desc.requiredFeatures = requiredFeatures.data();
+
+ DawnTogglesDeviceDescriptor togglesDesc = {};
+ desc.nextInChain = &togglesDesc;
+ togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+ togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+ togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+ togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+
+ auto wgpu_device = adapter_.CreateDevice(&desc);
+ if (wgpu_device) {
+ promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
+ } else {
+ Napi::Error::New(env, "failed to create device").ThrowAsJavaScriptException();
+ }
+ return promise;
+ }
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h
new file mode 100644
index 00000000000..de03234cc6b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUAdapter.h
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUADAPTER_H_
+#define DAWN_NODE_BINDING_GPUADAPTER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+ class Flags;
+
+ // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
+ class GPUAdapter final : public interop::GPUAdapter {
+ public:
+ GPUAdapter(dawn::native::Adapter a, const Flags& flags);
+
+ // interop::GPUAdapter interface compliance
+ std::string getName(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ bool getIsFallbackAdapter(Napi::Env) override;
+ interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) override;
+
+ private:
+ dawn::native::Adapter adapter_;
+ const Flags& flags_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp
new file mode 100644
index 00000000000..36b092966ad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBindGroup.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBindGroup
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
+ }
+
+ std::optional<std::string> GPUBindGroup::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBindGroup::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h
new file mode 100644
index 00000000000..b644b906454
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroup.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUP_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUP_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
+ class GPUBindGroup final : public interop::GPUBindGroup {
+ public:
+ GPUBindGroup(wgpu::BindGroup group);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroup&() const {
+ return group_;
+ }
+
+ // interop::GPUBindGroup interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::BindGroup group_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBINDGROUP_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp
new file mode 100644
index 00000000000..6b1a5abcf59
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBindGroupLayout
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
+ : layout_(std::move(layout)) {
+ }
+
+ std::optional<std::string> GPUBindGroupLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBindGroupLayout::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h
new file mode 100644
index 00000000000..009c96c29b3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBindGroupLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
+ // wgpu::BindGroupLayout.
+ class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
+ public:
+ GPUBindGroupLayout(wgpu::BindGroupLayout layout);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroupLayout&() const {
+ return layout_;
+ }
+
+ // interop::GPUBindGroupLayout interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::BindGroupLayout layout_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp
new file mode 100644
index 00000000000..5f84bc100b4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.cpp
@@ -0,0 +1,169 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUBuffer.h"
+
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBuffer
+ // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
+ // robustly passing, pull out validation and see what / if breaks.
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async)
+ : buffer_(std::move(buffer)),
+ desc_(desc),
+ device_(std::move(device)),
+ async_(std::move(async)) {
+ if (desc.mappedAtCreation) {
+ state_ = State::MappedAtCreation;
+ }
+ }
+
+ interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ wgpu::MapMode md{};
+ Converter conv(env);
+ if (!conv(md, mode)) {
+ interop::Promise<void> promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ return promise;
+ }
+
+ if (state_ != State::Unmapped) {
+ interop::Promise<void> promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ device_.InjectError(wgpu::ErrorType::Validation,
+ "mapAsync called on buffer that is not in the unmapped state");
+ return promise;
+ }
+
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ State& state;
+ };
+ auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_, state_};
+ auto promise = ctx->promise;
+
+ uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
+
+ state_ = State::MappingPending;
+
+ buffer_.MapAsync(
+ md, offset, s,
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ c->state = State::Unmapped;
+ switch (status) {
+ case WGPUBufferMapAsyncStatus_Force32:
+ UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+ break;
+ case WGPUBufferMapAsyncStatus_Success:
+ c->promise.Resolve();
+ c->state = State::Mapped;
+ break;
+ case WGPUBufferMapAsyncStatus_Error:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
+ case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
+ c->promise.Reject(Errors::AbortError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_Unknown:
+ case WGPUBufferMapAsyncStatus_DeviceLost:
+ // TODO: The spec is a bit vague around what the promise should do
+ // here.
+ c->promise.Reject(Errors::UnknownError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+
+ uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
+
+ uint64_t start = offset;
+ uint64_t end = offset + s;
+ for (auto& mapping : mapped_) {
+ if (mapping.Intersects(start, end)) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+ }
+
+ auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
+ ? buffer_.GetMappedRange(offset, s)
+ : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
+ if (!ptr) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+ auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
+ // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
+ mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
+ return array_buffer;
+ }
+
+ void GPUBuffer::unmap(Napi::Env env) {
+ if (state_ == State::Destroyed) {
+ device_.InjectError(wgpu::ErrorType::Validation,
+ "unmap() called on a destroyed buffer");
+ return;
+ }
+
+ for (auto& mapping : mapped_) {
+ mapping.buffer.Value().Detach();
+ }
+ mapped_.clear();
+ buffer_.Unmap();
+ state_ = State::Unmapped;
+ }
+
+ void GPUBuffer::destroy(Napi::Env) {
+ buffer_.Destroy();
+ state_ = State::Destroyed;
+ }
+
+ std::optional<std::string> GPUBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h
new file mode 100644
index 00000000000..32cdfe6d3cf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUBuffer.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBUFFER_H_
+#define DAWN_NODE_BINDING_GPUBUFFER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
+ class GPUBuffer final : public interop::GPUBuffer {
+ public:
+ GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async);
+
+ // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
+ const wgpu::BufferDescriptor& Desc() const {
+ return desc_;
+ }
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Buffer&() const {
+ return buffer_;
+ }
+
+ // interop::GPUBuffer interface compliance
+ interop::Promise<void> mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ interop::ArrayBuffer getMappedRange(Napi::Env env,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void unmap(Napi::Env) override;
+ void destroy(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ struct Mapping {
+ uint64_t start;
+ uint64_t end;
+ inline bool Intersects(uint64_t s, uint64_t e) const {
+ return s < end && e > start;
+ }
+ Napi::Reference<interop::ArrayBuffer> buffer;
+ };
+
+ // https://www.w3.org/TR/webgpu/#buffer-interface
+ enum class State {
+ Unmapped,
+ Mapped,
+ MappedAtCreation,
+ MappingPending,
+ Destroyed,
+ };
+
+ wgpu::Buffer buffer_;
+ wgpu::BufferDescriptor const desc_;
+ wgpu::Device const device_;
+ std::shared_ptr<AsyncRunner> async_;
+ State state_ = State::Unmapped;
+ std::vector<Mapping> mapped_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp
new file mode 100644
index 00000000000..ab0f7083a1a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUCommandBuffer
+ ////////////////////////////////////////////////////////////////////////////////
+
+ GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
+ }
+
+ std::optional<std::string> GPUCommandBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUCommandBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h
new file mode 100644
index 00000000000..bd84d1b746b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandBuffer.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
+ // wgpu::CommandBuffer.
+ class GPUCommandBuffer final : public interop::GPUCommandBuffer {
+ public:
+ GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::CommandBuffer&() const {
+ return cmd_buf_;
+ }
+
+ // interop::GPUCommandBuffer interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::CommandBuffer cmd_buf_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp
new file mode 100644
index 00000000000..b6beee65779
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.cpp
@@ -0,0 +1,215 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUCommandEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPU.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/binding/GPUComputePassEncoder.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPURenderPassEncoder.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUCommandEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
+ }
+
+ interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
+ Napi::Env env,
+ interop::GPURenderPassDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPassDescriptor desc{};
+ // TODO(dawn:1250) handle timestampWrites
+ if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
+ !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
+ !conv(desc.label, descriptor.label) ||
+ !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
+ return {};
+ }
+
+ return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
+ env, enc_.BeginRenderPass(&desc));
+ }
+
+ interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
+ Napi::Env env,
+ interop::GPUComputePassDescriptor descriptor) {
+ wgpu::ComputePassDescriptor desc{};
+ // TODO(dawn:1250) handle timestampWrites
+ return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
+ env, enc_.BeginComputePass(&desc));
+ }
+
+ void GPUCommandEncoder::clearBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(s, size)) {
+ return;
+ }
+
+ enc_.ClearBuffer(b, offset, s);
+ }
+
+ void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) {
+ Converter conv(env);
+
+ wgpu::Buffer src{};
+ wgpu::Buffer dst{};
+ if (!conv(src, source) || //
+ !conv(dst, destination)) {
+ return;
+ }
+
+ enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+ }
+
+ void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyBuffer src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyBufferToTexture(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyBuffer dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToBuffer(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToTexture(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPUCommandEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPUCommandEncoder::writeTimestamp(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ if (!conv(q, querySet)) {
+ return;
+ }
+
+ enc_.WriteTimestamp(q, queryIndex);
+ }
+
+ void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ uint32_t f = 0;
+ uint32_t c = 0;
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(q, querySet) || //
+ !conv(f, firstQuery) || //
+ !conv(c, queryCount) || //
+ !conv(b, destination) || //
+ !conv(o, destinationOffset)) {
+ return;
+ }
+
+ enc_.ResolveQuerySet(q, f, c, b, o);
+ }
+
+ interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) {
+ wgpu::CommandBufferDescriptor desc{};
+ return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
+ }
+
+ std::optional<std::string> GPUCommandEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUCommandEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h
new file mode 100644
index 00000000000..cf5a77dc8a2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUCommandEncoder.h
@@ -0,0 +1,84 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
+ // wgpu::CommandEncoder.
+ class GPUCommandEncoder final : public interop::GPUCommandEncoder {
+ public:
+ GPUCommandEncoder(wgpu::CommandEncoder enc);
+
+ // interop::GPUCommandEncoder interface compliance
+ interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
+ Napi::Env,
+ interop::GPURenderPassDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
+ Napi::Env,
+ interop::GPUComputePassDescriptor descriptor) override;
+ void clearBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void copyBufferToBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) override;
+ void copyBufferToTexture(Napi::Env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToBuffer(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToTexture(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void writeTimestamp(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void resolveQuerySet(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) override;
+ interop::Interface<interop::GPUCommandBuffer> finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::CommandEncoder enc_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp
new file mode 100644
index 00000000000..224dc03e785
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.cpp
@@ -0,0 +1,115 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUComputePassEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUComputePassEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
+ : enc_(std::move(enc)) {
+ }
+
+ void GPUComputePassEncoder::setPipeline(
+ Napi::Env,
+ interop::Interface<interop::GPUComputePipeline> pipeline) {
+ enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+ }
+
+ void GPUComputePassEncoder::dispatch(Napi::Env,
+ interop::GPUSize32 workgroupCountX,
+ interop::GPUSize32 workgroupCountY,
+ interop::GPUSize32 workgroupCountZ) {
+ enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
+ }
+
+ void GPUComputePassEncoder::dispatchIndirect(
+ Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+ }
+
+ void GPUComputePassEncoder::end(Napi::Env) {
+ enc_.End();
+ }
+
+ void GPUComputePassEncoder::endPass(Napi::Env) {
+ enc_.EndPass();
+ }
+
+ void GPUComputePassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
+ void GPUComputePassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ std::optional<std::string> GPUComputePassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h
new file mode 100644
index 00000000000..01eab694b83
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePassEncoder.h
@@ -0,0 +1,70 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
+ // wgpu::ComputePassEncoder.
+ class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
+ public:
+ GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePassEncoder&() const {
+ return enc_;
+ }
+
+ // interop::GPUComputePassEncoder interface compliance
+ void setPipeline(Napi::Env,
+ interop::Interface<interop::GPUComputePipeline> pipeline) override;
+ void dispatch(Napi::Env,
+ interop::GPUSize32 workgroupCountX,
+ interop::GPUSize32 workgroupCountY,
+ interop::GPUSize32 workgroupCountZ) override;
+ void dispatchIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void end(Napi::Env) override;
+ void endPass(Napi::Env) override; // TODO(dawn:1286): Remove after deprecation period.
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::ComputePassEncoder enc_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp
new file mode 100644
index 00000000000..9e57961d267
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.cpp
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUComputePipeline
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
+ : pipeline_(std::move(pipeline)) {
+ }
+
+ interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
+ Napi::Env env,
+ uint32_t index) {
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, pipeline_.GetBindGroupLayout(index));
+ }
+
+ std::optional<std::string> GPUComputePipeline::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePipeline::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h
new file mode 100644
index 00000000000..c37eb284ac7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUComputePipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUPIPELINE_H_
+#define DAWN_NODE_BINDING_GPUPIPELINE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
+ // wgpu::ComputePipeline.
+ class GPUComputePipeline final : public interop::GPUComputePipeline {
+ public:
+ GPUComputePipeline(wgpu::ComputePipeline pipeline);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePipeline&() const {
+ return pipeline_;
+ }
+
+ // interop::GPUComputePipeline interface compliance
+ interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+ uint32_t index) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::ComputePipeline pipeline_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp
new file mode 100644
index 00000000000..2358b700f02
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.cpp
@@ -0,0 +1,528 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUDevice.h"
+
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/binding/GPUCommandEncoder.h"
+#include "src/dawn/node/binding/GPUComputePipeline.h"
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPUQueue.h"
+#include "src/dawn/node/binding/GPURenderBundleEncoder.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/binding/GPUSampler.h"
+#include "src/dawn/node/binding/GPUShaderModule.h"
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+#include "src/dawn/node/binding/GPUTexture.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ namespace {
+
+ class DeviceLostInfo : public interop::GPUDeviceLostInfo {
+ public:
+ DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+ : reason_(reason), message_(message) {
+ }
+ std::variant<interop::GPUDeviceLostReason> getReason(Napi::Env env) override {
+ return reason_;
+ }
+ std::string getMessage(Napi::Env) override {
+ return message_;
+ }
+
+ private:
+ interop::GPUDeviceLostReason reason_;
+ std::string message_;
+ };
+
+ class OOMError : public interop::GPUOutOfMemoryError {};
+ class ValidationError : public interop::GPUValidationError {
+ public:
+ ValidationError(std::string message) : message_(std::move(message)) {
+ }
+
+ std::string getMessage(Napi::Env) override {
+ return message_;
+ };
+
+ private:
+ std::string message_;
+ };
+
+ } // namespace
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUDevice
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
+ : env_(env), device_(device), async_(std::make_shared<AsyncRunner>(env, device)) {
+ device_.SetLoggingCallback(
+ [](WGPULoggingType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+ device_.SetUncapturedErrorCallback(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+
+ device_.SetDeviceLostCallback(
+ [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
+ auto r = interop::GPUDeviceLostReason::kDestroyed;
+ switch (reason) {
+ case WGPUDeviceLostReason_Force32:
+ UNREACHABLE("WGPUDeviceLostReason_Force32");
+ break;
+ case WGPUDeviceLostReason_Destroyed:
+ case WGPUDeviceLostReason_Undefined:
+ r = interop::GPUDeviceLostReason::kDestroyed;
+ break;
+ }
+ auto* self = static_cast<GPUDevice*>(userdata);
+ for (auto promise : self->lost_promises_) {
+ promise.Resolve(
+ interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
+ }
+ },
+ this);
+ }
+
+ GPUDevice::~GPUDevice() {
+ }
+
+ interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
+ class Features : public interop::GPUSupportedFeatures {
+ public:
+ bool has(Napi::Env, std::string feature) override {
+ UNIMPLEMENTED();
+ }
+ std::vector<std::string> keys(Napi::Env) override {
+ UNIMPLEMENTED();
+ }
+ };
+ return interop::GPUSupportedFeatures::Create<Features>(env);
+ }
+
+ interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
+ wgpu::SupportedLimits limits{};
+ if (!device_.GetLimits(&limits)) {
+ Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
+ }
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
+ }
+
+ interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
+ // TODO(crbug.com/dawn/1144): Should probably return the same Queue JS object.
+ return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+ }
+
+ void GPUDevice::destroy(Napi::Env env) {
+ for (auto promise : lost_promises_) {
+ promise.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+ env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
+ }
+ lost_promises_.clear();
+ device_.Release();
+ }
+
+ interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BufferDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
+ !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
+ return {};
+ }
+ return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
+ device_, async_);
+ }
+
+ interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
+ Napi::Env env,
+ interop::GPUTextureDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::TextureDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) || //
+ !conv(desc.size, descriptor.size) || //
+ !conv(desc.dimension, descriptor.dimension) || //
+ !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
+ !conv(desc.sampleCount, descriptor.sampleCount) || //
+ !conv(desc.format, descriptor.format)) {
+ return {};
+ }
+ return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+ }
+
+ interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
+ Napi::Env env,
+ interop::GPUSamplerDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::SamplerDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || //
+ !conv(desc.addressModeU, descriptor.addressModeU) || //
+ !conv(desc.addressModeV, descriptor.addressModeV) || //
+ !conv(desc.addressModeW, descriptor.addressModeW) || //
+ !conv(desc.magFilter, descriptor.magFilter) || //
+ !conv(desc.minFilter, descriptor.minFilter) || //
+ !conv(desc.mipmapFilter, descriptor.mipmapFilter) || //
+ !conv(desc.lodMinClamp, descriptor.lodMinClamp) || //
+ !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) || //
+ !conv(desc.compare, descriptor.compare) || //
+ !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
+ return {};
+ }
+ return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+ }
+
+ interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) {
+ UNIMPLEMENTED();
+ }
+
+ interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
+ Napi::Env env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BindGroupLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
+ }
+
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, device_.CreateBindGroupLayout(&desc));
+ }
+
+ interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
+ Napi::Env env,
+ interop::GPUPipelineLayoutDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::PipelineLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
+ return {};
+ }
+
+ return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
+ env, device_.CreatePipelineLayout(&desc));
+ }
+
+ interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
+ Napi::Env env,
+ interop::GPUBindGroupDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BindGroupDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
+ }
+
+ return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+ }
+
+ interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
+ Napi::Env env,
+ interop::GPUShaderModuleDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
+ wgpu::ShaderModuleDescriptor sm_desc{};
+ if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
+ return {};
+ }
+ sm_desc.nextInChain = &wgsl_desc;
+
+ return interop::GPUShaderModule::Create<GPUShaderModule>(
+ env, device_.CreateShaderModule(&sm_desc), async_);
+ }
+
+ interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
+ Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
+ }
+
+ return interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ env, device_.CreateComputePipeline(&desc));
+ }
+
+ interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
+ }
+
+ return interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ env, device_.CreateRenderPipeline(&desc));
+ }
+
+ interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+ GPUDevice::createComputePipelineAsync(Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
+
+ Converter conv(env);
+
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ Promise promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ return promise;
+ }
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+ auto promise = ctx->promise;
+
+ device_.CreateComputePipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+ char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::Promise<interop::Interface<interop::GPURenderPipeline>>
+ GPUDevice::createRenderPipelineAsync(Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
+
+ Converter conv(env);
+
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ Promise promise(env, PROMISE_INFO);
+ promise.Reject(Errors::OperationError(env));
+ return promise;
+ }
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+ auto promise = ctx->promise;
+
+ device_.CreateRenderPipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+ char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) {
+ wgpu::CommandEncoderDescriptor desc{};
+ return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
+ env, device_.CreateCommandEncoder(&desc));
+ }
+
+ interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
+ Napi::Env env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderBundleEncoderDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
+ !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
+ !conv(desc.sampleCount, descriptor.sampleCount)) {
+ return {};
+ }
+
+ return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
+ env, device_.CreateRenderBundleEncoder(&desc));
+ }
+
+ interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
+ Napi::Env env,
+ interop::GPUQuerySetDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::QuerySetDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
+ !conv(desc.count, descriptor.count)) {
+ return {};
+ }
+
+ return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+ }
+
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
+ Napi::Env env) {
+ auto promise =
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>(env, PROMISE_INFO);
+ lost_promises_.emplace_back(promise);
+ return promise;
+ }
+
+ void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
+ wgpu::ErrorFilter f;
+ switch (filter) {
+ case interop::GPUErrorFilter::kOutOfMemory:
+ f = wgpu::ErrorFilter::OutOfMemory;
+ break;
+ case interop::GPUErrorFilter::kValidation:
+ f = wgpu::ErrorFilter::Validation;
+ break;
+ default:
+ Napi::Error::New(env, "unhandled GPUErrorFilter value")
+ .ThrowAsJavaScriptException();
+ return;
+ }
+ device_.PushErrorScope(f);
+ }
+
+ interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
+ using Promise = interop::Promise<std::optional<interop::GPUError>>;
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto* ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+ auto promise = ctx->promise;
+
+ bool ok = device_.PopErrorScope(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ auto env = c->env;
+ switch (type) {
+ case WGPUErrorType::WGPUErrorType_NoError:
+ c->promise.Resolve({});
+ break;
+ case WGPUErrorType::WGPUErrorType_OutOfMemory:
+ c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
+ break;
+ case WGPUErrorType::WGPUErrorType_Unknown:
+ case WGPUErrorType::WGPUErrorType_DeviceLost:
+ case WGPUErrorType::WGPUErrorType_Validation:
+ c->promise.Resolve(
+ interop::GPUValidationError::Create<ValidationError>(env, message));
+ break;
+ default:
+ c->promise.Reject("unhandled error type");
+ break;
+ }
+ },
+ ctx);
+
+ if (ok) {
+ return promise;
+ }
+
+ delete ctx;
+ promise.Reject(Errors::OperationError(env));
+ return promise;
+ }
+
+ std::optional<std::string> GPUDevice::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ };
+
+ void GPUDevice::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ };
+
+ interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::setOnuncapturederror(Napi::Env,
+ interop::Interface<interop::EventHandler> value) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
+ UNIMPLEMENTED();
+ }
+
+ bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h
new file mode 100644
index 00000000000..84ba67d027f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUDevice.h
@@ -0,0 +1,113 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUDEVICE_H_
+#define DAWN_NODE_BINDING_GPUDEVICE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+ // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
+ class GPUDevice final : public interop::GPUDevice {
+ public:
+ GPUDevice(Napi::Env env, wgpu::Device device);
+ ~GPUDevice();
+
+ // interop::GPUDevice interface compliance
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
+ void destroy(Napi::Env) override;
+ interop::Interface<interop::GPUBuffer> createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) override;
+ interop::Interface<interop::GPUTexture> createTexture(
+ Napi::Env,
+ interop::GPUTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUSampler> createSampler(
+ Napi::Env,
+ interop::GPUSamplerDescriptor descriptor) override;
+ interop::Interface<interop::GPUExternalTexture> importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
+ Napi::Env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
+ Napi::Env,
+ interop::GPUPipelineLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroup> createBindGroup(
+ Napi::Env,
+ interop::GPUBindGroupDescriptor descriptor) override;
+ interop::Interface<interop::GPUShaderModule> createShaderModule(
+ Napi::Env,
+ interop::GPUShaderModuleDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePipeline> createComputePipeline(
+ Napi::Env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
+ Napi::Env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+ createComputePipelineAsync(Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
+ Napi::Env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPUQuerySet> createQuerySet(
+ Napi::Env,
+ interop::GPUQuerySetDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
+ Napi::Env env) override;
+ void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
+ interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+ interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
+ void setOnuncapturederror(Napi::Env,
+ interop::Interface<interop::EventHandler> value) override;
+ void addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
+ void removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
+ bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
+
+ private:
+ void QueueTick();
+
+ Napi::Env env_;
+ wgpu::Device device_;
+ std::shared_ptr<AsyncRunner> async_;
+ std::vector<interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>>
+ lost_promises_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUDEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp
new file mode 100644
index 00000000000..945fb4a8762
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUPipelineLayout.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUPipelineLayout
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
+ }
+
+ std::optional<std::string> GPUPipelineLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUPipelineLayout::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h
new file mode 100644
index 00000000000..174b1611b57
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUPipelineLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+#define DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
+ // wgpu::PipelineLayout.
+ class GPUPipelineLayout final : public interop::GPUPipelineLayout {
+ public:
+ GPUPipelineLayout(wgpu::PipelineLayout layout);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::PipelineLayout&() const {
+ return layout_;
+ }
+
+ // interop::GPUPipelineLayout interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::PipelineLayout layout_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp
new file mode 100644
index 00000000000..b0714f4849a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.cpp
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUQuerySet.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUQuerySet
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
+ }
+
+ void GPUQuerySet::destroy(Napi::Env) {
+ query_set_.Destroy();
+ }
+
+ std::optional<std::string> GPUQuerySet::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUQuerySet::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h
new file mode 100644
index 00000000000..a7e37d3b56f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQuerySet.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUERYSET_H_
+#define DAWN_NODE_BINDING_GPUQUERYSET_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
+ class GPUQuerySet final : public interop::GPUQuerySet {
+ public:
+ GPUQuerySet(wgpu::QuerySet query_set);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::QuerySet&() const {
+ return query_set_;
+ }
+
+ // interop::GPUQuerySet interface compliance
+ void destroy(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::QuerySet query_set_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUQUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp
new file mode 100644
index 00000000000..e7f7a7290a5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.cpp
@@ -0,0 +1,132 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUQueue.h"
+
+#include <memory>
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUCommandBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUQueue
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
+ : queue_(std::move(queue)), async_(std::move(async)) {
+ }
+
+ void GPUQueue::submit(
+ Napi::Env env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
+ std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
+ for (size_t i = 0; i < commandBuffers.size(); i++) {
+ bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
+ }
+ Converter conv(env);
+ uint32_t bufs_size;
+ if (!conv(bufs_size, bufs.size())) {
+ return;
+ }
+ queue_.Submit(bufs_size, bufs.data());
+ }
+
+ interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_};
+ auto promise = ctx->promise;
+
+ queue_.OnSubmittedWorkDone(
+ 0,
+ [](WGPUQueueWorkDoneStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
+ Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
+ .ThrowAsJavaScriptException();
+ }
+ c->promise.Resolve();
+ },
+ ctx);
+
+ return promise;
+ }
+
+ void GPUQueue::writeBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffset,
+ std::optional<interop::GPUSize64> size) {
+ wgpu::Buffer buf = *buffer.As<GPUBuffer>();
+ Converter::BufferSource src{};
+ Converter conv(env);
+ if (!conv(src, data)) {
+ return;
+ }
+
+ // TODO(crbug.com/dawn/1132): Bounds check
+ if (src.data) {
+ src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
+ }
+ src.size -= dataOffset;
+ if (size.has_value()) {
+ src.size = size.value();
+ }
+
+ queue_.WriteBuffer(buf, bufferOffset, src.data, src.size);
+ }
+
+ void GPUQueue::writeTexture(Napi::Env env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) {
+ wgpu::ImageCopyTexture dst{};
+ Converter::BufferSource src{};
+ wgpu::TextureDataLayout layout{};
+ wgpu::Extent3D sz{};
+ Converter conv(env);
+ if (!conv(dst, destination) || //
+ !conv(src, data) || //
+ !conv(layout, dataLayout) || //
+ !conv(sz, size)) {
+ return;
+ }
+
+ queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+ }
+
+ void GPUQueue::copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) {
+ UNIMPLEMENTED();
+ }
+
+ std::optional<std::string> GPUQueue::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUQueue::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h
new file mode 100644
index 00000000000..be8ba8c7121
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUQueue.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUEUE_H_
+#define DAWN_NODE_BINDING_GPUQUEUE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
+ class GPUQueue final : public interop::GPUQueue {
+ public:
+ GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
+
+ // interop::GPUQueue interface compliance
+ void submit(
+ Napi::Env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
+ interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
+ void writeBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffset,
+ std::optional<interop::GPUSize64> size) override;
+ void writeTexture(Napi::Env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) override;
+ void copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::Queue queue_;
+ std::shared_ptr<AsyncRunner> async_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUQUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp
new file mode 100644
index 00000000000..c136d034395
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.cpp
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderBundle.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderBundle
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
+ }
+
+ std::optional<std::string> GPURenderBundle::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderBundle::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h
new file mode 100644
index 00000000000..76635c3f66d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundle.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
+ // wgpu::RenderBundle.
+ class GPURenderBundle final : public interop::GPURenderBundle {
+ public:
+ GPURenderBundle(wgpu::RenderBundle bundle);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderBundle&() const {
+ return bundle_;
+ }
+
+ // interop::GPURenderBundle interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderBundle bundle_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp
new file mode 100644
index 00000000000..6c3fe9961c6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.cpp
@@ -0,0 +1,192 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderBundleEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderBundleEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
+ : enc_(std::move(enc)) {
+ }
+
+ interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
+ Napi::Env env,
+ interop::GPURenderBundleDescriptor descriptor) {
+ wgpu::RenderBundleDescriptor desc{};
+
+ return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+ }
+
+ void GPURenderBundleEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
+ void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPURenderBundleEncoder::setPipeline(
+ Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+
+ wgpu::RenderPipeline p{};
+ if (!conv(p, pipeline)) {
+ return;
+ }
+
+ enc_.SetPipeline(p);
+ }
+
+ void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ wgpu::IndexFormat f{};
+ uint64_t o = 0;
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(o, offset) || //
+ !conv(s, size)) {
+ return;
+ }
+
+ enc_.SetIndexBuffer(b, f, o, s);
+ }
+
+ void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+ }
+
+ void GPURenderBundleEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+ }
+
+ void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+ }
+
+ void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint32_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+ }
+
+ void GPURenderBundleEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint32_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+ }
+
+ std::optional<std::string> GPURenderBundleEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderBundleEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h
new file mode 100644
index 00000000000..c459e84db23
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderBundleEncoder.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
+ // wgpu::RenderBundleEncoder.
+ class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
+ public:
+ GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
+
+ // interop::GPURenderBundleEncoder interface compliance
+ interop::Interface<interop::GPURenderBundle> finish(
+ Napi::Env,
+ interop::GPURenderBundleDescriptor descriptor) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void setPipeline(Napi::Env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) override;
+ void setIndexBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void setVertexBuffer(Napi::Env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void draw(Napi::Env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndexed(Napi::Env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void drawIndexedIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderBundleEncoder enc_;
+ };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp
new file mode 100644
index 00000000000..3fe4791e36f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.cpp
@@ -0,0 +1,242 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderPassEncoder.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/GPUBindGroup.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/binding/GPUQuerySet.h"
+#include "src/dawn/node/binding/GPURenderBundle.h"
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderPassEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
+ }
+
+ void GPURenderPassEncoder::setViewport(Napi::Env,
+ float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
+ enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+ }
+
+ void GPURenderPassEncoder::setScissorRect(Napi::Env,
+ interop::GPUIntegerCoordinate x,
+ interop::GPUIntegerCoordinate y,
+ interop::GPUIntegerCoordinate width,
+ interop::GPUIntegerCoordinate height) {
+ enc_.SetScissorRect(x, y, width, height);
+ }
+
+ void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
+ Converter conv(env);
+
+ wgpu::Color c{};
+ if (!conv(c, color)) {
+ return;
+ }
+
+ enc_.SetBlendConstant(&c);
+ }
+
+ void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
+ enc_.SetStencilReference(reference);
+ }
+
+ void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
+ enc_.BeginOcclusionQuery(queryIndex);
+ }
+
+ void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
+ enc_.EndOcclusionQuery();
+ }
+
+ void GPURenderPassEncoder::executeBundles(
+ Napi::Env env,
+ std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
+ Converter conv(env);
+
+ wgpu::RenderBundle* bundles = nullptr;
+ uint32_t bundleCount = 0;
+ if (!conv(bundles, bundleCount, bundles_in)) {
+ return;
+ }
+
+ enc_.ExecuteBundles(bundleCount, bundles);
+ }
+
+ void GPURenderPassEncoder::end(Napi::Env) {
+ enc_.End();
+ }
+
+ void GPURenderPassEncoder::endPass(Napi::Env) {
+ enc_.EndPass();
+ }
+
+ void GPURenderPassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
+ void GPURenderPassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPURenderPassEncoder::setPipeline(
+ Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+ wgpu::RenderPipeline rp{};
+ if (!conv(rp, pipeline)) {
+ return;
+ }
+ enc_.SetPipeline(rp);
+ }
+
+ void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ wgpu::IndexFormat f;
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(s, size)) {
+ return;
+ }
+ enc_.SetIndexBuffer(b, f, offset, s);
+ }
+
+ void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+ }
+
+ void GPURenderPassEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+ }
+
+ void GPURenderPassEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+ }
+
+ void GPURenderPassEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint32_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+ }
+
+ void GPURenderPassEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint32_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+ }
+
+ std::optional<std::string> GPURenderPassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderPassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h
new file mode 100644
index 00000000000..3926052ff7f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPassEncoder.h
@@ -0,0 +1,109 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
+ // wgpu::RenderPassEncoder.
+    class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
+      public:
+        // Wraps the given Dawn render pass encoder.
+        GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::RenderPassEncoder&() const {
+            return enc_;
+        }
+
+        // interop::GPURenderPassEncoder interface compliance.
+        // Each method converts its interop arguments (via binding::Converter
+        // where needed) and forwards to the wrapped wgpu::RenderPassEncoder.
+        void setViewport(Napi::Env,
+                         float x,
+                         float y,
+                         float width,
+                         float height,
+                         float minDepth,
+                         float maxDepth) override;
+        void setScissorRect(Napi::Env,
+                            interop::GPUIntegerCoordinate x,
+                            interop::GPUIntegerCoordinate y,
+                            interop::GPUIntegerCoordinate width,
+                            interop::GPUIntegerCoordinate height) override;
+        void setBlendConstant(Napi::Env, interop::GPUColor color) override;
+        void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
+        void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
+        void endOcclusionQuery(Napi::Env) override;
+        void executeBundles(
+            Napi::Env,
+            std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
+        void end(Napi::Env) override;
+        void endPass(Napi::Env) override;  // TODO(dawn:1286): Remove after deprecation period.
+        // Two setBindGroup overloads: one taking a vector of dynamic offsets,
+        // one taking a Uint32Array slice (start + length).
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+        void setBindGroup(Napi::Env,
+                          interop::GPUIndex32 index,
+                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                          interop::Uint32Array dynamicOffsetsData,
+                          interop::GPUSize64 dynamicOffsetsDataStart,
+                          interop::GPUSize32 dynamicOffsetsDataLength) override;
+        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+        void popDebugGroup(Napi::Env) override;
+        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+        void setPipeline(Napi::Env,
+                         interop::Interface<interop::GPURenderPipeline> pipeline) override;
+        void setIndexBuffer(Napi::Env,
+                            interop::Interface<interop::GPUBuffer> buffer,
+                            interop::GPUIndexFormat indexFormat,
+                            interop::GPUSize64 offset,
+                            std::optional<interop::GPUSize64> size) override;
+        void setVertexBuffer(Napi::Env,
+                             interop::GPUIndex32 slot,
+                             interop::Interface<interop::GPUBuffer> buffer,
+                             interop::GPUSize64 offset,
+                             std::optional<interop::GPUSize64> size) override;
+        void draw(Napi::Env,
+                  interop::GPUSize32 vertexCount,
+                  interop::GPUSize32 instanceCount,
+                  interop::GPUSize32 firstVertex,
+                  interop::GPUSize32 firstInstance) override;
+        void drawIndexed(Napi::Env,
+                         interop::GPUSize32 indexCount,
+                         interop::GPUSize32 instanceCount,
+                         interop::GPUSize32 firstIndex,
+                         interop::GPUSignedOffset32 baseVertex,
+                         interop::GPUSize32 firstInstance) override;
+        void drawIndirect(Napi::Env,
+                          interop::Interface<interop::GPUBuffer> indirectBuffer,
+                          interop::GPUSize64 indirectOffset) override;
+        void drawIndexedIndirect(Napi::Env,
+                                 interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                 interop::GPUSize64 indirectOffset) override;
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        // The wrapped Dawn encoder.
+        wgpu::RenderPassEncoder enc_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp
new file mode 100644
index 00000000000..58003d987e3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.cpp
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPURenderPipeline.h"
+
+#include "src/dawn/node/binding/GPUBindGroupLayout.h"
+#include "src/dawn/node/binding/GPUBuffer.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderPipeline
+ ////////////////////////////////////////////////////////////////////////////////
+    GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
+        : pipeline_(std::move(pipeline)) {
+    }
+
+    // Returns a GPUBindGroupLayout wrapping the layout of bind group `index`
+    // of the underlying Dawn pipeline.
+    interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
+        Napi::Env env,
+        uint32_t index) {
+        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+            env, pipeline_.GetBindGroupLayout(index));
+    }
+
+    // Label reflection is not implemented yet for render pipelines.
+    std::optional<std::string> GPURenderPipeline::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPURenderPipeline::setLabel(Napi::Env, std::optional<std::string> value) {
+        UNIMPLEMENTED();
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h
new file mode 100644
index 00000000000..3eaf2cd15f2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPURenderPipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+#define DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
+ // wgpu::RenderPipeline.
+    class GPURenderPipeline final : public interop::GPURenderPipeline {
+      public:
+        // Wraps the given Dawn render pipeline.
+        GPURenderPipeline(wgpu::RenderPipeline pipeline);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::RenderPipeline&() const {
+            return pipeline_;
+        }
+
+        // interop::GPURenderPipeline interface compliance
+        interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+                                                                           uint32_t index) override;
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        // The wrapped Dawn pipeline.
+        wgpu::RenderPipeline pipeline_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp
new file mode 100644
index 00000000000..70f653603c3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUSampler.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUSampler
+ ////////////////////////////////////////////////////////////////////////////////
+    GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
+    }
+
+    // Label reflection is not implemented yet for samplers.
+    std::optional<std::string> GPUSampler::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUSampler::setLabel(Napi::Env, std::optional<std::string> value) {
+        UNIMPLEMENTED();
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h
new file mode 100644
index 00000000000..4f1ff204447
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSampler.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSAMPLER_H_
+#define DAWN_NODE_BINDING_GPUSAMPLER_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+ // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
+    class GPUSampler final : public interop::GPUSampler {
+      public:
+        // Wraps the given Dawn sampler.
+        GPUSampler(wgpu::Sampler sampler);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::Sampler&() const {
+            return sampler_;
+        }
+
+        // interop::GPUSampler interface compliance
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        // The wrapped Dawn sampler.
+        wgpu::Sampler sampler_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSAMPLER_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp
new file mode 100644
index 00000000000..a889fdbe923
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.cpp
@@ -0,0 +1,125 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUShaderModule.h"
+
+#include <memory>
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUShaderModule
+ ////////////////////////////////////////////////////////////////////////////////
+    // Wraps the given Dawn shader module; `async` is the runner used to keep
+    // the Node event loop alive while compilationInfo() is pending.
+    GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
+        : shader_(std::move(shader)), async_(std::move(async)) {
+    }
+
+    // Asynchronously fetches the compilation messages for this shader module.
+    // Returns a promise that resolves with a GPUCompilationInfo once Dawn
+    // invokes the GetCompilationInfo callback.
+    interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
+    GPUShaderModule::compilationInfo(Napi::Env env) {
+        struct GPUCompilationMessage : public interop::GPUCompilationMessage {
+            // Deep copies of the WGPUCompilationMessage fields. The struct
+            // handed to the GetCompilationInfo callback (including the string
+            // its `message` member points at) is only guaranteed valid for the
+            // duration of the callback, while these getters are called later
+            // from JavaScript - so shallow-copying the struct (as before)
+            // would leave `message` dangling. Copy everything out instead.
+            std::string message;
+            WGPUCompilationMessageType type;
+            uint64_t lineNum;
+            uint64_t linePos;
+            uint64_t offset;
+            uint64_t length;
+
+            GPUCompilationMessage(const WGPUCompilationMessage& m)
+                : message(m.message ? m.message : ""),  // guard against a null C string
+                  type(m.type),
+                  lineNum(m.lineNum),
+                  linePos(m.linePos),
+                  offset(m.offset),
+                  length(m.length) {
+            }
+            std::string getMessage(Napi::Env) override {
+                return message;
+            }
+            interop::GPUCompilationMessageType getType(Napi::Env) override {
+                switch (type) {
+                    case WGPUCompilationMessageType_Error:
+                        return interop::GPUCompilationMessageType::kError;
+                    case WGPUCompilationMessageType_Warning:
+                        return interop::GPUCompilationMessageType::kWarning;
+                    case WGPUCompilationMessageType_Info:
+                        return interop::GPUCompilationMessageType::kInfo;
+                    default:
+                        // An unknown message type is a programming error.
+                        UNIMPLEMENTED();
+                }
+            }
+            uint64_t getLineNum(Napi::Env) override {
+                return lineNum;
+            }
+            uint64_t getLinePos(Napi::Env) override {
+                return linePos;
+            }
+            uint64_t getOffset(Napi::Env) override {
+                return offset;
+            }
+            uint64_t getLength(Napi::Env) override {
+                return length;
+            }
+        };
+
+        using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+
+        struct GPUCompilationInfo : public interop::GPUCompilationInfo {
+            // Persistent references keep the message JS objects alive for as
+            // long as this info object is.
+            std::vector<Napi::ObjectReference> messages;
+
+            GPUCompilationInfo(Napi::Env env, Messages msgs) {
+                messages.reserve(msgs.size());
+                for (auto& msg : msgs) {
+                    messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
+                }
+            }
+            Messages getMessages(Napi::Env) override {
+                Messages out;
+                out.reserve(messages.size());
+                for (auto& msg : messages) {
+                    out.emplace_back(msg.Value());
+                }
+                return out;
+            }
+        };
+
+        using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
+
+        // Heap-allocated context owned by the callback: it is wrapped in a
+        // unique_ptr (and so freed) when the callback fires. The AsyncTask
+        // member holds the AsyncRunner active until then.
+        struct Context {
+            Napi::Env env;
+            Promise promise;
+            AsyncTask task;
+        };
+        auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
+        auto promise = ctx->promise;
+
+        shader_.GetCompilationInfo(
+            [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
+               void* userdata) {
+                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+                Messages messages(compilationInfo->messageCount);
+                for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
+                    auto& msg = compilationInfo->messages[i];
+                    messages[i] =
+                        interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
+                }
+
+                c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+                    c->env, c->env, std::move(messages)));
+            },
+            ctx);
+
+        return promise;
+    }
+
+    // Label reflection is not implemented yet for shader modules.
+    std::optional<std::string> GPUShaderModule::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUShaderModule::setLabel(Napi::Env, std::optional<std::string> value) {
+        UNIMPLEMENTED();
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h
new file mode 100644
index 00000000000..c0a94aab30a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUShaderModule.h
@@ -0,0 +1,50 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+#define DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/binding/AsyncRunner.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
+ // wgpu::ShaderModule.
+    class GPUShaderModule final : public interop::GPUShaderModule {
+      public:
+        // Wraps the given Dawn shader module; `async` keeps the event loop
+        // alive while compilation-info requests are pending.
+        GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::ShaderModule&() const {
+            return shader_;
+        }
+
+        // interop::GPUShaderModule interface compliance
+        interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
+            Napi::Env) override;
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        // The wrapped Dawn shader module.
+        wgpu::ShaderModule shader_;
+        // Shared runner used by compilationInfo() to create AsyncTasks.
+        std::shared_ptr<AsyncRunner> async_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSHADERMODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp
new file mode 100644
index 00000000000..23c19b21875
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.cpp
@@ -0,0 +1,131 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUSupportedLimits.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUSupportedLimits
+ ////////////////////////////////////////////////////////////////////////////////
+
+    // Takes a by-value copy of the queried limits; all getters below read
+    // from this snapshot.
+    GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits)
+        : limits_(std::move(limits)) {
+    }
+
+    // Each getter returns the identically-named field of the wrapped
+    // wgpu::SupportedLimits.
+    uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
+        return limits_.limits.maxTextureDimension1D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
+        return limits_.limits.maxTextureDimension2D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
+        return limits_.limits.maxTextureDimension3D;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
+        return limits_.limits.maxTextureArrayLayers;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
+        return limits_.limits.maxBindGroups;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
+        return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
+        return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
+        return limits_.limits.maxSampledTexturesPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxSamplersPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxStorageBuffersPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
+        return limits_.limits.maxStorageTexturesPerShaderStage;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
+        return limits_.limits.maxUniformBuffersPerShaderStage;
+    }
+
+    // Buffer binding sizes are 64-bit limits.
+    uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
+        return limits_.limits.maxUniformBufferBindingSize;
+    }
+
+    uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
+        return limits_.limits.maxStorageBufferBindingSize;
+    }
+
+    uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
+        return limits_.limits.minUniformBufferOffsetAlignment;
+    }
+
+    uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
+        return limits_.limits.minStorageBufferOffsetAlignment;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
+        return limits_.limits.maxVertexBuffers;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
+        return limits_.limits.maxVertexAttributes;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
+        return limits_.limits.maxVertexBufferArrayStride;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
+        return limits_.limits.maxInterStageShaderComponents;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupStorageSize;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
+        return limits_.limits.maxComputeInvocationsPerWorkgroup;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeX;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeY;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupSizeZ;
+    }
+
+    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
+        return limits_.limits.maxComputeWorkgroupsPerDimension;
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h
new file mode 100644
index 00000000000..e571c67c37b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUSupportedLimits.h
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+#define DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
+    class GPUSupportedLimits final : public interop::GPUSupportedLimits {
+      public:
+        // Snapshots the given limits; getters read from the stored copy.
+        GPUSupportedLimits(wgpu::SupportedLimits);
+
+        // interop::GPUSupportedLimits interface compliance.
+        // One getter per WebGPU limit, returning the matching field of the
+        // wrapped wgpu::SupportedLimits.
+        uint32_t getMaxTextureDimension1D(Napi::Env) override;
+        uint32_t getMaxTextureDimension2D(Napi::Env) override;
+        uint32_t getMaxTextureDimension3D(Napi::Env) override;
+        uint32_t getMaxTextureArrayLayers(Napi::Env) override;
+        uint32_t getMaxBindGroups(Napi::Env) override;
+        uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
+        uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
+        uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
+        uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
+        uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
+        uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
+        uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
+        uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
+        uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
+        uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
+        uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
+        uint32_t getMaxVertexBuffers(Napi::Env) override;
+        uint32_t getMaxVertexAttributes(Napi::Env) override;
+        uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
+        uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
+        uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
+        uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
+
+      private:
+        // The snapshotted limits.
+        wgpu::SupportedLimits limits_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp
new file mode 100644
index 00000000000..03872484503
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.cpp
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUTexture.h"
+
+#include "src/dawn/node/binding/Converter.h"
+#include "src/dawn/node/binding/Errors.h"
+#include "src/dawn/node/binding/GPUTextureView.h"
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUTexture
+ ////////////////////////////////////////////////////////////////////////////////
+    GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
+    }
+
+    // Creates a GPUTextureView described by `descriptor`.
+    // Throws a JavaScript OperationError if the wrapped texture handle is
+    // null (presumably possible after a failed creation - TODO confirm).
+    interop::Interface<interop::GPUTextureView> GPUTexture::createView(
+        Napi::Env env,
+        interop::GPUTextureViewDescriptor descriptor) {
+        if (!texture_) {
+            Errors::OperationError(env).ThrowAsJavaScriptException();
+            return {};
+        }
+
+        wgpu::TextureViewDescriptor desc{};
+        Converter conv(env);
+        // On any failed conversion, return an empty interface (error
+        // reporting is assumed to be handled inside Converter).
+        if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) ||        //
+            !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||      //
+            !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) ||    //
+            !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) ||  //
+            !conv(desc.format, descriptor.format) ||                    //
+            !conv(desc.dimension, descriptor.dimension) ||              //
+            !conv(desc.aspect, descriptor.aspect)) {
+            return {};
+        }
+        return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+    }
+
+    // Destroys the underlying Dawn texture.
+    void GPUTexture::destroy(Napi::Env) {
+        texture_.Destroy();
+    }
+
+    // Label reflection is not implemented yet for textures.
+    std::optional<std::string> GPUTexture::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUTexture::setLabel(Napi::Env, std::optional<std::string> value) {
+        UNIMPLEMENTED();
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h
new file mode 100644
index 00000000000..cdff7014a0c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTexture.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTURE_H_
+#define DAWN_NODE_BINDING_GPUTEXTURE_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+ // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
+    class GPUTexture final : public interop::GPUTexture {
+      public:
+        // Wraps the given Dawn texture.
+        GPUTexture(wgpu::Texture texture);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::Texture&() const {
+            return texture_;
+        }
+
+        // interop::GPUTexture interface compliance
+        interop::Interface<interop::GPUTextureView> createView(
+            Napi::Env,
+            interop::GPUTextureViewDescriptor descriptor) override;
+        void destroy(Napi::Env) override;
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        // The wrapped Dawn texture.
+        wgpu::Texture texture_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUTEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp
new file mode 100644
index 00000000000..1e183dbda85
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/binding/GPUTextureView.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu::binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUTextureView
+ ////////////////////////////////////////////////////////////////////////////////
+    // Wraps the given Dawn texture view.
+    GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
+    }
+
+    // Label reflection is not implemented yet for texture views.
+    std::optional<std::string> GPUTextureView::getLabel(Napi::Env) {
+        UNIMPLEMENTED();
+    }
+
+    void GPUTextureView::setLabel(Napi::Env, std::optional<std::string> value) {
+        UNIMPLEMENTED();
+    }
+
+} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h
new file mode 100644
index 00000000000..6568e70e557
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/binding/GPUTextureView.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+#define DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn/node/interop/WebGPU.h"
+
+namespace wgpu::binding {
+
+    // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
+    // wgpu::TextureView.
+    class GPUTextureView final : public interop::GPUTextureView {
+      public:
+        explicit GPUTextureView(wgpu::TextureView view);
+
+        // Implicit cast operator to Dawn GPU object
+        inline operator const wgpu::TextureView&() const {
+            return view_;
+        }
+
+        // interop::GPUTextureView interface compliance
+        std::optional<std::string> getLabel(Napi::Env) override;
+        void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+      private:
+        wgpu::TextureView view_;
+    };
+
+} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Browser.idl b/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl
index 8208058f822..8208058f822 100644
--- a/chromium/third_party/dawn/src/dawn_node/interop/Browser.idl
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Browser.idl
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt
new file mode 100644
index 00000000000..98b5695ed54
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/CMakeLists.txt
@@ -0,0 +1,62 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Paths to the interop sources that idlgen generates from the WebIDL inputs.
+set(INTEROP_GEN_DIR "${GEN_DIR}/src/dawn/node/interop")
+set(INTEROP_WEBGPU_H "${INTEROP_GEN_DIR}/WebGPU.h")
+set(INTEROP_WEBGPU_CPP "${INTEROP_GEN_DIR}/WebGPU.cpp")
+
+idlgen(
+ TEMPLATE
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.h.tmpl"
+ IDLS
+ "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+ "${WEBGPU_IDL_PATH}"
+ DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+ OUTPUT
+ "${INTEROP_WEBGPU_H}"
+)
+
+idlgen(
+ TEMPLATE
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.cpp.tmpl"
+ IDLS
+ "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+ "${WEBGPU_IDL_PATH}"
+ DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+ OUTPUT
+ "${INTEROP_WEBGPU_CPP}"
+)
+
+add_library(dawn_node_interop STATIC
+ "Core.cpp"
+ "Core.h"
+ "${INTEROP_WEBGPU_H}"
+ "${INTEROP_WEBGPU_CPP}"
+)
+
+target_include_directories(dawn_node_interop
+ PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${NODE_API_HEADERS_DIR}/include"
+ "${NODE_ADDON_API_DIR}"
+ "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_interop
+ PRIVATE
+ dawncpp
+)
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp b/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp
new file mode 100644
index 00000000000..a2232ae707a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Core.cpp
@@ -0,0 +1,160 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn/node/interop/Core.h"
+
+namespace wgpu::interop {
+
+ Result Success;
+
+    Result Error(std::string msg) {
+        return {std::move(msg)};
+    }
+
+ Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
+ if (value.IsBoolean()) {
+ out = value.ToBoolean();
+ return Success;
+ }
+ return Error("value is not a boolean");
+ }
+ Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
+ if (value.IsString()) {
+ out = value.ToString();
+ return Success;
+ }
+ return Error("value is not a string");
+ }
+ Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int64Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
+ if (value.IsNumber()) {
+ // Note that the JS Number type only stores doubles, so the max integer
+ // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
+ // with 1 implicit bit). This is why there's no UInt64Value() function.
+ out = static_cast<uint64_t>(value.ToNumber().Int64Value());
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().FloatValue();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().DoubleValue();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
+ return Napi::Value::From(env, value);
+ }
+
+} // namespace wgpu::interop
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/Core.h b/chromium/third_party/dawn/src/dawn/node/interop/Core.h
new file mode 100644
index 00000000000..30545695cb3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/Core.h
@@ -0,0 +1,692 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides core interop helpers used by the code generated by the
+// templates.
+
+#ifndef DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+#define DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+
+#include <cstdint>
+#include <optional>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "napi.h"
+
+#include "src/dawn/node/utils/Debug.h"
+
+#define ENABLE_INTEROP_LOGGING 0 // Enable for verbose interop logging
+
+#if ENABLE_INTEROP_LOGGING
+# define INTEROP_LOG(...) LOG(__VA_ARGS__)
+#else
+# define INTEROP_LOG(...)
+#endif
+
+// A helper macro for constructing a PromiseInfo with the current file, function and line.
+// See PromiseInfo
+#define PROMISE_INFO \
+ ::wgpu::interop::PromiseInfo { \
+ __FILE__, __FUNCTION__, __LINE__ \
+ }
+
+namespace wgpu::interop {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Primitive JavaScript types
+ ////////////////////////////////////////////////////////////////////////////////
+ using Object = Napi::Object;
+ using ArrayBuffer = Napi::ArrayBuffer;
+ using Int8Array = Napi::TypedArrayOf<int8_t>;
+ using Int16Array = Napi::TypedArrayOf<int16_t>;
+ using Int32Array = Napi::TypedArrayOf<int32_t>;
+ using Uint8Array = Napi::TypedArrayOf<uint8_t>;
+ using Uint16Array = Napi::TypedArrayOf<uint16_t>;
+ using Uint32Array = Napi::TypedArrayOf<uint32_t>;
+ using Float32Array = Napi::TypedArrayOf<float>;
+ using Float64Array = Napi::TypedArrayOf<double>;
+ using DataView = Napi::TypedArray;
+
+ template <typename T>
+ using FrozenArray = std::vector<T>;
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Result
+ ////////////////////////////////////////////////////////////////////////////////
+
+    // Result is used to hold a success / error state by functions that perform JS <-> C++
+    // conversion
+    struct [[nodiscard]] Result {
+        // Returns true if the operation succeeded, false if there was an error
+        inline operator bool() const {
+            return error.empty();
+        }
+
+        // If Result is an error, then a new Error is returned with the
+        // stringified values appended to the error message.
+        // If Result is a success, then a success Result is returned.
+        template <typename... VALUES>
+        Result Append(VALUES && ... values) {
+            if (*this) {
+                return *this;
+            }
+            std::stringstream ss;
+            ss << error << "\n";
+            utils::Write(ss, std::forward<VALUES>(values)...);
+            return {ss.str()};
+        }
+
+        // The error message, if the operation failed.
+        std::string error;
+    };
+
+ // A successful result
+ extern Result Success;
+
+ // Returns a Result with the given error message
+ Result Error(std::string msg);
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Interface<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Interface<T> is a templated wrapper around a JavaScript object, which
+ // implements the template-generated interface type T. Interfaces are returned
+ // by either calling T::Bind() or T::Create().
+ template <typename T>
+ class Interface {
+ public:
+ // Constructs an Interface with no JS object.
+ inline Interface() {
+ }
+
+ // Constructs an Interface wrapping the given JS object.
+ // The JS object must have been created with a call to T::Bind().
+ explicit inline Interface(Napi::Object o) : object(o) {
+ }
+
+ // Implicit conversion operators to Napi objects.
+ inline operator napi_value() const {
+ return object;
+ }
+ inline operator const Napi::Value&() const {
+ return object;
+ }
+ inline operator const Napi::Object&() const {
+ return object;
+ }
+
+ // Member and dereference operators
+ inline T* operator->() const {
+ return T::Unwrap(object);
+ }
+ inline T* operator*() const {
+ return T::Unwrap(object);
+ }
+
+ // As<IMPL>() returns the unwrapped object cast to the implementation type.
+ // The interface implementation *must* be of the template type IMPL.
+ template <typename IMPL>
+ inline IMPL* As() const {
+ return static_cast<IMPL*>(T::Unwrap(object));
+ }
+
+ private:
+ Napi::Object object;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Promise<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Info holds details about where the promise was constructed.
+ // Used for printing debug messages when a promise is finalized without being resolved
+ // or rejected.
+ // Use the PROMISE_INFO macro to populate this structure.
+ struct PromiseInfo {
+ const char* file = nullptr;
+ const char* function = nullptr;
+ int line = 0;
+ };
+
+ namespace detail {
+ // Base class for Promise<T> specializations.
+ class PromiseBase {
+ public:
+ // Implicit conversion operators to Napi promises.
+ inline operator napi_value() const {
+ return state->deferred.Promise();
+ }
+ inline operator Napi::Value() const {
+ return state->deferred.Promise();
+ }
+ inline operator Napi::Promise() const {
+ return state->deferred.Promise();
+ }
+
+ // Reject() rejects the promise with the given failure value.
+ void Reject(Napi::Value value) const {
+ state->deferred.Reject(value);
+ state->resolved_or_rejected = true;
+ }
+ void Reject(Napi::Error err) const {
+ Reject(err.Value());
+ }
+ void Reject(std::string err) const {
+ Reject(Napi::Error::New(state->deferred.Env(), err));
+ }
+
+ protected:
+ void Resolve(Napi::Value value) const {
+ state->deferred.Resolve(value);
+ state->resolved_or_rejected = true;
+ }
+
+ struct State {
+ Napi::Promise::Deferred deferred;
+ PromiseInfo info;
+ bool resolved_or_rejected = false;
+ };
+
+ PromiseBase(Napi::Env env, const PromiseInfo& info)
+ : state(new State{Napi::Promise::Deferred::New(env), info}) {
+ state->deferred.Promise().AddFinalizer(
+ [](Napi::Env, State* state) {
+ // TODO(https://github.com/gpuweb/cts/issues/784):
+ // Devices are never destroyed, so we always end up
+ // leaking the Device.lost promise. Enable this once
+ // fixed.
+ if ((false)) {
+ if (!state->resolved_or_rejected) {
+ ::wgpu::utils::Fatal("Promise not resolved or rejected",
+ state->info.file, state->info.line,
+ state->info.function);
+ }
+ }
+ delete state;
+ },
+ state);
+ }
+
+ State* const state;
+ };
+ } // namespace detail
+
+ // Promise<T> is a templated wrapper around a JavaScript promise, which can
+ // resolve to the template type T.
+ template <typename T>
+ class Promise : public detail::PromiseBase {
+ public:
+ // Constructor
+ Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
+ }
+
+ // Resolve() fulfills the promise with the given value.
+ void Resolve(T&& value) const {
+ PromiseBase::Resolve(ToJS(state->deferred.Env(), std::forward<T>(value)));
+ }
+ };
+
+ // Specialization for Promises that resolve with no value
+ template <>
+ class Promise<void> : public detail::PromiseBase {
+ public:
+ // Constructor
+ Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
+ }
+
+ // Resolve() fulfills the promise.
+ void Resolve() const {
+ PromiseBase::Resolve(state->deferred.Env().Undefined());
+ }
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Converter<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Converter<T> is specialized for each type T which can be converted from C++
+ // to JavaScript, or JavaScript to C++.
+ // Each specialization of Converter<T> is expected to have two static methods
+ // with the signatures:
+ //
+ // // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
+ // static Result FromJS(Napi::Env, Napi::Value in, T& out);
+ //
+ // // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
+ // // this value.
+ // static Napi::Value ToJS(Napi::Env, T in);
+ template <typename T>
+ class Converter {};
+
+ template <>
+ class Converter<Napi::Object> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
+ if (value.IsObject()) {
+ out = value.ToObject();
+ return Success;
+ }
+ return Error("value is not an object");
+ }
+ static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
+ return value;
+ }
+ };
+
+    template <>
+    class Converter<ArrayBuffer> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
+            if (value.IsArrayBuffer()) {
+                out = value.As<ArrayBuffer>();
+                return Success;
+            }
+            return Error("value is not a ArrayBuffer");
+        }
+        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
+            return value;
+        }
+    };
+
+    template <>
+    class Converter<Napi::TypedArray> {
+      public:
+        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
+            if (value.IsTypedArray()) {
+                out = value.As<Napi::TypedArray>();
+                return Success;
+            }
+            return Error("value is not a TypedArray");
+        }
+        static inline Napi::Value ToJS(Napi::Env, Napi::TypedArray value) {
+            return value;
+        }
+    };
+
+    template <typename T>
+    class Converter<Napi::TypedArrayOf<T>> {
+      public:
+        // clang-format off
+        // The Napi element type of T
+        static constexpr napi_typedarray_type element_type =
+              std::is_same<T, int8_t>::value   ? napi_int8_array
+            : std::is_same<T, uint8_t>::value  ? napi_uint8_array
+            : std::is_same<T, int16_t>::value  ? napi_int16_array
+            : std::is_same<T, uint16_t>::value ? napi_uint16_array
+            : std::is_same<T, int32_t>::value  ? napi_int32_array
+            : std::is_same<T, uint32_t>::value ? napi_uint32_array
+            : std::is_same<T, float>::value    ? napi_float32_array
+            : std::is_same<T, double>::value   ? napi_float64_array
+            : std::is_same<T, int64_t>::value  ? napi_bigint64_array
+            : std::is_same<T, uint64_t>::value ? napi_biguint64_array
+            : static_cast<napi_typedarray_type>(-1);
+        // clang-format on
+        static_assert(static_cast<int>(element_type) >= 0,
+                      "unsupported T type for Napi::TypedArrayOf<T>");
+        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
+            if (value.IsTypedArray()) {
+                auto arr = value.As<Napi::TypedArrayOf<T>>();
+                if (arr.TypedArrayType() == element_type) {
+                    out = arr;
+                    return Success;
+                }
+                return Error("value is not a TypedArray of the correct element type");
+            }
+            return Error("value is not a TypedArray");
+        }
+        static inline Napi::Value ToJS(Napi::Env, Napi::TypedArrayOf<T> value) {
+            return value;
+        }
+    };
+
+ template <>
+ class Converter<std::string> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, std::string&);
+ static Napi::Value ToJS(Napi::Env, std::string);
+ };
+
+ template <>
+ class Converter<bool> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, bool&);
+ static Napi::Value ToJS(Napi::Env, bool);
+ };
+
+ template <>
+ class Converter<int8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int8_t&);
+ static Napi::Value ToJS(Napi::Env, int8_t);
+ };
+
+ template <>
+ class Converter<uint8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
+ static Napi::Value ToJS(Napi::Env, uint8_t);
+ };
+
+ template <>
+ class Converter<int16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int16_t&);
+ static Napi::Value ToJS(Napi::Env, int16_t);
+ };
+
+ template <>
+ class Converter<uint16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
+ static Napi::Value ToJS(Napi::Env, uint16_t);
+ };
+
+ template <>
+ class Converter<int32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int32_t&);
+ static Napi::Value ToJS(Napi::Env, int32_t);
+ };
+
+ template <>
+ class Converter<uint32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
+ static Napi::Value ToJS(Napi::Env, uint32_t);
+ };
+
+ template <>
+ class Converter<int64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int64_t&);
+ static Napi::Value ToJS(Napi::Env, int64_t);
+ };
+
+ template <>
+ class Converter<uint64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
+ static Napi::Value ToJS(Napi::Env, uint64_t);
+ };
+
+ template <>
+ class Converter<float> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, float&);
+ static Napi::Value ToJS(Napi::Env, float);
+ };
+
+ template <>
+ class Converter<double> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, double&);
+ static Napi::Value ToJS(Napi::Env, double);
+ };
+
+ template <typename T>
+ class Converter<Interface<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
+ if (!value.IsObject()) {
+ return Error("value is not object");
+ }
+ auto obj = value.As<Napi::Object>();
+ if (!T::Unwrap(obj)) {
+ return Error("object is not of the correct interface type");
+ }
+ out = Interface<T>(obj);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
+ return {env, value};
+ }
+ };
+
+ template <typename T>
+ class Converter<std::optional<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ out.reset();
+ return Success;
+ }
+ T v{};
+ auto res = Converter<T>::FromJS(env, value, v);
+ if (!res) {
+ return res;
+ }
+ out = std::move(v);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
+ if (value.has_value()) {
+ return Converter<T>::ToJS(env, value.value());
+ }
+ return env.Null();
+ }
+ };
+
+ template <typename T>
+ class Converter<std::vector<T>> {
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
+ if (!value.IsArray()) {
+ return Error("value is not an array");
+ }
+ auto arr = value.As<Napi::Array>();
+ std::vector<T> vec(arr.Length());
+ for (size_t i = 0; i < vec.size(); i++) {
+ auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
+ if (!res) {
+ return res.Append("for array element ", i);
+ }
+ }
+ out = std::move(vec);
+ return Success;
+ }
+ static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
+ auto arr = Napi::Array::New(env, vec.size());
+ for (size_t i = 0; i < vec.size(); i++) {
+ arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
+ }
+ return arr;
+ }
+ };
+
+    template <typename K, typename V>
+    class Converter<std::unordered_map<K, V>> {
+      public:
+        static inline Result FromJS(Napi::Env env,
+                                    Napi::Value value,
+                                    std::unordered_map<K, V>& out) {
+            if (!value.IsObject()) {
+                return Error("value is not an object");
+            }
+            auto obj = value.ToObject();
+            auto keys = obj.GetPropertyNames();
+            std::unordered_map<K, V> map(keys.Length());
+            for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
+                K key{};
+                V val{};
+                auto key_res = Converter<K>::FromJS(env, keys[i], key);
+                if (!key_res) {
+                    return key_res.Append("for object key");
+                }
+                auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), val);
+                if (!value_res) {
+                    return value_res.Append("for object value of key: ", key);
+                }
+                map[key] = val;
+            }
+            out = std::move(map);
+            return Success;
+        }
+        static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
+            auto obj = Napi::Object::New(env);
+            for (const auto& it : value) {
+                obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
+            }
+            return obj;
+        }
+    };
+
+ template <typename... TYPES>
+ class Converter<std::variant<TYPES...>> {
+ template <typename TY>
+ static inline Result TryFromJS(Napi::Env env,
+ Napi::Value value,
+ std::variant<TYPES...>& out) {
+ TY v{};
+ auto res = Converter<TY>::FromJS(env, value, v);
+ if (!res) {
+ return Error("no possible types matched");
+ }
+ out = std::move(v);
+ return Success;
+ }
+
+ template <typename T0, typename T1, typename... TN>
+ static inline Result TryFromJS(Napi::Env env,
+ Napi::Value value,
+ std::variant<TYPES...>& out) {
+ if (TryFromJS<T0>(env, value, out)) {
+ return Success;
+ }
+ return TryFromJS<T1, TN...>(env, value, out);
+ }
+
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+ return TryFromJS<TYPES...>(env, value, out);
+ }
+ static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
+ return std::visit(
+ [&](auto&& v) {
+ using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
+ return Converter<T>::ToJS(env, v);
+ },
+ value);
+ }
+ };
+
+ template <typename T>
+ class Converter<Promise<T>> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
+ UNIMPLEMENTED();
+ }
+ static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
+ return promise;
+ }
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Helpers
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // FromJS() is a helper function which delegates to
+ // Converter<T>::FromJS()
+ template <typename T>
+ inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
+ return Converter<T>::FromJS(env, value, out);
+ }
+
+ // FromJSOptional() is similar to FromJS(), but if 'value' is either null
+ // or undefined then 'out' is left unassigned.
+ template <typename T>
+ inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ return Success;
+ }
+ return Converter<T>::FromJS(env, value, out);
+ }
+
+ // ToJS() is a helper function which delegates to Converter<T>::ToJS()
+ template <typename T>
+ inline Napi::Value ToJS(Napi::Env env, T&& value) {
+ return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
+ env, std::forward<T>(value));
+ }
+
+ // DefaultedParameter can be used in the tuple parameter types passed to
+ // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
+ // that have a default value. If the argument is omitted in the call, then
+ // DefaultedParameter::default_value will be assigned to
+ // DefaultedParameter::value.
+ template <typename T>
+ struct DefaultedParameter {
+ T value; // The argument value assigned by FromJS()
+ T default_value; // The default value if no argument supplied
+
+ // Implicit conversion operator. Returns value.
+ inline operator const T&() const {
+ return value;
+ }
+ };
+
+ // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
+ template <typename T>
+ struct IsDefaultedParameter {
+ static constexpr bool value = false;
+ };
+ template <typename T>
+ struct IsDefaultedParameter<DefaultedParameter<T>> {
+ static constexpr bool value = true;
+ };
+
+ // FromJS() is a helper function for bulk converting the arguments of 'info'.
+ // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
+ // Parameters may be of the templated DefaultedParameter type, in which case
+ // the parameter will default to the default-value if omitted.
+ template <typename PARAM_TYPES, int BASE_INDEX = 0>
+ inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
+ if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
+ using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
+ auto& value = info[BASE_INDEX];
+ auto& out = std::get<BASE_INDEX>(args);
+ if constexpr (IsDefaultedParameter<T>::value) {
+ // Parameter has a default value.
+ // Check whether the argument was provided.
+ if (value.IsNull() || value.IsUndefined()) {
+ // Use default value for this parameter
+ out.value = out.default_value;
+ } else {
+ // Argument was provided
+ auto res = FromJS(info.Env(), value, out.value);
+ if (!res) {
+ return res;
+ }
+ }
+ } else {
+ // Parameter does not have a default value.
+ auto res = FromJS(info.Env(), value, out);
+ if (!res) {
+ return res;
+ }
+ }
+ // Convert the rest of the arguments
+ return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
+ } else {
+ return Success;
+ }
+ }
+
+} // namespace wgpu::interop
+
+#endif // DAWN_NODE_INTEROP_CORE_WEBGPU_H_
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.cpp.tmpl b/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.cpp.tmpl
new file mode 100644
index 00000000000..ca6e24a8a08
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.cpp.tmpl
@@ -0,0 +1,393 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go to generate
+the WebGPU.cpp source file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#include "src/dawn/node/interop/WebGPU.h"
+
+#include <unordered_map>
+
+#include "src/dawn/node/utils/Debug.h"
+
+namespace wgpu {
+namespace interop {
+
+namespace {
+
+{{template "Wrappers" $}}
+
+} // namespace
+
+{{ range $ := .Declarations}}
+{{- if IsDictionary $}}{{template "Dictionary" $}}
+{{- else if IsInterface $}}{{template "Interface" $}}
+{{- else if IsEnum $}}{{template "Enum" $}}
+{{- end}}
+{{- end}}
+
+
+void Initialize(Napi::Env env) {
+ auto* wrapper = Wrappers::Init(env);
+ auto global = env.Global();
+{{ range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ global.Set(Napi::String::New(env, "{{$.Name}}"), wrapper->{{$.Name}}_ctor.Value());
+{{- end}}
+{{- end}}
+}
+
+} // namespace interop
+} // namespace wgpu
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrappers emits the C++ 'Wrappers' class, which holds all the interface and
+-- namespace interop wrapper classes.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrappers"}}
+// Wrappers holds all the Napi class constructors, and Napi::ObjectWrap type
+// declarations, for each of the WebIDL interface and namespace types.
+class Wrappers {
+ Wrappers(Napi::Env env) {
+{{- range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ {{$.Name}}_ctor = Napi::Persistent(W{{$.Name}}::Class(env));
+{{- end}}
+{{- end}}
+ }
+
+ static Wrappers* instance;
+
+public:
+{{- range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}{{template "Wrapper" $}}
+{{- end}}
+{{- end}}
+
+ // Allocates and constructs the Wrappers instance
+ static Wrappers* Init(Napi::Env env) {
+ instance = new Wrappers(env);
+ return instance;
+ }
+
+ // Destructs and frees the Wrappers instance
+ static void Term(Napi::Env env) {
+ delete instance;
+ instance = nullptr;
+ }
+
+ static Wrappers* For(Napi::Env env) {
+ // Currently Napi only actually supports a single Env, so there's no point
+ // maintaining a map of Env to Wrapper. Note: This might not always be true.
+ return instance;
+ }
+
+{{ range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ Napi::FunctionReference {{$.Name}}_ctor;
+{{- end}}
+{{- end}}
+};
+
+Wrappers* Wrappers::instance = nullptr;
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrapper emits the C++ wrapper class for the given ast.Interface or
+-- ast.Namespace.
+-- This wrapper class inherits from Napi::ObjectWrap, which binds the lifetime
+-- of the JavaScript object to the lifetime of the wrapper class instance.
+-- If the wrapper is for an interface, the wrapper object holds a unique_ptr to
+-- the interface implementation, and delegates all exposed method calls on to
+-- the implementation.
+-- See: https://github.com/nodejs/node-addon-api/blob/main/doc/object_wrap.md
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrapper"}}
+ struct W{{$.Name}} : public Napi::ObjectWrap<W{{$.Name}}> {
+{{- if IsInterface $}}
+ std::unique_ptr<{{$.Name}}> impl;
+{{- end}}
+ static Napi::Function Class(Napi::Env env) {
+ return DefineClass(env, "{{$.Name}}", {
+{{ if $s := SetlikeOf $}}
+ InstanceMethod("has", &W{{$.Name}}::has),
+ InstanceMethod("keys", &W{{$.Name}}::keys),
+{{- end}}
+{{- range $m := MethodsOf $}}
+ InstanceMethod("{{$m.Name}}", &W{{$.Name}}::{{$m.Name}}),
+{{- end}}
+{{- range $a := AttributesOf $}}
+ InstanceAccessor("{{$a.Name}}", &W{{$.Name}}::get{{Title $a.Name}},
+{{- if $a.Readonly}} nullptr{{else}} &W{{$.Name}}::set{{Title $a.Name}}{{end -}}
+ ),
+{{- end}}
+{{- range $c := ConstantsOf $}}
+ StaticValue("{{$c.Name}}", ToJS(env, {{$.Name}}::{{$c.Name}}), napi_default_jsproperty),
+{{- end}}
+ });
+ }
+
+ W{{$.Name}}(const Napi::CallbackInfo& info) : ObjectWrap(info) {}
+
+{{ if $s := SetlikeOf $}}
+ Napi::Value has(const Napi::CallbackInfo& info) {
+ std::tuple<{{template "Type" $s.Elem}}> args;
+ auto res = FromJS(info, args);
+ if (res) {
+ return ToJS(info.Env(), impl->has(info.Env(), std::get<0>(args)));
+ }
+ Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
+ return {};
+ }
+ Napi::Value keys(const Napi::CallbackInfo& info) {
+ return ToJS(info.Env(), impl->keys(info.Env()));
+ }
+{{- end}}
+{{- range $m := MethodsOf $}}
+ Napi::Value {{$m.Name}}(const Napi::CallbackInfo& info) {
+ std::string error;
+{{- range $overload_idx, $o := $m.Overloads}}
+{{- $overloaded := gt (len $m.Overloads) 1}}
+ { {{if $overloaded}}// Overload {{$overload_idx}}{{end}}
+ std::tuple<
+{{- range $i, $p := $o.Parameters}}
+{{- if $i}}, {{end}}
+{{- if $p.Init }}DefaultedParameter<{{template "Type" $p.Type}}>
+{{- else if $p.Optional}}std::optional<{{template "Type" $p.Type}}>
+{{- else }}{{template "Type" $p.Type}}
+{{- end}}
+{{- end}}> args;
+
+{{- range $i, $p := $o.Parameters}}
+{{- if $p.Init}}
+ std::get<{{$i}} /* {{$p.Name}} */>(args).default_value = {{Eval "Literal" "Value" $p.Init "Type" $p.Type}};
+{{- end}}
+{{- end}}
+
+ auto res = FromJS(info, args);
+ if (res) {
+ {{/* indent */}}INTEROP_LOG(
+{{- range $i, $p := $o.Parameters}}
+{{- if $i}}, ", {{$p.Name}}: "{{else}}"{{$p.Name}}: "{{end}}, std::get<{{$i}}>(args)
+{{- end}});
+ {{/* indent */}}
+{{- if not (IsUndefinedType $o.Type) }}auto result = {{end -}}
+ impl->{{$o.Name}}(info.Env(){{range $i, $_ := $o.Parameters}}, std::get<{{$i}}>(args){{end}});
+ {{/* indent */ -}}
+{{- if IsUndefinedType $o.Type}}return info.Env().Undefined();
+{{- else }}return ToJS(info.Env(), result);
+{{- end }}
+ }
+ error = {{if $overloaded}}"\noverload {{$overload_idx}} failed to match:\n" + {{end}}res.error;
+ }
+{{- end}}
+ Napi::Error::New(info.Env(), "no overload matched for {{$m.Name}}:\n" + error).ThrowAsJavaScriptException();
+ return {};
+ }
+{{- end}}
+
+{{- range $a := AttributesOf $}}
+ Napi::Value get{{Title $a.Name}}(const Napi::CallbackInfo& info) {
+ return ToJS(info.Env(), impl->get{{Title $a.Name}}(info.Env()));
+ }
+{{- if not $a.Readonly}}
+ void set{{Title $a.Name}}(const Napi::CallbackInfo& info, const Napi::Value& value) {
+ {{template "Type" $a.Type}} v{};
+ auto res = FromJS(info.Env(), value, v);
+ if (res) {
+ impl->set{{Title $a.Name}}(info.Env(), std::move(v));
+ } else {
+ res = res.Append("invalid value to {{$a.Name}}");
+ Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
+ }
+ }
+{{- end }}
+{{- end}}
+ };
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ method implementations and associated functions of
+-- the interop type that defines the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+ auto object = value.ToObject();
+ Result res;
+{{- template "DictionaryMembersFromJS" $}};
+ return Success;
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+ auto object = Napi::Object::New(env);
+{{- template "DictionaryMembersToJS" $}}
+ return object;
+}
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& dict) {
+ o << "{{$.Name}} {";
+{{- range $i, $m := $.Members}}
+ o << {{if $i}}", "{{else}}" "{{end}} << "{{$m.Name}}: ";
+ utils::Write(o, dict.{{$m.Name}});
+{{- end }}
+ o << "}" << std::endl;
+ return o;
+}
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersFromJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields from JavaScript to C++. Each call to FromJS() is
+-- emitted as a separate statement, and requires a 'Result res' local to be
+-- declared
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersFromJS"}}
+{{- if $.Inherits}}{{template "DictionaryMembersFromJS" (Lookup $.Inherits)}}{{end}}
+{{- range $i, $m := $.Members}}
+ {{/* indent */}}
+{{- if $m.Init }}res = interop::FromJSOptional(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{- else }}res = interop::FromJS(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{- end }}
+ if (!res) {
+ return res.Append("while converting member '{{$m.Name}}'");
+ }
+{{- end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersToJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields to JavaScript from C++. Each call to ToJS() is
+-- emitted as a separate statement
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersToJS"}}
+{{- if $.Inherits}}{{template "DictionaryMembersToJS" (Lookup $.Inherits)}}{{end}}
+{{- range $m := $.Members}}
+ object.Set(Napi::String::New(env, "{{$m.Name}}"), interop::ToJS(env, value.{{$m.Name}}));
+{{- end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ method implementations that define the given
+-- ast.Interface.
+-- Note: Most of the actual binding logic lives in the interface wrapper class.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+{{$.Name}}::{{$.Name}}() = default;
+
+{{$.Name}}* {{$.Name}}::Unwrap(Napi::Object object) {
+ auto* wrappers = Wrappers::For(object.Env());
+ if (!object.InstanceOf(wrappers->{{$.Name}}_ctor.Value())) {
+ return nullptr;
+ }
+ return Wrappers::W{{$.Name}}::Unwrap(object)->impl.get();
+}
+
+Interface<{{$.Name}}> {{$.Name}}::Bind(Napi::Env env, std::unique_ptr<{{$.Name}}>&& impl) {
+ auto* wrappers = Wrappers::For(env);
+ auto object = wrappers->{{$.Name}}_ctor.New({});
+ auto* wrapper = Wrappers::W{{$.Name}}::Unwrap(object);
+ wrapper->impl = std::move(impl);
+ return Interface<{{$.Name}}>(object);
+}
+
+{{$.Name}}::~{{$.Name}}() = default;
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ associated functions of the interop type that defines the
+-- given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+bool Converter<{{$.Name}}>::FromString(std::string str, {{$.Name}}& out) {
+{{- range $e := $.Values}}
+ if (str == {{$e.Value}}) {
+ out = {{$.Name}}::{{EnumEntryName $e.Value}};
+ return true;
+ }
+{{- end}}
+ return false;
+}
+
+const char* Converter<{{$.Name}}>::ToString({{$.Name}} value) {
+ switch (value) {
+{{- range $e := $.Values}}
+ case {{$.Name}}::{{EnumEntryName $e.Value}}:
+ return {{$e.Value}};
+{{- end}}
+ }
+ return nullptr;
+}
+
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+ std::string str = value.ToString();
+ if (FromString(str, out)) {
+ return Success;
+ }
+ return Error(str + " is not a valid enum value of {{$.Name}}");
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+ switch (value) {
+{{- range $e := $.Values}}
+ case {{$.Name}}::{{EnumEntryName $e.Value}}:
+ return Napi::String::New(env, {{$e.Value}});
+{{- end}}
+ }
+ return env.Undefined();
+}
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}} value) {
+ if (auto* s = Converter<{{$.Name}}>::ToString(value)) {
+ return o << s;
+ }
+ return o << "undefined<{{$.Name}}>";
+}
+
+{{end}}
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.h.tmpl b/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.h.tmpl
new file mode 100644
index 00000000000..1a1a97040ec
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/WebGPU.h.tmpl
@@ -0,0 +1,282 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go to generate
+the WebGPU.h header file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#ifndef DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+#define DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+#include "src/dawn/node/interop/Core.h"
+
+namespace wgpu {
+namespace interop {
+
+// Initialize() registers the WebGPU types with the Napi environment.
+void Initialize(Napi::Env env);
+
+{{ range $ := .Declarations}}
+{{- if IsDictionary $}}{{template "Dictionary" $}}
+{{- else if IsNamespace $}}{{template "Namespace" $}}
+{{- else if IsInterface $}}{{template "Interface" $}}
+{{- else if IsEnum $}}{{template "Enum" $}}
+{{- else if IsTypedef $}}{{template "Typedef" $}}
+{{- end}}
+{{- end}}
+
+} // namespace interop
+} // namespace wgpu
+
+#endif // DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ header declaration that defines the interop type for
+-- the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+// dictionary {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+{{ range $m := $.Members}}
+{{- if IsConstructor $m}} {{$.Name}}();
+{{ else if IsMember $m}} {{template "DictionaryMember" $m}}
+{{ end}}
+{{- end -}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+ static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+ static Napi::Value ToJS(Napi::Env, {{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& desc);
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Namespace emits the C++ header declaration that defines the interop type for
+-- the given ast.Namespace
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Namespace"}}
+// namespace {{$.Name}}
+class {{$.Name}} {
+public:
+ virtual ~{{$.Name}}();
+ {{$.Name}}();
+{{- range $c := ConstantsOf $}}
+{{- template "Constant" $c}}
+{{- end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ header declaration that defines the interop type for
+-- the given ast.Interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+// interface {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+ static Interface<{{$.Name}}> Bind(Napi::Env, std::unique_ptr<{{$.Name}}>&&);
+ static {{$.Name}}* Unwrap(Napi::Object);
+
+ template<typename T, typename ... ARGS>
+ static inline Interface<{{$.Name}}> Create(Napi::Env env, ARGS&& ... args) {
+ return Bind(env, std::make_unique<T>(std::forward<ARGS>(args)...));
+ }
+
+ virtual ~{{$.Name}}();
+ {{$.Name}}();
+{{- if $s := SetlikeOf $}}
+{{- template "InterfaceSetlike" $s}}
+{{- end}}
+{{- range $m := MethodsOf $}}
+{{- template "InterfaceMethod" $m}}
+{{- end}}
+{{- range $a := AttributesOf $}}
+{{- template "InterfaceAttribute" $a}}
+{{- end}}
+{{- range $c := ConstantsOf $}}
+{{- template "Constant" $c}}
+{{- end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Typedef emits the C++ header declaration that defines the interop type for
+-- the given ast.Typedef
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Typedef"}}
+using {{$.Name}} = {{template "Type" $.Type}};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ header declaration that defines the interop type for
+-- the given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+enum class {{$.Name}} {
+{{- range $ := $.Values}}
+ {{EnumEntryName $.Value}},
+{{- end}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+ static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+ static Napi::Value ToJS(Napi::Env, {{$.Name}});
+ static bool FromString(std::string, {{$.Name}}&);
+ static const char* ToString({{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}});
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMember emits the C++ declaration for a single dictionary ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMember"}}
+{{- if $.Attribute}}{{template "AttributeType" $}} {{$.Name}}
+{{- if $.Init}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}}{{end}};
+{{- else }}{{template "Type" $.Type}} {{$.Name}}({{template "Parameters" $.Parameters}});
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceSetlike emits the C++ methods for a setlike interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceSetlike"}}
+ virtual bool has(Napi::Env, {{template "Type" $.Elem}}) = 0;
+ virtual std::vector<{{template "Type" $.Elem}}> keys(Napi::Env) = 0;
+{{- /* TODO(crbug.com/dawn/1143):
+ entries, forEach, size, values
+ read-write: add, clear, or delete
+*/}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceMethod emits the C++ declaration for a single interface ast.Member
+-- method
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceMethod"}}
+{{- range $o := $.Overloads}}
+ virtual {{template "Type" $o.Type}} {{$.Name}}(Napi::Env{{template "ParametersWithLeadingComma" $o.Parameters}}) = 0;
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceAttribute emits the C++ declaration for a single interface
+-- ast.Member attribute
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceAttribute"}}
+ virtual {{template "Type" $.Type}} get{{Title $.Name}}(Napi::Env) = 0;
+{{- if not $.Readonly}}
+ virtual void set{{Title $.Name}}(Napi::Env, {{template "Type" $.Type}} value) = 0;
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Constant emits the C++ declaration for a single ast.Member constant
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Constant"}}
+ static constexpr {{template "Type" $.Type}} {{$.Name}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}};
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameters emits the C++ comma separated list of parameter declarations for
+-- the given []ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameters"}}
+{{- range $i, $param := $ }}
+{{- if $i }}, {{end}}
+{{- template "Parameter" $param}}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- ParametersWithLeadingComma emits the C++ comma separated list of parameter
+-- declarations for the given []ast.Parameter, starting with a leading comma
+-- for the first parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "ParametersWithLeadingComma"}}
+{{- range $i, $param := $ }}, {{/* */}}
+{{- template "Parameter" $param}}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameter emits the C++ parameter type and name for the given ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameter" -}}
+{{- if $.Init }}{{template "Type" $.Type}} {{$.Name}}
+{{- else if $.Optional}}std::optional<{{template "Type" $.Type}}> {{$.Name}}
+{{- else }}{{template "Type" $.Type}} {{$.Name}}
+{{- end }}
+{{- end}}
diff --git a/chromium/third_party/dawn/src/dawn/node/interop/WebGPUCommon.tmpl b/chromium/third_party/dawn/src/dawn/node/interop/WebGPUCommon.tmpl
new file mode 100644
index 00000000000..94b2e407c84
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/interop/WebGPUCommon.tmpl
@@ -0,0 +1,127 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn/node/tools/cmd/idlgen/main.go.
+This file provides common template definitions and is included by WebGPU.h.tmpl
+and WebGPU.cpp.tmpl.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn/node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Type generates the C++ type for the given ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Type" -}}
+{{- if IsUndefinedType $}}void
+{{- else if IsTypeName $}}
+{{- if eq $.Name "boolean" }}bool
+{{- else if eq $.Name "long" }}int32_t
+{{- else if eq $.Name "unsigned long" }}uint32_t
+{{- else if eq $.Name "long long" }}int64_t
+{{- else if eq $.Name "unsigned long long" }}uint64_t
+{{- else if eq $.Name "object" }}Object
+{{- else if eq $.Name "DOMString" }}std::string
+{{- else if eq $.Name "USVString" }}std::string
+{{- else if eq $.Name "ArrayBuffer" }}ArrayBuffer
+{{- else if IsInterface (Lookup $.Name) }}Interface<{{$.Name}}>
+{{- else }}{{$.Name}}
+{{- end }}
+{{- else if IsParametrizedType $}}{{$.Name}}<{{template "TypeList" $.Elems}}>
+{{- else if IsNullableType $}}std::optional<{{template "Type" $.Type}}>
+{{- else if IsUnionType $}}std::variant<{{template "VariantTypeList" $.Types}}>
+{{- else if IsSequenceType $}}std::vector<{{template "Type" $.Elem}}>
+{{- else if IsRecordType $}}std::unordered_map<{{template "Type" $.Key}}, {{template "Type" $.Elem}}>
+{{- else }} /* Unhandled Type {{printf "%T" $}} */
+{{- end -}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- AttributeType generates the C++ type for the given ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "AttributeType" -}}
+{{- if $.Required }}{{template "Type" $.Type}}
+{{- else if $.Init }}{{template "Type" $.Type}}
+{{- else }}std::optional<{{template "Type" $.Type}}>
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Literal generates a C++ literal value using the following arguments:
+-- Value - the ast.Literal
+-- Type - the ast.Type of the literal
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Literal" -}}
+{{- if IsDefaultDictionaryLiteral $.Value}}{{template "Type" $.Type}}{}
+{{- else if IsTypeName $.Type }}
+{{- $ty := Lookup $.Type.Name}}
+{{- if IsTypedef $ty }}{{Eval "Literal" "Value" $.Value "Type" $ty.Type}}
+{{- else if IsEnum $ty }}{{$.Type.Name}}::{{EnumEntryName $.Value.Value}}
+{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{- else }}/* Unhandled Type {{printf "ty: %T $.Type.Name: %T $.Value: %T" $ty $.Type.Name $.Value}} */
+{{- end }}
+{{- else if IsSequenceType $.Type }}{{template "Type" $.Type}}{} {{- /* TODO: Assumes the initialiser is empty */}}
+{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{- else }} /* Unhandled Type {{printf "%T %T" $.Type $.Value}} */
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- TypeList generates a C++ comma separated list of types from the given
+-- []ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "TypeList" -}}
+{{- range $i, $ty := $}}
+{{- if $i }}, {{end}}
+{{- template "Type" $ty}}
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- VariantTypeList generates a C++ comma separated list of types from the given
+-- []ast.Type, skipping any 'undefined' types
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "VariantTypeList" -}}
+{{- range $i, $ty := $}}
+{{- if not (IsUndefinedType $ty)}}
+{{- if $i }}, {{end}}
+{{- template "Type" $ty}}
+{{- end}}
+{{- end}}
+{{- end }}
+
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/go.mod b/chromium/third_party/dawn/src/dawn/node/tools/go.mod
new file mode 100644
index 00000000000..b5eb8dfb508
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/tools/go.mod
@@ -0,0 +1,9 @@
+module dawn.googlesource.com/dawn/src/dawn/node/tools
+
+go 1.16
+
+require (
+ github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
+ github.com/mattn/go-colorable v0.1.9
+ github.com/mattn/go-isatty v0.0.14 // indirect
+)
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/go.sum b/chromium/third_party/dawn/src/dawn/node/tools/go.sum
index 42c01181c64..42c01181c64 100644
--- a/chromium/third_party/dawn/src/dawn_node/tools/go.sum
+++ b/chromium/third_party/dawn/src/dawn/node/tools/go.sum
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/run-cts b/chromium/third_party/dawn/src/dawn/node/tools/run-cts
index cf58452becc..cf58452becc 100755
--- a/chromium/third_party/dawn/src/dawn_node/tools/run-cts
+++ b/chromium/third_party/dawn/src/dawn/node/tools/run-cts
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go
new file mode 100644
index 00000000000..55488e32280
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/idlgen/main.go
@@ -0,0 +1,640 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// idlgen is a tool used to generate code from WebIDL files and a golang
+// template file
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+
+ "github.com/ben-clayton/webidlparser/ast"
+ "github.com/ben-clayton/webidlparser/parser"
+)
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+func showUsage() {
+ fmt.Println(`
+idlgen is a tool used to generate code from WebIDL files and a golang
+template file
+
+Usage:
+ idlgen --template=<template-path> --output=<output-path> <idl-file> [<idl-file>...]`)
+ os.Exit(1)
+}
+
+func run() error {
+ var templatePath string
+ var outputPath string
+ flag.StringVar(&templatePath, "template", "", "the template file run with the parsed WebIDL files")
+ flag.StringVar(&outputPath, "output", "", "the output file")
+ flag.Parse()
+
+ idlFiles := flag.Args()
+
+ // Check all required arguments are provided
+ if templatePath == "" || outputPath == "" || len(idlFiles) == 0 {
+ showUsage()
+ }
+
+ // Open up the output file
+ out := os.Stdout
+ if outputPath != "" {
+ file, err := os.Create(outputPath)
+ if err != nil {
+ return fmt.Errorf("failed to open output file '%v'", outputPath)
+ }
+ out = file
+ defer file.Close()
+ }
+
+ // Read the template file
+ tmpl, err := ioutil.ReadFile(templatePath)
+ if err != nil {
+ return fmt.Errorf("failed to open template file '%v'", templatePath)
+ }
+
+ // idl is the combination of the parsed idlFiles
+ idl := &ast.File{}
+
+ // Parse each of the WebIDL files and add the declarations to idl
+ for _, path := range idlFiles {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("failed to open file '%v'", path)
+ }
+ fileIDL := parser.Parse(string(content))
+ if numErrs := len(fileIDL.Errors); numErrs != 0 {
+ errs := make([]string, numErrs)
+ for i, e := range fileIDL.Errors {
+ errs[i] = e.Message
+ }
+ return fmt.Errorf("errors found while parsing %v:\n%v", path, strings.Join(errs, "\n"))
+ }
+ idl.Declarations = append(idl.Declarations, fileIDL.Declarations...)
+ }
+
+ // Initialize the generator
+ g := generator{t: template.New(templatePath)}
+ g.workingDir = filepath.Dir(templatePath)
+ g.funcs = map[string]interface{}{
+ // Functions exposed to the template
+ "AttributesOf": attributesOf,
+ "ConstantsOf": constantsOf,
+ "EnumEntryName": enumEntryName,
+ "Eval": g.eval,
+ "Include": g.include,
+ "IsBasicLiteral": is(ast.BasicLiteral{}),
+ "IsConstructor": isConstructor,
+ "IsDefaultDictionaryLiteral": is(ast.DefaultDictionaryLiteral{}),
+ "IsDictionary": is(ast.Dictionary{}),
+ "IsEnum": is(ast.Enum{}),
+ "IsInterface": is(ast.Interface{}),
+ "IsInterfaceOrNamespace": is(ast.Interface{}, ast.Namespace{}),
+ "IsMember": is(ast.Member{}),
+ "IsNamespace": is(ast.Namespace{}),
+ "IsNullableType": is(ast.NullableType{}),
+ "IsParametrizedType": is(ast.ParametrizedType{}),
+ "IsRecordType": is(ast.RecordType{}),
+ "IsSequenceType": is(ast.SequenceType{}),
+ "IsTypedef": is(ast.Typedef{}),
+ "IsTypeName": is(ast.TypeName{}),
+ "IsUndefinedType": isUndefinedType,
+ "IsUnionType": is(ast.UnionType{}),
+ "Lookup": g.lookup,
+ "MethodsOf": methodsOf,
+ "SetlikeOf": setlikeOf,
+ "Title": strings.Title,
+ }
+ t, err := g.t.
+ Option("missingkey=invalid").
+ Funcs(g.funcs).
+ Parse(string(tmpl))
+ if err != nil {
+ return fmt.Errorf("failed to parse template file '%v': %w", templatePath, err)
+ }
+
+ // simplify the definitions in the WebIDL before passing this to the template
+ idl, declarations := simplify(idl)
+ g.declarations = declarations
+
+ // Write the file header
+ fmt.Fprintf(out, header, strings.Join(os.Args[1:], "\n// "))
+
+ // Execute the template
+ return t.Execute(out, idl)
+}
+
+// declarations is a map of WebIDL declaration name to its AST node.
+type declarations map[string]ast.Decl
+
+// nameOf returns the name of the AST node n.
+// Returns an empty string if the node is not named.
+func nameOf(n ast.Node) string {
+ switch n := n.(type) {
+ case *ast.Namespace:
+ return n.Name
+ case *ast.Interface:
+ return n.Name
+ case *ast.Dictionary:
+ return n.Name
+ case *ast.Enum:
+ return n.Name
+ case *ast.Typedef:
+ return n.Name
+ case *ast.Mixin:
+ return n.Name
+ case *ast.Includes:
+ return ""
+ default:
+ panic(fmt.Errorf("unhandled AST declaration %T", n))
+ }
+}
+
+// simplify processes the AST 'in', returning a new AST that:
+// * Has all partial interfaces merged into a single interface.
+// * Has all mixins flattened into their place of use.
+// * Has all the declarations ordered in dependency order (leaf first)
+// simplify also returns the map of declarations in the AST.
+func simplify(in *ast.File) (*ast.File, declarations) {
+ s := simplifier{
+ declarations: declarations{},
+ registered: map[string]bool{},
+ out: &ast.File{},
+ }
+
+ // Walk the IDL declarations to merge together partial interfaces and embed
+ // mixins into their uses.
+ {
+ interfaces := map[string]*ast.Interface{}
+ mixins := map[string]*ast.Mixin{}
+ includes := []*ast.Includes{}
+ for _, d := range in.Declarations {
+ switch d := d.(type) {
+ case *ast.Interface:
+ if i, ok := interfaces[d.Name]; ok {
+ // Merge partial body into one interface
+ i.Members = append(i.Members, d.Members...)
+ } else {
+ clone := *d
+ d := &clone
+ interfaces[d.Name] = d
+ s.declarations[d.Name] = d
+ }
+ case *ast.Mixin:
+ mixins[d.Name] = d
+ s.declarations[d.Name] = d
+ case *ast.Includes:
+ includes = append(includes, d)
+ default:
+ if name := nameOf(d); name != "" {
+ s.declarations[nameOf(d)] = d
+ }
+ }
+ }
+
+ // Merge mixin into interface
+ for _, include := range includes {
+ i, ok := interfaces[include.Name]
+ if !ok {
+ panic(fmt.Errorf("%v includes %v, but %v is not an interface", include.Name, include.Source, include.Name))
+ }
+ m, ok := mixins[include.Source]
+ if !ok {
+ panic(fmt.Errorf("%v includes %v, but %v is not an mixin", include.Name, include.Source, include.Source))
+ }
+ // Merge mixin into the interface
+ for _, member := range m.Members {
+ if member, ok := member.(*ast.Member); ok {
+ i.Members = append(i.Members, member)
+ }
+ }
+ }
+ }
+
+ // Now traverse the declarations in to produce the dependency-ordered
+ // output `s.out`.
+ for _, d := range in.Declarations {
+ if name := nameOf(d); name != "" {
+ s.visit(s.declarations[nameOf(d)])
+ }
+ }
+
+ return s.out, s.declarations
+}
+
+// simplifier holds internal state for simplify()
+type simplifier struct {
+ // all AST declarations
+ declarations declarations
+ // set of visited declarations
+ registered map[string]bool
+ // the dependency-ordered output
+ out *ast.File
+}
+
+// visit traverses the AST declaration 'd' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visit(d ast.Decl) {
+ register := func(name string) bool {
+ if s.registered[name] {
+ return true
+ }
+ s.registered[name] = true
+ return false
+ }
+ switch d := d.(type) {
+ case *ast.Namespace:
+ if register(d.Name) {
+ return
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Interface:
+ if register(d.Name) {
+ return
+ }
+ if d, ok := s.declarations[d.Inherits]; ok {
+ s.visit(d)
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Dictionary:
+ if register(d.Name) {
+ return
+ }
+ if d, ok := s.declarations[d.Inherits]; ok {
+ s.visit(d)
+ }
+ for _, m := range d.Members {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ case *ast.Typedef:
+ if register(d.Name) {
+ return
+ }
+ s.visitType(d.Type)
+ case *ast.Mixin:
+ if register(d.Name) {
+ return
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Enum:
+ if register(d.Name) {
+ return
+ }
+ case *ast.Includes:
+ if register(d.Name) {
+ return
+ }
+ default:
+ panic(fmt.Errorf("unhandled AST declaration %T", d))
+ }
+
+ s.out.Declarations = append(s.out.Declarations, d)
+}
+
+// visitType traverses the AST type 't' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visitType(t ast.Type) {
+ switch t := t.(type) {
+ case *ast.TypeName:
+ if d, ok := s.declarations[t.Name]; ok {
+ s.visit(d)
+ }
+ case *ast.UnionType:
+ for _, t := range t.Types {
+ s.visitType(t)
+ }
+ case *ast.ParametrizedType:
+ for _, t := range t.Elems {
+ s.visitType(t)
+ }
+ case *ast.NullableType:
+ s.visitType(t.Type)
+ case *ast.SequenceType:
+ s.visitType(t.Elem)
+ case *ast.RecordType:
+ s.visitType(t.Elem)
+ default:
+ panic(fmt.Errorf("unhandled AST type %T", t))
+ }
+}
+
+// generator holds the template generator state
+type generator struct {
+ // the root template
+ t *template.Template
+ // the working directory
+ workingDir string
+ // map of function name to function exposed to the template executor
+ funcs map[string]interface{}
+ // dependency-sorted declarations
+ declarations declarations
+}
+
+// eval executes the sub-template with the given name and arguments, returning
+// the generated output
+// args can be a single argument:
+// arg[0]
+// or a list of name-value pairs:
+// (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
+func (g *generator) eval(template string, args ...interface{}) (string, error) {
+ target := g.t.Lookup(template)
+ if target == nil {
+ return "", fmt.Errorf("template '%v' not found", template)
+ }
+ sb := strings.Builder{}
+ var err error
+ if len(args) == 1 {
+ err = target.Execute(&sb, args[0])
+ } else {
+ m := newMap()
+ if len(args)%2 != 0 {
+ return "", fmt.Errorf("Eval expects a single argument or list name-value pairs")
+ }
+ for i := 0; i < len(args); i += 2 {
+ name, ok := args[i].(string)
+ if !ok {
+ return "", fmt.Errorf("Eval argument %v is not a string", i)
+ }
+ m.Put(name, args[i+1])
+ }
+ err = target.Execute(&sb, m)
+ }
+ if err != nil {
+ return "", fmt.Errorf("while evaluating '%v': %v", template, err)
+ }
+ return sb.String(), nil
+}
+
+// lookup returns the declaration with the given name, or nil if not found.
+func (g *generator) lookup(name string) ast.Decl {
+ return g.declarations[name]
+}
+
+// include loads the template with the given path, importing the declarations
+// into the scope of the current template.
+func (g *generator) include(path string) (string, error) {
+ t, err := g.t.
+ Option("missingkey=invalid").
+ Funcs(g.funcs).
+ ParseFiles(filepath.Join(g.workingDir, path))
+ if err != nil {
+ return "", err
+ }
+ g.t.AddParseTree(path, t.Tree)
+ return "", nil
+}
+
+// Map is a simple generic key-value map, which can be used in the template
+type Map map[interface{}]interface{}
+
+func newMap() Map { return Map{} }
+
+// Put adds the key-value pair into the map.
+// Put always returns an empty string so nothing is printed in the template.
+func (m Map) Put(key, value interface{}) string {
+ m[key] = value
+ return ""
+}
+
+// Get looks up and returns the value with the given key. If the map does not
+// contain the given key, then nil is returned.
+func (m Map) Get(key interface{}) interface{} {
+ return m[key]
+}
+
+// is returns a function that returns true if the value passed to the function
+// matches any of the types of the objects in 'prototypes'.
+func is(prototypes ...interface{}) func(interface{}) bool {
+ types := make([]reflect.Type, len(prototypes))
+ for i, p := range prototypes {
+ types[i] = reflect.TypeOf(p)
+ }
+ return func(v interface{}) bool {
+ ty := reflect.TypeOf(v)
+ for _, rty := range types {
+ if ty == rty || ty == reflect.PtrTo(rty) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// isConstructor returns true if the object is a constructor ast.Member.
+func isConstructor(v interface{}) bool {
+ if member, ok := v.(*ast.Member); ok {
+ if ty, ok := member.Type.(*ast.TypeName); ok {
+ return ty.Name == "constructor"
+ }
+ }
+ return false
+}
+
+// isUndefinedType returns true if the type is 'undefined'
+func isUndefinedType(ty ast.Type) bool {
+ if ty, ok := ty.(*ast.TypeName); ok {
+ return ty.Name == "undefined"
+ }
+ return false
+}
+
+// enumEntryName formats the enum entry name 's' for use in a C++ enum.
+func enumEntryName(s string) string {
+ return "k" + strings.ReplaceAll(pascalCase(strings.Trim(s, `"`)), "-", "")
+}
+
+// Method describes a WebIDL interface method
+type Method struct {
+ // Name of the method
+ Name string
+ // The list of overloads of the method
+ Overloads []*ast.Member
+}
+
+// methodsOf returns all the methods of the given WebIDL interface.
+func methodsOf(obj interface{}) []*Method {
+ iface, ok := obj.(*ast.Interface)
+ if !ok {
+ return nil
+ }
+ byName := map[string]*Method{}
+ out := []*Method{}
+ for _, member := range iface.Members {
+ member := member.(*ast.Member)
+ if !member.Const && !member.Attribute && !isConstructor(member) {
+ if method, ok := byName[member.Name]; ok {
+ method.Overloads = append(method.Overloads, member)
+ } else {
+ method = &Method{
+ Name: member.Name,
+ Overloads: []*ast.Member{member},
+ }
+ byName[member.Name] = method
+ out = append(out, method)
+ }
+ }
+ }
+ return out
+}
+
+// attributesOf returns all the attributes of the given WebIDL interface or
+// namespace.
+func attributesOf(obj interface{}) []*ast.Member {
+ out := []*ast.Member{}
+ add := func(m interface{}) {
+ if m := m.(*ast.Member); m.Attribute {
+ out = append(out, m)
+ }
+ }
+ switch obj := obj.(type) {
+ case *ast.Interface:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ case *ast.Namespace:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ default:
+ return nil
+ }
+ return out
+}
+
+// constantsOf returns all the constant values of the given WebIDL interface or
+// namespace.
+func constantsOf(obj interface{}) []*ast.Member {
+ out := []*ast.Member{}
+ add := func(m interface{}) {
+ if m := m.(*ast.Member); m.Const {
+ out = append(out, m)
+ }
+ }
+ switch obj := obj.(type) {
+ case *ast.Interface:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ case *ast.Namespace:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ default:
+ return nil
+ }
+ return out
+}
+
+// setlikeOf returns the setlike ast.Pattern, if obj is a setlike interface.
+func setlikeOf(obj interface{}) *ast.Pattern {
+ iface, ok := obj.(*ast.Interface)
+ if !ok {
+ return nil
+ }
+ for _, pattern := range iface.Patterns {
+ if pattern.Type == ast.Setlike {
+ return pattern
+ }
+ }
+ return nil
+}
+
+// pascalCase returns the snake-case string s transformed into 'PascalCase',
+// Rules:
+// * The first letter of the string is capitalized
+// * Characters following an underscore, hyphen or number are capitalized
+// * Underscores are removed from the returned string
+// See: https://en.wikipedia.org/wiki/Camel_case
+func pascalCase(s string) string {
+ b := strings.Builder{}
+ upper := true
+ for _, r := range s {
+ if r == '_' || r == '-' {
+ upper = true
+ continue
+ }
+ if upper {
+ b.WriteRune(unicode.ToUpper(r))
+ upper = false
+ } else {
+ b.WriteRune(r)
+ }
+ if unicode.IsNumber(r) {
+ upper = true
+ }
+ }
+ return b.String()
+}
+
+const header = `// Copyright 2021 The Dawn Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/cmd/idlgen.go, with the arguments:
+// %v
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+`
diff --git a/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go
new file mode 100644
index 00000000000..bafb13ce3c9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/tools/src/cmd/run-cts/main.go
@@ -0,0 +1,1075 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unicode/utf8"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+const (
+ testTimeout = time.Minute
+)
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+func showUsage() {
+ fmt.Println(`
+run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+
+Usage:
+ run-cts --dawn-node=<path to dawn.node> --cts=<path to WebGPU CTS> [test-query]`)
+ os.Exit(1)
+}
+
+var (
+ colors bool
+ stdout io.Writer
+ mainCtx context.Context
+)
+
+// ANSI escape sequences
+const (
+ escape = "\u001B["
+ positionLeft = escape + "0G"
+ ansiReset = escape + "0m"
+
+ bold = escape + "1m"
+
+ red = escape + "31m"
+ green = escape + "32m"
+ yellow = escape + "33m"
+ blue = escape + "34m"
+ magenta = escape + "35m"
+ cyan = escape + "36m"
+ white = escape + "37m"
+)
+
+type dawnNodeFlags []string
+
+func (f *dawnNodeFlags) String() string {
+ return fmt.Sprint(strings.Join(*f, ""))
+}
+
+func (f *dawnNodeFlags) Set(value string) error {
+ // Multiple flags must be passed in individually:
+ // -flag=a=b -dawn_node_flag=c=d
+ *f = append(*f, value)
+ return nil
+}
+
+func makeMainCtx() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ sigs := make(chan os.Signal, 1)
+ signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ sig := <-sigs
+ fmt.Printf("Signal received: %v\n", sig)
+ cancel()
+ }()
+ return ctx
+}
+
+func run() error {
+ mainCtx = makeMainCtx()
+
+ colors = os.Getenv("TERM") != "dumb" ||
+ isatty.IsTerminal(os.Stdout.Fd()) ||
+ isatty.IsCygwinTerminal(os.Stdout.Fd())
+ if colors {
+ if _, disable := os.LookupEnv("NO_COLOR"); disable {
+ colors = false
+ }
+ }
+
+ backendDefault := "default"
+ if vkIcdFilenames := os.Getenv("VK_ICD_FILENAMES"); vkIcdFilenames != "" {
+ backendDefault = "vulkan"
+ }
+
+ var dawnNode, cts, node, npx, resultsPath, expectationsPath, logFilename, backend string
+ var verbose, isolated, build bool
+ var numRunners int
+ var flags dawnNodeFlags
+ flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
+ flag.StringVar(&cts, "cts", "", "root directory of WebGPU CTS")
+ flag.StringVar(&node, "node", "", "path to node executable")
+ flag.StringVar(&npx, "npx", "", "path to npx executable")
+ flag.StringVar(&resultsPath, "output", "", "path to write test results file")
+ flag.StringVar(&expectationsPath, "expect", "", "path to expectations file")
+ flag.BoolVar(&verbose, "verbose", false, "print extra information while testing")
+ flag.BoolVar(&build, "build", true, "attempt to build the CTS before running")
+ flag.BoolVar(&isolated, "isolate", false, "run each test in an isolated process")
+ flag.BoolVar(&colors, "colors", colors, "enable / disable colors")
+ flag.IntVar(&numRunners, "j", runtime.NumCPU()/2, "number of concurrent runners. 0 runs serially")
+ flag.StringVar(&logFilename, "log", "", "path to log file of tests run and result")
+ flag.Var(&flags, "flag", "flag to pass to dawn-node as flag=value. multiple flags must be passed in individually")
+ flag.StringVar(&backend, "backend", backendDefault, "backend to use: default|null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles."+
+ " set to 'vulkan' if VK_ICD_FILENAMES environment variable is set, 'default' otherwise")
+ flag.Parse()
+
+ if colors {
+ stdout = colorable.NewColorableStdout()
+ } else {
+ stdout = colorable.NewNonColorable(os.Stdout)
+ }
+
+ // Check mandatory arguments
+ if dawnNode == "" || cts == "" {
+ showUsage()
+ }
+ if !isFile(dawnNode) {
+ return fmt.Errorf("'%v' is not a file", dawnNode)
+ }
+ if !isDir(cts) {
+ return fmt.Errorf("'%v' is not a directory", cts)
+ }
+
+ // Make paths absolute
+ for _, path := range []*string{&dawnNode, &cts} {
+ abs, err := filepath.Abs(*path)
+ if err != nil {
+ return fmt.Errorf("unable to get absolute path for '%v'", *path)
+ }
+ *path = abs
+ }
+
+ // The test query is the optional unnamed argument
+ query := "webgpu:*"
+ switch len(flag.Args()) {
+ case 0:
+ case 1:
+ query = flag.Args()[0]
+ default:
+ return fmt.Errorf("only a single query can be provided")
+ }
+
+ // Find node
+ if node == "" {
+ var err error
+ node, err = exec.LookPath("node")
+ if err != nil {
+ return fmt.Errorf("add node to PATH or specify with --node")
+ }
+ }
+ // Find npx
+ if npx == "" {
+ var err error
+ npx, err = exec.LookPath("npx")
+ if err != nil {
+ npx = ""
+ }
+ }
+
+ if backend != "default" {
+ fmt.Println("Forcing backend to", backend)
+ flags = append(flags, fmt.Sprint("dawn-backend=", backend))
+ }
+
+ r := runner{
+ numRunners: numRunners,
+ verbose: verbose,
+ node: node,
+ npx: npx,
+ dawnNode: dawnNode,
+ cts: cts,
+ flags: flags,
+ results: testcaseStatuses{},
+ evalScript: func(main string) string {
+ return fmt.Sprintf(`require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/%v.ts');`, main)
+ },
+ }
+
+ if logFilename != "" {
+ writer, err := os.Create(logFilename)
+ if err != nil {
+ return fmt.Errorf("failed to open log '%v': %w", logFilename, err)
+ }
+ defer writer.Close()
+ r.log = newLogger(writer)
+ }
+
+ cache := cache{}
+ cachePath := dawnNode + ".runcts.cache"
+ if err := cache.load(cachePath); err != nil && verbose {
+ fmt.Println("failed to load cache from", cachePath, err)
+ }
+ defer cache.save(cachePath)
+
+ // Scan the CTS source to determine the most recent change to the CTS source
+ mostRecentSourceChange, err := r.scanSourceTimestamps(verbose)
+ if err != nil {
+ return fmt.Errorf("failed to scan source files for modified timestamps: %w", err)
+ }
+
+ ctsNeedsRebuild := mostRecentSourceChange.After(cache.BuildTimestamp) ||
+ !isDir(filepath.Join(r.cts, "out-node"))
+ if build {
+ if verbose {
+ fmt.Println("CTS needs rebuild:", ctsNeedsRebuild)
+ }
+
+ if npx != "" {
+ if ctsNeedsRebuild {
+ if err := r.buildCTS(verbose); err != nil {
+ return fmt.Errorf("failed to build CTS: %w", err)
+ }
+ cache.BuildTimestamp = mostRecentSourceChange
+ }
+ // Use the prebuilt CTS (instead of using the `setup-ts-in-node` transpiler)
+ r.evalScript = func(main string) string {
+ return fmt.Sprintf(`require('./out-node/common/runtime/%v.js');`, main)
+ }
+ } else {
+ fmt.Println("npx not found on PATH. Using runtime TypeScript transpilation (slow)")
+ }
+ }
+
+ // If an expectations file was specified, load it.
+ if expectationsPath != "" {
+ if ex, err := loadExpectations(expectationsPath); err == nil {
+ r.expectations = ex
+ } else {
+ return err
+ }
+ }
+
+ if numRunners > 0 {
+ // Find all the test cases that match the given queries.
+ if err := r.gatherTestCases(query, verbose); err != nil {
+ return fmt.Errorf("failed to gather test cases: %w", err)
+ }
+
+ if isolated {
+ fmt.Println("Running in parallel isolated...")
+ fmt.Printf("Testing %d test cases...\n", len(r.testcases))
+ if err := r.runParallelIsolated(); err != nil {
+ return err
+ }
+ } else {
+ fmt.Println("Running in parallel with server...")
+ fmt.Printf("Testing %d test cases...\n", len(r.testcases))
+ if err := r.runParallelWithServer(); err != nil {
+ return err
+ }
+ }
+ } else {
+ fmt.Println("Running serially...")
+ if err := r.runSerially(query); err != nil {
+ return err
+ }
+ }
+
+ if resultsPath != "" {
+ if err := saveExpectations(resultsPath, r.results); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type logger struct {
+ writer io.Writer
+ idx int
+ resultByIndex map[int]result
+}
+
+// newLogger creates a new logger instance.
+func newLogger(writer io.Writer) logger {
+ return logger{writer, 0, map[int]result{}}
+}
+
+// logResult writes the test results to the log file in sequential order.
+// logResult should be called whenever a new test result becomes available.
+func (l *logger) logResults(res result) {
+ if l.writer == nil {
+ return
+ }
+ l.resultByIndex[res.index] = res
+ for {
+ logRes, ok := l.resultByIndex[l.idx]
+ if !ok {
+ break
+ }
+ fmt.Fprintf(l.writer, "%v [%v]\n", logRes.testcase, logRes.status)
+ l.idx++
+ }
+}
+
+// Cache holds cached information between runs to optimize runs
+type cache struct {
+ BuildTimestamp time.Time
+}
+
+// load loads the cache information from the JSON file at path
+func (c *cache) load(path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return json.NewDecoder(f).Decode(c)
+}
+
+// save saves the cache information to the JSON file at path
+func (c *cache) save(path string) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return json.NewEncoder(f).Encode(c)
+}
+
+type runner struct {
+ numRunners int
+ verbose bool
+ node, npx, dawnNode, cts string
+ flags dawnNodeFlags
+ evalScript func(string) string
+ testcases []string
+ expectations testcaseStatuses
+ results testcaseStatuses
+ log logger
+}
+
+// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
+// r.cts, and returns the file with the most recent timestamp.
+func (r *runner) scanSourceTimestamps(verbose bool) (time.Time, error) {
+ if verbose {
+ start := time.Now()
+ fmt.Println("Scanning .js / .ts files for changes...")
+ defer func() {
+ fmt.Println("completed in", time.Since(start))
+ }()
+ }
+
+ dir := filepath.Join(r.cts, "src")
+
+ mostRecentChange := time.Time{}
+ err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ switch filepath.Ext(path) {
+ case ".ts", ".js":
+ if info.ModTime().After(mostRecentChange) {
+ mostRecentChange = info.ModTime()
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return time.Time{}, err
+ }
+ return mostRecentChange, nil
+}
+
+// buildCTS calls `npx grunt run:build-out-node` in the CTS directory to compile
+// the TypeScript files down to JavaScript. Doing this once ahead of time can be
+// much faster than dynamically transpiling when there are many tests to run.
+func (r *runner) buildCTS(verbose bool) error {
+ if verbose {
+ start := time.Now()
+ fmt.Println("Building CTS...")
+ defer func() {
+ fmt.Println("completed in", time.Since(start))
+ }()
+ }
+
+ cmd := exec.Command(r.npx, "grunt", "run:build-out-node")
+ cmd.Dir = r.cts
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("%w: %v", err, string(out))
+ }
+ return nil
+}
+
+// gatherTestCases() queries the CTS for all test cases that match the given
+// query. On success, gatherTestCases() populates r.testcases.
+func (r *runner) gatherTestCases(query string, verbose bool) error {
+ if verbose {
+ start := time.Now()
+ fmt.Println("Gathering test cases...")
+ defer func() {
+ fmt.Println("completed in", time.Since(start))
+ }()
+ }
+
+ args := append([]string{
+ "-e", r.evalScript("cmdline"),
+ "--", // Start of arguments
+ // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+ // and slices away the first two arguments. When running with '-e', args
+ // start at 1, so just inject a dummy argument.
+ "dummy-arg",
+ "--list",
+ }, query)
+
+ cmd := exec.Command(r.node, args...)
+ cmd.Dir = r.cts
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("%w\n%v", err, string(out))
+ }
+
+ tests := filterTestcases(strings.Split(string(out), "\n"))
+ r.testcases = tests
+ return nil
+}
+
+type portListener struct {
+ buffer strings.Builder
+ port chan int
+}
+
+func newPortListener() portListener {
+ return portListener{strings.Builder{}, make(chan int)}
+}
+
+var portRE = regexp.MustCompile(`\[\[(\d+)\]\]`)
+
+func (p *portListener) Write(data []byte) (n int, err error) {
+ if p.port != nil {
+ p.buffer.Write(data)
+ match := portRE.FindStringSubmatch(p.buffer.String())
+ if len(match) == 2 {
+ port, err := strconv.Atoi(match[1])
+ if err != nil {
+ return 0, err
+ }
+ p.port <- port
+ close(p.port)
+ p.port = nil
+ }
+ }
+ return len(data), nil
+}
+
+// runParallelWithServer() starts r.numRunners instances of the CTS server test
+// runner, and issues test run requests to those servers, concurrently.
+func (r *runner) runParallelWithServer() error {
+ // Create a chan of test indices.
+ // This will be read by the test runner goroutines.
+ caseIndices := make(chan int, len(r.testcases))
+ for i := range r.testcases {
+ caseIndices <- i
+ }
+ close(caseIndices)
+
+ // Create a chan for the test results.
+ // This will be written to by the test runner goroutines.
+ results := make(chan result, len(r.testcases))
+
+ // Spin up the test runner goroutines
+ wg := &sync.WaitGroup{}
+ for i := 0; i < r.numRunners; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := r.runServer(caseIndices, results); err != nil {
+ results <- result{
+ status: fail,
+ error: fmt.Errorf("Test server error: %w", err),
+ }
+ }
+ }()
+ }
+
+ r.streamResults(wg, results)
+ return nil
+}
+
+type redirectingWriter struct {
+ io.Writer
+}
+
+// runServer starts a test runner server instance, takes case indices from
+// caseIndices, and requests the server run the test with the given index.
+// The result of the test run is written to the results chan.
+// Once the caseIndices chan has been closed, the server is stopped and
+// runServer returns.
+func (r *runner) runServer(caseIndices <-chan int, results chan<- result) error {
+	// port is non-zero while a server instance is believed to be running.
+	var port int
+	// rw is retargeted to a fresh buffer before each test case so that the
+	// server log can be appended to that case's result message.
+	var rw redirectingWriter
+
+	// Declared up-front so startServer/stopServer can be defined in either order.
+	stopServer := func() {}
+	// startServer launches a node test-server process and waits (up to 10s)
+	// for it to announce the port it is listening on.
+	startServer := func() error {
+		args := []string{
+			"-e", r.evalScript("server"), // Evaluate 'eval'.
+			"--",
+			// src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+			// and slices away the first two arguments. When running with '-e', args
+			// start at 1, so just inject a dummy argument.
+			"dummy-arg",
+			// Actual arguments begin here
+			"--gpu-provider", r.dawnNode,
+		}
+		for _, f := range r.flags {
+			args = append(args, "--gpu-provider-flag", f)
+		}
+
+		ctx := mainCtx
+		cmd := exec.CommandContext(ctx, r.node, args...)
+
+		serverLog := &bytes.Buffer{}
+
+		// pl scans the server's stdout for the announced port number.
+		pl := newPortListener()
+
+		cmd.Dir = r.cts
+		cmd.Stdout = io.MultiWriter(&rw, serverLog, &pl)
+		cmd.Stderr = io.MultiWriter(&rw, serverLog)
+
+		err := cmd.Start()
+		if err != nil {
+			return fmt.Errorf("failed to start test runner server: %v", err)
+		}
+
+		// Wait for the port announcement, a timeout, or process-tree cancellation.
+		select {
+		case port = <-pl.port:
+		case <-time.After(time.Second * 10):
+			return fmt.Errorf("timeout waiting for server port:\n%v", serverLog.String())
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		return nil
+	}
+	// stopServer asks the server to terminate via its /terminate endpoint.
+	// The POST is fire-and-forget; the short sleep gives it time to land.
+	stopServer = func() {
+		if port > 0 {
+			go http.Post(fmt.Sprintf("http://localhost:%v/terminate", port), "", &bytes.Buffer{})
+			time.Sleep(time.Millisecond * 100)
+			port = 0
+		}
+	}
+
+	for idx := range caseIndices {
+		// Redirect the server log per test case
+		caseServerLog := &bytes.Buffer{}
+		rw.Writer = caseServerLog
+
+		// Lazily (re)start the server; port is zeroed whenever it is stopped.
+		if port == 0 {
+			if err := startServer(); err != nil {
+				return err
+			}
+		}
+
+		res := result{index: idx, testcase: r.testcases[idx]}
+
+		type Response struct {
+			Status  string
+			Message string
+		}
+		// NOTE(review): postResp.Body is never closed on any path below —
+		// possible connection/file-descriptor leak; confirm and add a Close.
+		postResp, err := http.Post(fmt.Sprintf("http://localhost:%v/run?%v", port, r.testcases[idx]), "", &bytes.Buffer{})
+		if err != nil {
+			// A failed POST usually means the server process crashed; stop it
+			// so the next iteration starts a fresh instance.
+			res.error = fmt.Errorf("server POST failure. Restarting server... This can happen when there is a crash. Try running with --isolate.")
+			res.status = fail
+			results <- res
+			stopServer()
+			continue
+		}
+
+		if postResp.StatusCode == http.StatusOK {
+			var resp Response
+			if err := json.NewDecoder(postResp.Body).Decode(&resp); err != nil {
+				// NOTE(review): unlike the POST-failure path, the server is
+				// left running here — confirm that is intended.
+				res.error = fmt.Errorf("server response decode failure")
+				res.status = fail
+				results <- res
+				continue
+			}
+
+			// Map the server-reported status string onto our status enum,
+			// appending the captured per-case server log to the message.
+			switch resp.Status {
+			case "pass":
+				res.status = pass
+				res.message = resp.Message + caseServerLog.String()
+			case "warn":
+				res.status = warn
+				res.message = resp.Message + caseServerLog.String()
+			case "fail":
+				res.status = fail
+				res.message = resp.Message + caseServerLog.String()
+			case "skip":
+				res.status = skip
+				res.message = resp.Message + caseServerLog.String()
+			default:
+				res.status = fail
+				res.error = fmt.Errorf("unknown status: '%v'", resp.Status)
+			}
+		} else {
+			// Non-200: surface the raw response body as the error.
+			msg, err := ioutil.ReadAll(postResp.Body)
+			if err != nil {
+				msg = []byte(err.Error())
+			}
+			res.status = fail
+			res.error = fmt.Errorf("server error: %v", string(msg))
+		}
+		results <- res
+	}
+
+	stopServer()
+	return nil
+}
+
+// runParallelIsolated() calls the CTS command-line test runner to run each
+// testcase in a separate process. This reduces possibility of state leakage
+// between tests.
+// Up to r.numRunners tests will be run concurrently.
+func (r *runner) runParallelIsolated() error {
+	// Queue every testcase index in a buffered channel and close it, so each
+	// worker goroutine exits once the queue is exhausted.
+	queue := make(chan int, len(r.testcases))
+	for i := range r.testcases {
+		queue <- i
+	}
+	close(queue)
+
+	// Buffered to the total number of testcases so workers never block.
+	results := make(chan result, len(r.testcases))
+
+	// Start r.numRunners workers; each runs testcases one at a time in a
+	// separate child process via runTestcase.
+	wg := &sync.WaitGroup{}
+	for w := 0; w < r.numRunners; w++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for i := range queue {
+				res := r.runTestcase(r.testcases[i])
+				res.index = i
+				results <- res
+			}
+		}()
+	}
+
+	r.streamResults(wg, results)
+	return nil
+}
+
+// streamResults reads from the chan 'results', printing the results in test-id
+// sequential order. Once the WaitGroup 'wg' is complete, streamResults() will
+// automatically close the 'results' chan.
+// Once all the results have been printed, a summary will be printed and the
+// function will return.
+func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
+	// Create another goroutine to close the results chan when all the runner
+	// goroutines have finished.
+	// timeTaken is written before close(results) and read only after the
+	// range-over-results loop below ends, so the channel close provides the
+	// necessary happens-before ordering.
+	start := time.Now()
+	var timeTaken time.Duration
+	go func() {
+		wg.Wait()
+		timeTaken = time.Since(start)
+		close(results)
+	}()
+
+	// Total number of tests, test counts binned by status
+	numTests, numByExpectedStatus := len(r.testcases), map[expectedStatus]int{}
+
+	// Helper function for printing a progress bar.
+	lastStatusUpdate, animFrame := time.Now(), 0
+	updateProgress := func() {
+		printANSIProgressBar(animFrame, numTests, numByExpectedStatus)
+		animFrame++
+		lastStatusUpdate = time.Now()
+	}
+
+	// Pull test results as they become available.
+	// Update the status counts, and print any failures (or all test results if --verbose)
+	progressUpdateRate := time.Millisecond * 10
+	if !colors {
+		// No colors == no cursor control. Reduce progress updates so that
+		// we're not printing endless progress bars.
+		progressUpdateRate = time.Second
+	}
+
+	for res := range results {
+		r.log.logResults(res)
+		r.results[res.testcase] = res.status
+		// Compare against the expectations file (if any) to decide whether
+		// this status was expected.
+		expected := r.expectations[res.testcase]
+		exStatus := expectedStatus{
+			status:   res.status,
+			expected: expected == res.status,
+		}
+		numByExpectedStatus[exStatus] = numByExpectedStatus[exStatus] + 1
+		name := res.testcase
+		// Print the individual result when verbose, on error, or on an
+		// unexpected non-pass/non-skip status.
+		if r.verbose ||
+			res.error != nil ||
+			(exStatus.status != pass && exStatus.status != skip && !exStatus.expected) {
+			fmt.Printf("%v - %v: %v", name, res.status, res.message)
+			if expected != "" {
+				fmt.Printf(" [%v -> %v]", expected, res.status)
+			}
+			fmt.Println()
+			if res.error != nil {
+				fmt.Println(res.error)
+			}
+			updateProgress()
+		}
+		if time.Since(lastStatusUpdate) > progressUpdateRate {
+			updateProgress()
+		}
+	}
+	// Final bar repaint with the complete counts.
+	printANSIProgressBar(animFrame, numTests, numByExpectedStatus)
+
+	// All done. Print final stats.
+	fmt.Printf("\nCompleted in %v\n", timeTaken)
+
+	var numExpectedByStatus map[status]int
+	if r.expectations != nil {
+		// The status of each testcase that was run
+		numExpectedByStatus = map[status]int{}
+		for t, s := range r.expectations {
+			if _, wasTested := r.results[t]; wasTested {
+				numExpectedByStatus[s] = numExpectedByStatus[s] + 1
+			}
+		}
+	}
+
+	for _, s := range statuses {
+		// number of tests, just run, that resulted in the given status
+		numByStatus := numByExpectedStatus[expectedStatus{s, true}] +
+			numByExpectedStatus[expectedStatus{s, false}]
+		// difference in number of tests that had the given status from the
+		// expected number (taken from the expectations file)
+		diffFromExpected := 0
+		if numExpectedByStatus != nil {
+			diffFromExpected = numByStatus - numExpectedByStatus[s]
+		}
+		if numByStatus == 0 && diffFromExpected == 0 {
+			continue
+		}
+
+		// One summary row per status: "   STATUS: count      (pct%) [+/-diff]"
+		fmt.Print(bold, statusColor[s])
+		fmt.Print(alignRight(strings.ToUpper(string(s))+": ", 10))
+		fmt.Print(ansiReset)
+		if numByStatus > 0 {
+			fmt.Print(bold)
+		}
+		fmt.Print(alignLeft(numByStatus, 10))
+		fmt.Print(ansiReset)
+		fmt.Print(alignRight("("+percentage(numByStatus, numTests)+")", 6))
+
+		if diffFromExpected != 0 {
+			fmt.Print(bold, " [")
+			fmt.Printf("%+d", diffFromExpected)
+			fmt.Print(ansiReset, "]")
+		}
+		fmt.Println()
+	}
+
+}
+
+// runSerially() calls the CTS test runner to run the test query in a single
+// process.
+// TODO(bclayton): Support comparing against r.expectations
+func (r *runner) runSerially(query string) error {
+	// Time the single run and report its status.
+	// (Local renamed from 'result' to 'res' to avoid shadowing the result type.)
+	start := time.Now()
+	res := r.runTestcase(query)
+	elapsed := time.Since(start)
+
+	if r.verbose {
+		fmt.Println(res)
+	}
+	fmt.Println("Status:", res.status)
+	fmt.Println("Completed in", elapsed)
+	return nil
+}
+
+// status is an enumerator of test result status
+type status string
+
+const (
+	pass    status = "pass"
+	warn    status = "warn"
+	fail    status = "fail"
+	skip    status = "skip"
+	timeout status = "timeout"
+)
+
+// All the status types
+// Note: the order here is the order summary rows and progress-bar segments
+// are printed in.
+var statuses = []status{pass, warn, fail, skip, timeout}
+
+// statusColor maps each status to the ANSI color used when printing it.
+var statusColor = map[status]string{
+	pass:    green,
+	warn:    yellow,
+	skip:    blue,
+	timeout: yellow,
+	fail:    red,
+}
+
+// expectedStatus is a test status, along with a boolean to indicate whether the
+// status matches the test expectations
+type expectedStatus struct {
+	status   status
+	expected bool
+}
+
+// result holds the information about a completed test
+type result struct {
+	index    int    // index of the testcase in runner.testcases
+	testcase string // the testcase query string
+	status   status // outcome of the run
+	message  string // captured output / server message
+	error    error  // non-nil if the run itself errored
+}
+
+// runTestcase() runs the CTS testcase with the given query, returning the test
+// result.
+func (r *runner) runTestcase(query string) result {
+	// Bound the child process lifetime by the per-test timeout.
+	ctx, cancel := context.WithTimeout(mainCtx, testTimeout)
+	defer cancel()
+
+	// Build the node command line. src/common/runtime/helper/sys.ts expects
+	// 'node file.js <args>' and slices away the first two arguments; with
+	// '-e' args start at 1, so a placeholder argument is injected.
+	args := []string{
+		"-e", r.evalScript("cmdline"), // Evaluate 'eval'.
+		"--",
+		"dummy-arg",
+		"--gpu-provider", r.dawnNode,
+		"--verbose",
+	}
+	for _, flag := range r.flags {
+		args = append(args, "--gpu-provider-flag", flag)
+	}
+	args = append(args, query)
+
+	cmd := exec.CommandContext(ctx, r.node, args...)
+	cmd.Dir = r.cts
+
+	// Capture stdout and stderr interleaved into a single buffer.
+	var output bytes.Buffer
+	cmd.Stdout = &output
+	cmd.Stderr = &output
+
+	err := cmd.Run()
+	out := output.String()
+
+	// Classification order matters: a timeout wins over everything, and a
+	// '[fail]' marker in the log wins over '[pass]'.
+	switch {
+	case errors.Is(err, context.DeadlineExceeded):
+		return result{testcase: query, status: timeout, message: out}
+	case strings.Contains(out, "[fail]"):
+		return result{testcase: query, status: fail, message: out}
+	case strings.Contains(out, "[warn]"):
+		return result{testcase: query, status: warn, message: out}
+	case strings.Contains(out, "[skip]"):
+		return result{testcase: query, status: skip, message: out}
+	case strings.Contains(out, "[pass]"), err == nil:
+		return result{testcase: query, status: pass, message: out}
+	}
+	// No recognized marker and a non-timeout error: report as a failure.
+	return result{testcase: query, status: fail, message: fmt.Sprint(out, err), error: err}
+}
+
+// filterTestcases returns in with empty strings removed
+func filterTestcases(in []string) []string {
+	// Keep only the non-empty entries, preserving order.
+	filtered := make([]string, 0, len(in))
+	for _, tc := range in {
+		if len(tc) > 0 {
+			filtered = append(filtered, tc)
+		}
+	}
+	return filtered
+}
+
+// percentage returns the percentage of n out of total as a string
+func percentage(n, total int) string {
+	// Avoid division by zero; report "-" when there is no total.
+	if total == 0 {
+		return "-"
+	}
+	return fmt.Sprintf("%.1f%%", 100.0*float64(n)/float64(total))
+}
+
+// isDir returns true if the path resolves to a directory
+func isDir(path string) bool {
+	// A stat failure (missing path, permission error) counts as "not a dir".
+	if info, err := os.Stat(path); err == nil {
+		return info.IsDir()
+	}
+	return false
+}
+
+// isFile returns true if the path resolves to a file
+func isFile(path string) bool {
+	// A stat failure (missing path, permission error) counts as "not a file".
+	if info, err := os.Stat(path); err == nil {
+		return !info.IsDir()
+	}
+	return false
+}
+
+// alignLeft returns the string of 'val' padded so that it is aligned left in
+// a column of the given width
+func alignLeft(val interface{}, width int) string {
+	// Width is measured in runes, not bytes, so multi-byte glyphs align too.
+	text := fmt.Sprint(val)
+	if pad := width - utf8.RuneCountInString(text); pad > 0 {
+		return text + strings.Repeat(" ", pad)
+	}
+	return text
+}
+
+// alignRight returns the string of 'val' padded so that it is aligned right in
+// a column of the given width
+func alignRight(val interface{}, width int) string {
+	// Width is measured in runes, not bytes, so multi-byte glyphs align too.
+	text := fmt.Sprint(val)
+	if pad := width - utf8.RuneCountInString(text); pad > 0 {
+		return strings.Repeat(" ", pad) + text
+	}
+	return text
+}
+
+// printANSIProgressBar prints a colored progress bar, providing realtime
+// information about the status of the CTS run.
+// Note: We'll want to skip this if !isatty or if we're running on windows.
+func printANSIProgressBar(animFrame int, numTests int, numByExpectedStatus map[expectedStatus]int) {
+	const barWidth = 50
+
+	// Spinner frames and fractional-block glyphs for sub-character precision.
+	animSymbols := []rune{'⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'}
+	blockSymbols := []rune{'▏', '▎', '▍', '▌', '▋', '▊', '▉'}
+
+	numBlocksPrinted := 0
+
+	// NOTE(review): some prints below go to the package-level 'stdout' writer
+	// and others to fmt.Print (os.Stdout) — confirm these are the same sink,
+	// otherwise bar fragments could interleave incorrectly.
+	fmt.Fprint(stdout, string(animSymbols[animFrame%len(animSymbols)]), " [")
+	animFrame++
+
+	numFinished := 0
+
+	// One bar segment per (status, expected) pair, in 'statuses' order, with
+	// expected segments first and drawn in bold.
+	for _, status := range statuses {
+		for _, expected := range []bool{true, false} {
+			color := statusColor[status]
+			if expected {
+				color += bold
+			}
+
+			num := numByExpectedStatus[expectedStatus{status, expected}]
+			numFinished += num
+			statusFrac := float64(num) / float64(numTests)
+			fNumBlocks := barWidth * statusFrac
+			fmt.Fprint(stdout, color)
+			numBlocks := int(math.Ceil(fNumBlocks))
+			// NOTE(review): both branches skip drawing when numBlocks == 1
+			// ('> 1'), and only the unexpected branch draws a fractional
+			// block — confirm the asymmetry and the off-by-one are intended.
+			if expected {
+				if numBlocks > 1 {
+					fmt.Print(strings.Repeat(string("░"), numBlocks))
+				}
+			} else {
+				if numBlocks > 1 {
+					fmt.Print(strings.Repeat(string("▉"), numBlocks))
+				}
+				if numBlocks > 0 {
+					frac := fNumBlocks - math.Floor(fNumBlocks)
+					symbol := blockSymbols[int(math.Round(frac*float64(len(blockSymbols)-1)))]
+					fmt.Print(string(symbol))
+				}
+			}
+			numBlocksPrinted += numBlocks
+		}
+	}
+
+	// Pad the remainder of the bar with spaces.
+	if barWidth > numBlocksPrinted {
+		fmt.Print(strings.Repeat(string(" "), barWidth-numBlocksPrinted))
+	}
+	fmt.Fprint(stdout, ansiReset)
+	fmt.Print("] ", percentage(numFinished, numTests))
+
+	if colors {
+		// move cursor to start of line so the bar is overridden
+		fmt.Fprint(stdout, positionLeft)
+	} else {
+		// cannot move cursor, so newline
+		fmt.Println()
+	}
+}
+
+// testcaseStatus is a pair of testcase name and result status
+// Intended to be serialized for expectations files.
+// Fields are exported so encoding/json can (un)marshal them.
+type testcaseStatus struct {
+	Testcase string
+	Status   status
+}
+
+// testcaseStatuses is a map of testcase to test status
+type testcaseStatuses map[string]status
+
+// loadExpectations loads the test expectations from path
+func loadExpectations(path string) (testcaseStatuses, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open expectations file: %w", err)
+ }
+ defer f.Close()
+
+ statuses := []testcaseStatus{}
+ if err := json.NewDecoder(f).Decode(&statuses); err != nil {
+ return nil, fmt.Errorf("failed to read expectations file: %w", err)
+ }
+
+ out := make(testcaseStatuses, len(statuses))
+ for _, s := range statuses {
+ out[s.Testcase] = s.Status
+ }
+ return out, nil
+}
+
+// saveExpectations saves the test results 'ex' as an expectations file to path
+func saveExpectations(path string, ex testcaseStatuses) error {
+	file, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to create expectations file: %w", err)
+	}
+	defer file.Close()
+
+	// Flatten the map and sort by testcase name so the output is stable and
+	// diff-friendly.
+	entries := make([]testcaseStatus, 0, len(ex))
+	for tc, st := range ex {
+		entries = append(entries, testcaseStatus{tc, st})
+	}
+	sort.Slice(entries, func(a, b int) bool { return entries[a].Testcase < entries[b].Testcase })
+
+	enc := json.NewEncoder(file)
+	enc.SetIndent("", "  ")
+	if err := enc.Encode(&entries); err != nil {
+		return fmt.Errorf("failed to save expectations file: %w", err)
+	}
+
+	return nil
+}
diff --git a/chromium/third_party/dawn/src/dawn/node/utils/Debug.h b/chromium/third_party/dawn/src/dawn/node/utils/Debug.h
new file mode 100644
index 00000000000..cf7c7d8b206
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/node/utils/Debug.h
@@ -0,0 +1,146 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNODE_UTILS_DEBUG_H_
+#define DAWNNODE_UTILS_DEBUG_H_
+
+#include <iostream>
+#include <optional>
+#include <sstream>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "dawn/webgpu_cpp_print.h"
+
+namespace wgpu::utils {
+
+    // Write() is a helper for printing container types to the std::ostream.
+    // Write() is used by the LOG() macro below.
+
+    // Forward declarations
+    // Base case: no values to write.
+    inline std::ostream& Write(std::ostream& out) {
+        return out;
+    }
+    template <typename T>
+    inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
+    template <typename T>
+    inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
+    template <typename K, typename V>
+    inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
+    template <typename... TYS>
+    inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
+    template <typename VALUE>
+    std::ostream& Write(std::ostream& out, VALUE&& value);
+
+    // Write() implementations
+    // Writes the contained value, or '<undefined>' when empty.
+    template <typename T>
+    std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
+        if (value.has_value()) {
+            return Write(out, value.value());
+        }
+        return out << "<undefined>";
+    }
+
+    // Writes the vector as '[el, el, ...]'.
+    template <typename T>
+    std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
+        out << "[";
+        bool first = true;
+        for (const auto& el : value) {
+            if (!first) {
+                out << ", ";
+            }
+            first = false;
+            Write(out, el);
+        }
+        return out << "]";
+    }
+
+    // Writes the map as '{key: value, key: value, ...}'.
+    // The structured bindings are named 'k'/'v' so they do not shadow the
+    // 'value' parameter (the previous '[key, value]' binding shadowed it).
+    template <typename K, typename V>
+    std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
+        out << "{";
+        bool first = true;
+        for (const auto& [k, v] : value) {
+            if (!first) {
+                out << ", ";
+            }
+            first = false;
+            Write(out, k);
+            out << ": ";
+            Write(out, v);
+        }
+        return out << "}";
+    }
+
+    // Writes the currently-held alternative of the variant.
+    template <typename... TYS>
+    std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
+        std::visit([&](auto&& v) { Write(out, v); }, value);
+        return out;
+    }
+
+    // Fallback: stream the value directly with operator<<.
+    template <typename VALUE>
+    std::ostream& Write(std::ostream& out, VALUE&& value) {
+        return out << std::forward<VALUE>(value);
+    }
+
+    // Variadic overload: writes each argument in turn.
+    template <typename FIRST, typename... REST>
+    inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
+        Write(out, std::forward<FIRST>(first));
+        Write(out, std::forward<REST>(rest)...);
+        return out;
+    }
+
+    // Fatal() prints a message to stdout with the given file, line, function and optional message,
+    // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
+    // UNIMPLEMENTED() macro below.
+    // Note: the message is written to std::cout (not std::cerr) and this
+    // function never returns.
+    template <typename... MSG_ARGS>
+    [[noreturn]] inline void Fatal(const char* reason,
+                                   const char* file,
+                                   int line,
+                                   const char* function,
+                                   MSG_ARGS&&... msg_args) {
+        std::stringstream msg;
+        msg << file << ":" << line << ": " << reason << ": " << function << "()";
+        // Only append the optional message when arguments were supplied.
+        if constexpr (sizeof...(msg_args) > 0) {
+            msg << " ";
+            Write(msg, std::forward<MSG_ARGS>(msg_args)...);
+        }
+        std::cout << msg.str() << std::endl;
+        abort();
+    }
+
+// LOG() prints the current file, line and function to stdout, followed by a
+// string representation of all the variadic arguments.
+// (', ##__VA_ARGS__' is a GNU extension that allows LOG() with no arguments.)
+#define LOG(...)                                                                                 \
+    ::wgpu::utils::Write(std::cout << __FILE__ << ":" << __LINE__ << " " << __FUNCTION__ << ": ", \
+                         ##__VA_ARGS__)                                                          \
+        << std::endl
+
+// UNIMPLEMENTED() prints 'UNIMPLEMENTED' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to stub code that has not yet been implemented.
+#define UNIMPLEMENTED(...) \
+    ::wgpu::utils::Fatal("UNIMPLEMENTED", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+// UNREACHABLE() prints 'UNREACHABLE' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to mark code paths that must never be executed.
+#define UNREACHABLE(...) \
+    ::wgpu::utils::Fatal("UNREACHABLE", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+} // namespace wgpu::utils
+
+#endif // DAWNNODE_UTILS_DEBUG_H_
diff --git a/chromium/third_party/dawn/src/dawn/platform/BUILD.gn b/chromium/third_party/dawn/src/dawn/platform/BUILD.gn
new file mode 100644
index 00000000000..2d1cb00900a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/BUILD.gn
@@ -0,0 +1,41 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_component.gni")
+
+# The dawn_platform component: the default dawn::platform::Platform
+# implementation plus its tracing and worker-thread support code.
+dawn_component("platform") {
+  DEFINE_PREFIX = "DAWN_PLATFORM"
+
+  configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+  sources = [
+    "${dawn_root}/include/dawn/platform/DawnPlatform.h",
+    "${dawn_root}/include/dawn/platform/dawn_platform_export.h",
+    "DawnPlatform.cpp",
+    "WorkerThread.cpp",
+    "WorkerThread.h",
+    "tracing/EventTracer.cpp",
+    "tracing/EventTracer.h",
+    "tracing/TraceEvent.h",
+  ]
+
+  deps = [ "${dawn_root}/src/dawn/common" ]
+
+  public_deps = [
+    # DawnPlatform.h has #include <dawn/webgpu.h>
+    "${dawn_root}/include/dawn:headers",
+  ]
+}
diff --git a/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt
new file mode 100644
index 00000000000..4a74b23c28e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dawn_platform: the default dawn::platform::Platform implementation.
+add_library(dawn_platform ${DAWN_DUMMY_FILE})
+
+# Export macro setup: symbols are exported while building this library, and
+# marked as imported-from-a-shared-library for consumers when BUILD_SHARED_LIBS.
+target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+    target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_platform PRIVATE
+    "${DAWN_INCLUDE_DIR}/dawn/platform/DawnPlatform.h"
+    "${DAWN_INCLUDE_DIR}/dawn/platform/dawn_platform_export.h"
+    "DawnPlatform.cpp"
+    "WorkerThread.cpp"
+    "WorkerThread.h"
+    "tracing/EventTracer.cpp"
+    "tracing/EventTracer.h"
+    "tracing/TraceEvent.h"
+)
+target_link_libraries(dawn_platform PUBLIC dawn_headers PRIVATE dawn_internal_config dawn_common)
diff --git a/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp b/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp
new file mode 100644
index 00000000000..2706316acd2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/DawnPlatform.cpp
@@ -0,0 +1,63 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/DawnPlatform.h"
+#include "dawn/platform/WorkerThread.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::platform {
+
+    CachingInterface::CachingInterface() = default;
+
+    CachingInterface::~CachingInterface() = default;
+
+    Platform::Platform() = default;
+
+    Platform::~Platform() = default;
+
+    // Default: tracing is disabled for every category — returns a pointer to
+    // a static zero flag. The 'category' parameter is intentionally unused.
+    const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
+        static unsigned char disabled = 0;
+        return &disabled;
+    }
+
+    // Default: returns 0, which tracing::AddTraceEvent (EventTracer.cpp)
+    // treats as "no time source" and skips emitting the event.
+    double Platform::MonotonicallyIncreasingTime() {
+        return 0;
+    }
+
+    uint64_t Platform::AddTraceEvent(char phase,
+                                     const unsigned char* categoryGroupEnabled,
+                                     const char* name,
+                                     uint64_t id,
+                                     double timestamp,
+                                     int numArgs,
+                                     const char** argNames,
+                                     const unsigned char* argTypes,
+                                     const uint64_t* argValues,
+                                     unsigned char flags) {
+        // AddTraceEvent cannot be called if events are disabled.
+        ASSERT(false);
+        return 0;
+    }
+
+    // Default: no caching support.
+    dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
+                                                                    size_t fingerprintSize) {
+        return nullptr;
+    }
+
+    std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
+        return std::make_unique<AsyncWorkerThreadPool>();
+    }
+
+}  // namespace dawn::platform
diff --git a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp
new file mode 100644
index 00000000000..8ecbc589223
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.cpp
@@ -0,0 +1,97 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/WorkerThread.h"
+
+#include <condition_variable>
+#include <functional>
+#include <thread>
+
+#include "dawn/common/Assert.h"
+
+namespace {
+
+    // One-shot completion latch shared between a worker thread and the
+    // AsyncWaitableEvent handed back to the caller. A mutex + condition
+    // variable guard the single 'complete' flag.
+    class AsyncWaitableEventImpl {
+      public:
+        AsyncWaitableEventImpl() : mIsComplete(false) {
+        }
+
+        // Blocks until MarkAsComplete() has been called.
+        void Wait() {
+            std::unique_lock<std::mutex> lock(mMutex);
+            mCondition.wait(lock, [this] { return mIsComplete; });
+        }
+
+        // Non-blocking query of the completion flag.
+        bool IsComplete() {
+            std::lock_guard<std::mutex> lock(mMutex);
+            return mIsComplete;
+        }
+
+        // Sets the flag and wakes all waiters. The flag is set under the lock;
+        // notify_all() is deliberately called after releasing it.
+        void MarkAsComplete() {
+            {
+                std::lock_guard<std::mutex> lock(mMutex);
+                mIsComplete = true;
+            }
+            mCondition.notify_all();
+        }
+
+      private:
+        std::mutex mMutex;
+        std::condition_variable mCondition;
+        bool mIsComplete;
+    };
+
+    // WaitableEvent returned by AsyncWorkerThreadPool::PostWorkerTask.
+    // Holds the completion state in a shared_ptr so the worker thread can
+    // signal completion even after this event object has been destroyed.
+    class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
+      public:
+        // 'explicit' removed: it has no effect on a zero-argument constructor.
+        AsyncWaitableEvent() : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {
+        }
+
+        // Blocks until the posted task has run to completion.
+        void Wait() override {
+            mWaitableEventImpl->Wait();
+        }
+
+        bool IsComplete() override {
+            return mWaitableEventImpl->IsComplete();
+        }
+
+        // Shared state handed to the worker thread's closure.
+        std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
+            return mWaitableEventImpl;
+        }
+
+      private:
+        std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
+    };
+
+} // anonymous namespace
+
+namespace dawn::platform {
+
+    // Runs 'callback(userdata)' asynchronously and returns a WaitableEvent
+    // that completes when the callback has finished.
+    // NOTE(review): despite the "pool" name, this spawns one detached
+    // std::thread per task — there is no thread reuse or concurrency cap here.
+    std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
+        dawn::platform::PostWorkerTaskCallback callback,
+        void* userdata) {
+        std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
+
+        // The closure copies the shared impl so completion can be signalled
+        // even if 'waitableEvent' is destroyed before the thread runs.
+        std::function<void()> doTask =
+            [callback, userdata, waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
+                callback(userdata);
+                waitableEventImpl->MarkAsComplete();
+            };
+
+        std::thread thread(doTask);
+        thread.detach();
+
+        return waitableEvent;
+    }
+
+} // namespace dawn::platform
diff --git a/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h
new file mode 100644
index 00000000000..90796896553
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/WorkerThread.h
@@ -0,0 +1,32 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Include guard renamed from the stale COMMON_WORKERTHREAD_H_ (the file lives
+// in dawn/platform/, and the sibling header tracing/EventTracer.h uses the
+// DAWNPLATFORM_ prefix).
+#ifndef DAWNPLATFORM_WORKERTHREAD_H_
+#define DAWNPLATFORM_WORKERTHREAD_H_
+
+#include "dawn/common/NonCopyable.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::platform {
+
+    // WorkerTaskPool that runs each posted task on its own detached
+    // std::thread (see WorkerThread.cpp).
+    class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
+      public:
+        // Runs 'callback(userdata)' asynchronously; the returned WaitableEvent
+        // completes when the callback has finished.
+        std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
+            dawn::platform::PostWorkerTaskCallback callback,
+            void* userdata) override;
+    };
+
+}  // namespace dawn::platform
+
+#endif  // DAWNPLATFORM_WORKERTHREAD_H_
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp
new file mode 100644
index 00000000000..7445d98663a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.cpp
@@ -0,0 +1,58 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/platform/tracing/EventTracer.h"
+#include "dawn/common/Assert.h"
+#include "dawn/platform/DawnPlatform.h"
+
+namespace dawn::platform::tracing {
+
+    // Returns the platform's enabled-flag for 'category', or a pointer to a
+    // static zero flag when the platform is null or returns null — so callers
+    // can always dereference the result.
+    const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
+        static unsigned char disabled = 0;
+        if (platform == nullptr) {
+            return &disabled;
+        }
+
+        const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
+        if (categoryEnabledFlag != nullptr) {
+            return categoryEnabledFlag;
+        }
+
+        return &disabled;
+    }
+
+    // Forwards a trace event to the platform, stamping it with the platform's
+    // monotonic time. A zero time is treated as "no time source" and the event
+    // is dropped (handle 0).
+    TraceEventHandle AddTraceEvent(Platform* platform,
+                                   char phase,
+                                   const unsigned char* categoryGroupEnabled,
+                                   const char* name,
+                                   uint64_t id,
+                                   int numArgs,
+                                   const char** argNames,
+                                   const unsigned char* argTypes,
+                                   const uint64_t* argValues,
+                                   unsigned char flags) {
+        ASSERT(platform != nullptr);
+
+        double timestamp = platform->MonotonicallyIncreasingTime();
+        if (timestamp != 0) {
+            TraceEventHandle handle =
+                platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
+                                        argNames, argTypes, argValues, flags);
+            return handle;
+        }
+
+        return static_cast<TraceEventHandle>(0);
+    }
+
+}  // namespace dawn::platform::tracing
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h
new file mode 100644
index 00000000000..0200ec50a1a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/EventTracer.h
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNPLATFORM_TRACING_EVENTTRACER_H_
+#define DAWNPLATFORM_TRACING_EVENTTRACER_H_
+
+#include "dawn/platform/dawn_platform_export.h"
+
+#include <cstdint>
+
+namespace dawn::platform {
+
+    class Platform;
+    enum class TraceCategory;
+
+    namespace tracing {
+
+        // Opaque identifier for an emitted trace event.
+        using TraceEventHandle = uint64_t;
+
+        // Returns a dereferenceable enabled-flag for the category; never null,
+        // even when 'platform' is null (see EventTracer.cpp).
+        DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(
+            Platform* platform,
+            TraceCategory category);
+
+        // TODO(enga): Simplify this API.
+        // Forwards a trace event to the (non-null) platform.
+        DAWN_PLATFORM_EXPORT TraceEventHandle
+        AddTraceEvent(Platform* platform,
+                      char phase,
+                      const unsigned char* categoryGroupEnabled,
+                      const char* name,
+                      uint64_t id,
+                      int numArgs,
+                      const char** argNames,
+                      const unsigned char* argTypes,
+                      const uint64_t* argValues,
+                      unsigned char flags);
+
+    }  // namespace tracing
+}  // namespace dawn::platform
+
+#endif  // DAWNPLATFORM_TRACING_EVENTTRACER_H_
diff --git a/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h b/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h
new file mode 100644
index 00000000000..e120e086111
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/platform/tracing/TraceEvent.h
@@ -0,0 +1,991 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent")
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you need them
+// to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use ASYNC_BEGIN and
+// ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process. Pointers can
+// be used for the ID parameter, and they will be mangled internally so that
+// the same pointer on two different processes will not match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category, name, and arg_names. Thus, the following code will
+// cause problems:
+//     char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_enabled flag is checked. This is an unsigned char, and
+// not intended to be multithread safe. It optimizes access to addTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling addTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because addTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#ifndef DAWNPLATFORM_TRACING_TRACEEVENT_H_
+#define DAWNPLATFORM_TRACING_TRACEEVENT_H_
+
+#include <string>
+
+#include "dawn/platform/tracing/EventTracer.h"
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0)
+#define TRACE_EVENT1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val)
+#define TRACE_EVENT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_INSTANT1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_INSTANT1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_BEGIN1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_BEGIN1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(platform, category, name) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_END1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+// Fix: pass the trailing 0 after the flags, matching the arity of every other
+// INTERNAL_TRACE_EVENT_ADD invocation (e.g. TRACE_EVENT_COPY_END1/2, TRACE_EVENT_COPY_INSTANT0); without it this macro expands with one argument too few. NOLINTNEXTLINE
#define TRACE_EVENT_COPY_END0(platform, category, name) INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_END1(platform, category, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(platform, category, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(platform, category, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(platform, category, name, value1_name, value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+ TRACE_EVENT_FLAG_NONE, 0, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(platform, category, name, value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
+ TRACE_EVENT_FLAG_COPY, 0, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(platform, category, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(platform, category, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_COPY, 0, \
+ value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
+
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category, name and id values all
+// match. |id| must either be a pointer or an integer value up to 64 bits. If
+// it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_BEGIN macros. When the operation completes, call ASYNC_END.
+// An async operation can span threads and processes, but all events in that
+// operation must use the same |name| and |id|. Each event can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(platform, category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(platform, category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single ASYNC_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// ASYNC_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_ASYNC_STEP0(platform, category, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, "step", step)
+#define TRACE_EVENT_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_STEP0(platform, category, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, "step", step)
+#define TRACE_EVENT_COPY_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(platform, category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(platform, category, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
+ TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(platform, category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with none, one or two associated arguments. If the category is not enabled,
+// then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(platform, category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(platform, category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(platform, category_group, name, id, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(platform, category_group, name, id, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(platform, category_group, name, id, \
+ timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(platform, category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(platform, category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(platform, category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name, \
+ arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_BIND_TO_ENCLOSING, 0)
+#define TRACE_EVENT_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(platform, category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0)
+#define TRACE_EVENT_COPY_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
+ id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Creates a scope of a sampling state with the given category and name (both must
+// be constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+//
+// { // The sampling state is set within this scope.
+//     TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
+ TraceEvent::SamplingStateScope<bucket_number> traceEventSamplingScope(category "\0" name);
+
+// Returns a current sampling state of the given bucket.
+// The format of the returned string is "category\0name".
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ TraceEvent::SamplingStateScope<bucket_number>::current()
+
+// Sets a current sampling state of the given bucket.
+// |category| and |name| have to be constant strings.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
+ TraceEvent::SamplingStateScope<bucket_number>::set(category "\0" name)
+
+// Sets a current sampling state of the given bucket.
+// |categoryAndName| doesn't need to be a constant string.
+// The format of the string is "category\0name".
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(bucket_number, categoryAndName) \
+ TraceEvent::SamplingStateScope<bucket_number>::set(categoryAndName)
+
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category name. The returned
+// pointer can be held permanently in a local static for example. If the
+// unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+//     TRACE_EVENT_API_GET_CATEGORY_ENABLED(dawn::platform::Platform* platform, const char* category_name)
+#define TRACE_EVENT_API_GET_CATEGORY_ENABLED dawn::platform::tracing::GetTraceCategoryEnabledFlag
+
+// Add a trace event to the platform tracing system.
+// void TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const unsigned char* category_enabled,
+// const char* name,
+// unsigned long long id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT dawn::platform::tracing::AddTraceEvent
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNALTRACEEVENTUID(name_prefix) INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category) \
+ static const unsigned char* INTERNALTRACEEVENTUID(catstatic) = 0; \
+ if (!INTERNALTRACEEVENTUID(catstatic)) \
+ INTERNALTRACEEVENTUID(catstatic) = TRACE_EVENT_API_GET_CATEGORY_ENABLED(platform, category);
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(platformObj, phase, category, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj, \
+ ::dawn::platform::TraceCategory::category) \
+ if (*INTERNALTRACEEVENTUID(catstatic)) { \
+ dawn::platform::TraceEvent::addTraceEvent( \
+ platformObj, phase, INTERNALTRACEEVENTUID(catstatic), name, \
+ dawn::platform::TraceEvent::noEventId, flags, __VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(platformObj, category, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj, ::dawn::platform::TraceCategory::category) \
+ dawn::platform::TraceEvent::TraceEndOnScopeClose INTERNALTRACEEVENTUID(profileScope); \
+ do { \
+ if (*INTERNALTRACEEVENTUID(catstatic)) { \
+ dawn::platform::TraceEvent::addTraceEvent( \
+ platformObj, TRACE_EVENT_PHASE_BEGIN, INTERNALTRACEEVENTUID(catstatic), name, \
+ dawn::platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, __VA_ARGS__); \
+ INTERNALTRACEEVENTUID(profileScope) \
+ .initialize(platformObj, INTERNALTRACEEVENTUID(catstatic), name); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(platformObj, phase, category, name, id, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platformObj, \
+ ::dawn::platform::TraceCategory::category) \
+ if (*INTERNALTRACEEVENTUID(catstatic)) { \
+ unsigned char traceEventFlags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ dawn::platform::TraceEvent::TraceID traceEventTraceID(id, &traceEventFlags); \
+ dawn::platform::TraceEvent::addTraceEvent( \
+ platformObj, phase, INTERNALTRACEEVENTUID(catstatic), name, \
+ traceEventTraceID.data(), traceEventFlags, __VA_ARGS__); \
+ } \
+ } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP ('T')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned char>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned char>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned char>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned char>(1 << 2))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+
+namespace dawn::platform::TraceEvent {
+
+ // Specify these values when the corresponding argument of addTraceEvent is not
+ // used.
+ const int zeroNumArgs = 0;
+ const unsigned long long noEventId = 0;
+
+ // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+ // are mangled with the Process ID so that they are unlikely to collide when the
+ // same pointer is used on different processes.
+ class TraceID {
+ public:
+ explicit TraceID(const void* id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ explicit TraceID(unsigned long long id, unsigned char* flags) : m_data(id) {
+ (void)flags;
+ }
+ explicit TraceID(unsigned long id, unsigned char* flags) : m_data(id) {
+ (void)flags;
+ }
+ explicit TraceID(unsigned int id, unsigned char* flags) : m_data(id) {
+ (void)flags;
+ }
+ explicit TraceID(unsigned short id, unsigned char* flags) : m_data(id) {
+ (void)flags;
+ }
+ explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) {
+ (void)flags;
+ }
+ explicit TraceID(long long id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(long id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(int id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(short id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(id)) {
+ (void)flags;
+ }
+ explicit TraceID(signed char id, unsigned char* flags)
+ : m_data(static_cast<unsigned long long>(id)) {
+ (void)flags;
+ }
+
+ unsigned long long data() const {
+ return m_data;
+ }
+
+ private:
+ unsigned long long m_data;
+ };
+
+ // Simple union to store various types as unsigned long long.
+ union TraceValueUnion {
+ bool m_bool;
+ unsigned long long m_uint;
+ long long m_int;
+ double m_double;
+ const void* m_pointer;
+ const char* m_string;
+ };
+
+ // Simple container for const char* that should be copied instead of retained.
+ class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : m_str(str) {
+ }
+ operator const char*() const {
+ return m_str;
+ }
+
+ private:
+ const char* m_str;
+ };
+
+// Define setTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, value_type_id) \
+ static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
+ TraceValueUnion typeValue; \
+ typeValue.union_member = arg; \
+ *type = value_type_id; \
+ *value = typeValue.m_uint; \
+ }
+// Simpler form for int types that can be safely cast.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
+ static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<unsigned long long>(arg); \
+ }
+
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+ INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
+ INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
+ INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
+ INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
+ INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&,
+ m_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+ static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
+ TraceValueUnion typeValue;
+ typeValue.m_string = arg.data();
+ *type = TRACE_VALUE_TYPE_COPY_STRING;
+ *value = typeValue.m_uint;
+ }
+
+ // These addTraceEvent template functions are defined here instead of in the
+ // macro, because the arg values could be temporary string objects. In order to
+ // store pointers to the internal c_str and pass through to the tracing API, the
+ // arg values must live throughout these procedures.
+
+ static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
+ zeroNumArgs, 0, 0, 0, flags);
+ }
+
+ template <class ARG1_TYPE>
+ static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/,
+ const char* arg1Name,
+ const ARG1_TYPE& arg1Val) {
+ const int numArgs = 1;
+ unsigned char argTypes[1];
+ uint64_t argValues[1];
+ setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+ &arg1Name, argTypes, argValues, flags);
+ }
+
+ template <class ARG1_TYPE, class ARG2_TYPE>
+ static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+ dawn::platform::Platform* platform,
+ char phase,
+ const unsigned char* categoryEnabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags,
+ int /*unused, helps avoid empty __VA_ARGS__*/,
+ const char* arg1Name,
+ const ARG1_TYPE& arg1Val,
+ const char* arg2Name,
+ const ARG2_TYPE& arg2Val) {
+ const int numArgs = 2;
+ const char* argNames[2] = {arg1Name, arg2Name};
+ unsigned char argTypes[2];
+ uint64_t argValues[2];
+ setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+ setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+ argNames, argTypes, argValues, flags);
+ }
+
+ // Used by TRACE_EVENTx macro. Do not use directly.
+ class TraceEndOnScopeClose {
+ public:
+ // Note: members of m_data intentionally left uninitialized. See initialize.
+ TraceEndOnScopeClose() : m_pdata(0) {
+ }
+ ~TraceEndOnScopeClose() {
+ if (m_pdata)
+ addEventIfEnabled();
+ }
+
+ void initialize(dawn::platform::Platform* platform,
+ const unsigned char* categoryEnabled,
+ const char* name) {
+ m_data.platform = platform;
+ m_data.categoryEnabled = categoryEnabled;
+ m_data.name = name;
+ m_pdata = &m_data;
+ }
+
+ private:
+ // Add the end event if the category is still enabled.
+ void addEventIfEnabled() {
+ // Only called when m_pdata is non-null.
+ if (*m_pdata->categoryEnabled) {
+ TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
+ m_pdata->categoryEnabled, m_pdata->name, noEventId,
+ zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
+ }
+ }
+
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ dawn::platform::Platform* platform;
+ const unsigned char* categoryEnabled;
+ const char* name;
+ };
+ Data* m_pdata;
+ Data m_data;
+ };
+
+} // namespace dawn::platform::TraceEvent
+
+#endif // DAWNPLATFORM_TRACING_TRACEEVENT_H_
diff --git a/chromium/third_party/dawn/src/dawn/tests/BUILD.gn b/chromium/third_party/dawn/src/dawn/tests/BUILD.gn
new file mode 100644
index 00000000000..4f7a559b16f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/tests/BUILD.gn
@@ -0,0 +1,644 @@
+# Copyright 2012 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("//testing/test.gni")
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_features.gni")
+
+group("tests") {
+ testonly = true
+ deps = [
+ ":dawn_end2end_tests",
+ ":dawn_perf_tests",
+ ":dawn_unittests",
+ ]
+}
+
+###############################################################################
+# Gtest Gmock - Handle building inside and outside of Chromium.
+###############################################################################
+
+# When building outside of Chromium we need to define our own targets for GTest
+# and GMock. However when compiling inside of Chromium we need to reuse the
+# existing targets, both because Chromium has a special harness for swarming
+# and because otherwise the "gn check" fails.
+
+if (!build_with_chromium) {
+  # When we aren't in Chromium we define our own targets based on the location
+ # of the googletest repo.
+ googletest_dir = dawn_googletest_dir
+
+ config("gtest_config") {
+ include_dirs = [
+ "${googletest_dir}/googletest",
+ "${googletest_dir}/googletest/include",
+ ]
+ }
+
+ static_library("gtest") {
+ testonly = true
+ sources = [ "${googletest_dir}/googletest/src/gtest-all.cc" ]
+ public_configs = [ ":gtest_config" ]
+ }
+
+ config("gmock_config") {
+ include_dirs = [
+ "${googletest_dir}/googlemock",
+ "${googletest_dir}/googlemock/include",
+ "${googletest_dir}/googletest/include",
+ ]
+ }
+
+ static_library("gmock") {
+ testonly = true
+ sources = [ "${googletest_dir}/googlemock/src/gmock-all.cc" ]
+ public_configs = [ ":gmock_config" ]
+ }
+
+ group("gmock_and_gtest") {
+ testonly = true
+ public_deps = [
+ ":gmock",
+ ":gtest",
+ ]
+ }
+} else {
+ # When we are in Chromium we reuse its targets, and also add some deps that
+ # are needed to launch the test in swarming mode.
+ group("gmock_and_gtest") {
+ testonly = true
+ public_deps = [
+ "//base",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ }
+}
+
+###############################################################################
+# Wrapping of Chromium targets
+###############################################################################
+
+# These targets are separated because they are Chromium sources files that
+# can't use the dawn_internal config, otherwise Dawn's warning flags get
+# applied while compiling a bunch of Chromium's //base (via header inclusion)
+if (build_with_chromium) {
+ source_set("unittests_main") {
+ testonly = true
+ deps = [ ":gmock_and_gtest" ]
+ sources = [ "//gpu/dawn_unittests_main.cc" ]
+ }
+ source_set("end2end_tests_main") {
+ testonly = true
+ deps = [ ":gmock_and_gtest" ]
+ sources = [ "//gpu/dawn_end2end_tests_main.cc" ]
+ }
+ source_set("perf_tests_main") {
+ testonly = true
+ deps = [ ":gmock_and_gtest" ]
+ sources = [ "//gpu/dawn_perf_tests_main.cc" ]
+ }
+}
+
+###############################################################################
+# Dawn test template
+###############################################################################
+template("dawn_test") {
+ test(target_name) {
+ # Copy all variables except "configs", which has a default value
+ forward_variables_from(invoker, "*", [ "configs" ])
+ if (defined(invoker.configs)) {
+ configs += invoker.configs
+ }
+
+ configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+ }
+}
+
+###############################################################################
+# Dawn unittests
+###############################################################################
+
+dawn_json_generator("mock_webgpu_gen") {
+ target = "mock_api"
+ outputs = [
+ "src/dawn/mock_webgpu.h",
+ "src/dawn/mock_webgpu.cpp",
+ ]
+}
+
+# Source code for mocks used for unit testing are separated from the rest of
+# sources so that they aren't included in non-test builds.
+source_set("native_mocks_sources") {
+ testonly = true
+
+ deps = [
+ ":gmock_and_gtest",
+ "${dawn_root}/src/dawn/native:sources",
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ ]
+
+ # Add internal dawn native config for internal unittests.
+ configs += [ "${dawn_root}/src/dawn/native:internal" ]
+
+ sources = [
+ "unittests/native/mocks/BindGroupLayoutMock.h",
+ "unittests/native/mocks/BindGroupMock.h",
+ "unittests/native/mocks/CommandBufferMock.h",
+ "unittests/native/mocks/ComputePipelineMock.h",
+ "unittests/native/mocks/DeviceMock.h",
+ "unittests/native/mocks/ExternalTextureMock.h",
+ "unittests/native/mocks/PipelineLayoutMock.h",
+ "unittests/native/mocks/QuerySetMock.h",
+ "unittests/native/mocks/RenderPipelineMock.h",
+ "unittests/native/mocks/SamplerMock.h",
+ "unittests/native/mocks/ShaderModuleMock.cpp",
+ "unittests/native/mocks/ShaderModuleMock.h",
+ "unittests/native/mocks/SwapChainMock.h",
+ "unittests/native/mocks/TextureMock.h",
+ ]
+}
+
+dawn_test("dawn_unittests") {
+ deps = [
+ ":gmock_and_gtest",
+ ":mock_webgpu_gen",
+ ":native_mocks_sources",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:sources",
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ # Add internal dawn native config for internal unittests.
+ configs = [ "${dawn_root}/src/dawn/native:internal" ]
+
+ sources = get_target_outputs(":mock_webgpu_gen")
+ sources += [
+ "${dawn_root}/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp",
+ "${dawn_root}/src/dawn/wire/client/ClientMemoryTransferService_mock.h",
+ "${dawn_root}/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp",
+ "${dawn_root}/src/dawn/wire/server/ServerMemoryTransferService_mock.h",
+ "DawnNativeTest.cpp",
+ "DawnNativeTest.h",
+ "MockCallback.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
+ "unittests/AsyncTaskTests.cpp",
+ "unittests/BitSetIteratorTests.cpp",
+ "unittests/BuddyAllocatorTests.cpp",
+ "unittests/BuddyMemoryAllocatorTests.cpp",
+ "unittests/ChainUtilsTests.cpp",
+ "unittests/CommandAllocatorTests.cpp",
+ "unittests/ConcurrentCacheTests.cpp",
+ "unittests/EnumClassBitmasksTests.cpp",
+ "unittests/EnumMaskIteratorTests.cpp",
+ "unittests/ErrorTests.cpp",
+ "unittests/FeatureTests.cpp",
+ "unittests/GPUInfoTests.cpp",
+ "unittests/GetProcAddressTests.cpp",
+ "unittests/ITypArrayTests.cpp",
+ "unittests/ITypBitsetTests.cpp",
+ "unittests/ITypSpanTests.cpp",
+ "unittests/ITypVectorTests.cpp",
+ "unittests/LimitsTests.cpp",
+ "unittests/LinkedListTests.cpp",
+ "unittests/MathTests.cpp",
+ "unittests/ObjectBaseTests.cpp",
+ "unittests/PerStageTests.cpp",
+ "unittests/PerThreadProcTests.cpp",
+ "unittests/PlacementAllocatedTests.cpp",
+ "unittests/RefBaseTests.cpp",
+ "unittests/RefCountedTests.cpp",
+ "unittests/ResultTests.cpp",
+ "unittests/RingBufferAllocatorTests.cpp",
+ "unittests/SerialMapTests.cpp",
+ "unittests/SerialQueueTests.cpp",
+ "unittests/SlabAllocatorTests.cpp",
+ "unittests/StackContainerTests.cpp",
+ "unittests/SubresourceStorageTests.cpp",
+ "unittests/SystemUtilsTests.cpp",
+ "unittests/ToBackendTests.cpp",
+ "unittests/TypedIntegerTests.cpp",
+ "unittests/native/CommandBufferEncodingTests.cpp",
+ "unittests/native/DestroyObjectTests.cpp",
+ "unittests/native/DeviceCreationTests.cpp",
+ "unittests/validation/BindGroupValidationTests.cpp",
+ "unittests/validation/BufferValidationTests.cpp",
+ "unittests/validation/CommandBufferValidationTests.cpp",
+ "unittests/validation/ComputeIndirectValidationTests.cpp",
+ "unittests/validation/ComputeValidationTests.cpp",
+ "unittests/validation/CopyCommandsValidationTests.cpp",
+ "unittests/validation/CopyTextureForBrowserTests.cpp",
+ "unittests/validation/DebugMarkerValidationTests.cpp",
+ "unittests/validation/DeviceValidationTests.cpp",
+ "unittests/validation/DrawIndirectValidationTests.cpp",
+ "unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp",
+ "unittests/validation/DynamicStateCommandValidationTests.cpp",
+ "unittests/validation/ErrorScopeValidationTests.cpp",
+ "unittests/validation/ExternalTextureTests.cpp",
+ "unittests/validation/GetBindGroupLayoutValidationTests.cpp",
+ "unittests/validation/IndexBufferValidationTests.cpp",
+ "unittests/validation/InternalUsageValidationTests.cpp",
+ "unittests/validation/LabelTests.cpp",
+ "unittests/validation/MinimumBufferSizeValidationTests.cpp",
+ "unittests/validation/MultipleDeviceTests.cpp",
+ "unittests/validation/OverridableConstantsValidationTests.cpp",
+ "unittests/validation/PipelineAndPassCompatibilityTests.cpp",
+ "unittests/validation/QueryValidationTests.cpp",
+ "unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp",
+ "unittests/validation/QueueSubmitValidationTests.cpp",
+ "unittests/validation/QueueWriteBufferValidationTests.cpp",
+ "unittests/validation/QueueWriteTextureValidationTests.cpp",
+ "unittests/validation/RenderBundleValidationTests.cpp",
+ "unittests/validation/RenderPassDescriptorValidationTests.cpp",
+ "unittests/validation/RenderPipelineValidationTests.cpp",
+ "unittests/validation/ResourceUsageTrackingTests.cpp",
+ "unittests/validation/SamplerValidationTests.cpp",
+ "unittests/validation/ShaderModuleValidationTests.cpp",
+ "unittests/validation/StorageTextureValidationTests.cpp",
+ "unittests/validation/TextureSubresourceTests.cpp",
+ "unittests/validation/TextureValidationTests.cpp",
+ "unittests/validation/TextureViewValidationTests.cpp",
+ "unittests/validation/ToggleValidationTests.cpp",
+ "unittests/validation/UnsafeAPIValidationTests.cpp",
+ "unittests/validation/ValidationTest.cpp",
+ "unittests/validation/ValidationTest.h",
+ "unittests/validation/VertexBufferValidationTests.cpp",
+ "unittests/validation/VertexStateValidationTests.cpp",
+ "unittests/validation/VideoViewsValidationTests.cpp",
+ "unittests/validation/WriteBufferTests.cpp",
+ "unittests/wire/WireAdapterTests.cpp",
+ "unittests/wire/WireArgumentTests.cpp",
+ "unittests/wire/WireBasicTests.cpp",
+ "unittests/wire/WireBufferMappingTests.cpp",
+ "unittests/wire/WireCreatePipelineAsyncTests.cpp",
+ "unittests/wire/WireDestroyObjectTests.cpp",
+ "unittests/wire/WireDisconnectTests.cpp",
+ "unittests/wire/WireErrorCallbackTests.cpp",
+ "unittests/wire/WireExtensionTests.cpp",
+ "unittests/wire/WireInjectDeviceTests.cpp",
+ "unittests/wire/WireInjectInstanceTests.cpp",
+ "unittests/wire/WireInjectSwapChainTests.cpp",
+ "unittests/wire/WireInjectTextureTests.cpp",
+ "unittests/wire/WireInstanceTests.cpp",
+ "unittests/wire/WireMemoryTransferServiceTests.cpp",
+ "unittests/wire/WireOptionalTests.cpp",
+ "unittests/wire/WireQueueTests.cpp",
+ "unittests/wire/WireShaderModuleTests.cpp",
+ "unittests/wire/WireTest.cpp",
+ "unittests/wire/WireTest.h",
+ "unittests/wire/WireWGPUDevicePropertiesTests.cpp",
+ ]
+
+ if (is_win) {
+ sources += [ "unittests/WindowsUtilsTests.cpp" ]
+ }
+
+ if (dawn_enable_d3d12) {
+ sources += [ "unittests/d3d12/CopySplitTests.cpp" ]
+ }
+
+ # When building inside Chromium, use their gtest main function because it is
+ # needed to run in swarming correctly.
+ if (build_with_chromium) {
+ deps += [ ":unittests_main" ]
+ } else {
+ sources += [ "UnittestsMain.cpp" ]
+ }
+}
+
+###############################################################################
+# Dawn end2end tests targets
+###############################################################################
+
+source_set("end2end_tests_sources") {
+ configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+ testonly = true
+
+ deps = [
+ ":gmock_and_gtest",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+
+ # Statically linked because the end2end white_box tests use Dawn internals.
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ sources = [
+ "DawnTest.h",
+ "MockCallback.h",
+ "ParamGenerator.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
+ "end2end/AdapterDiscoveryTests.cpp",
+ "end2end/BasicTests.cpp",
+ "end2end/BindGroupTests.cpp",
+ "end2end/BufferTests.cpp",
+ "end2end/BufferZeroInitTests.cpp",
+ "end2end/ClipSpaceTests.cpp",
+ "end2end/ColorStateTests.cpp",
+ "end2end/CommandEncoderTests.cpp",
+ "end2end/CompressedTextureFormatTests.cpp",
+ "end2end/ComputeCopyStorageBufferTests.cpp",
+ "end2end/ComputeDispatchTests.cpp",
+ "end2end/ComputeLayoutMemoryBufferTests.cpp",
+ "end2end/ComputeSharedMemoryTests.cpp",
+ "end2end/ComputeStorageBufferBarrierTests.cpp",
+ "end2end/CopyTests.cpp",
+ "end2end/CopyTextureForBrowserTests.cpp",
+ "end2end/CreatePipelineAsyncTests.cpp",
+ "end2end/CullingTests.cpp",
+ "end2end/DebugMarkerTests.cpp",
+ "end2end/DeprecatedAPITests.cpp",
+ "end2end/DepthBiasTests.cpp",
+ "end2end/DepthStencilCopyTests.cpp",
+ "end2end/DepthStencilLoadOpTests.cpp",
+ "end2end/DepthStencilSamplingTests.cpp",
+ "end2end/DepthStencilStateTests.cpp",
+ "end2end/DestroyTests.cpp",
+ "end2end/DeviceInitializationTests.cpp",
+ "end2end/DeviceLostTests.cpp",
+ "end2end/DrawIndexedIndirectTests.cpp",
+ "end2end/DrawIndexedTests.cpp",
+ "end2end/DrawIndirectTests.cpp",
+ "end2end/DrawTests.cpp",
+ "end2end/DynamicBufferOffsetTests.cpp",
+ "end2end/EntryPointTests.cpp",
+ "end2end/ExternalTextureTests.cpp",
+ "end2end/FirstIndexOffsetTests.cpp",
+ "end2end/GpuMemorySynchronizationTests.cpp",
+ "end2end/IndexFormatTests.cpp",
+ "end2end/MaxLimitTests.cpp",
+ "end2end/MemoryAllocationStressTests.cpp",
+ "end2end/MultisampledRenderingTests.cpp",
+ "end2end/MultisampledSamplingTests.cpp",
+ "end2end/NonzeroBufferCreationTests.cpp",
+ "end2end/NonzeroTextureCreationTests.cpp",
+ "end2end/ObjectCachingTests.cpp",
+ "end2end/OpArrayLengthTests.cpp",
+ "end2end/PipelineLayoutTests.cpp",
+ "end2end/PrimitiveStateTests.cpp",
+ "end2end/PrimitiveTopologyTests.cpp",
+ "end2end/QueryTests.cpp",
+ "end2end/QueueTests.cpp",
+ "end2end/QueueTimelineTests.cpp",
+ "end2end/ReadOnlyDepthStencilAttachmentTests.cpp",
+ "end2end/RenderAttachmentTests.cpp",
+ "end2end/RenderBundleTests.cpp",
+ "end2end/RenderPassLoadOpTests.cpp",
+ "end2end/RenderPassTests.cpp",
+ "end2end/SamplerFilterAnisotropicTests.cpp",
+ "end2end/SamplerTests.cpp",
+ "end2end/ScissorTests.cpp",
+ "end2end/ShaderFloat16Tests.cpp",
+ "end2end/ShaderTests.cpp",
+ "end2end/StorageTextureTests.cpp",
+ "end2end/SubresourceRenderAttachmentTests.cpp",
+ "end2end/Texture3DTests.cpp",
+ "end2end/TextureFormatTests.cpp",
+ "end2end/TextureSubresourceTests.cpp",
+ "end2end/TextureViewTests.cpp",
+ "end2end/TextureZeroInitTests.cpp",
+ "end2end/VertexFormatTests.cpp",
+ "end2end/VertexOnlyRenderPipelineTests.cpp",
+ "end2end/VertexStateTests.cpp",
+ "end2end/ViewportOrientationTests.cpp",
+ "end2end/ViewportTests.cpp",
+ ]
+
+ # Validation tests that need OS windows live in end2end tests.
+ sources += [
+ "unittests/validation/ValidationTest.cpp",
+ "unittests/validation/ValidationTest.h",
+ ]
+
+ libs = []
+
+ if (dawn_enable_d3d12) {
+ sources += [
+ "end2end/D3D12CachingTests.cpp",
+ "end2end/D3D12ResourceWrappingTests.cpp",
+ "end2end/VideoViewsTests_win.cpp",
+ ]
+ libs += [
+ "d3d11.lib",
+ "dxgi.lib",
+ ]
+ }
+
+ if (dawn_enable_metal) {
+ sources += [ "end2end/IOSurfaceWrappingTests.cpp" ]
+ frameworks = [ "IOSurface.framework" ]
+ }
+
+ if (dawn_enable_opengl) {
+ assert(dawn_supports_glfw_for_windowing)
+ }
+
+ if (dawn_supports_glfw_for_windowing) {
+ sources += [
+ "end2end/SwapChainTests.cpp",
+ "end2end/SwapChainValidationTests.cpp",
+ "end2end/WindowSurfaceTests.cpp",
+ ]
+ deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+ }
+
+ if (dawn_enable_d3d12 || (dawn_enable_vulkan && is_chromeos)) {
+ sources += [
+ "end2end/VideoViewsTests.cpp",
+ "end2end/VideoViewsTests.h",
+ ]
+ }
+
+ if (dawn_enable_vulkan && is_chromeos) {
+ sources += [ "end2end/VideoViewsTests_gbm.cpp" ]
+ }
+}
+
+source_set("white_box_tests_sources") {
+ configs += [ "${dawn_root}/src/dawn/native:internal" ]
+ testonly = true
+
+ deps = [
+ ":gmock_and_gtest",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:sources",
+
+ # Statically linked because the end2end white_box tests use Dawn internals.
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ sources = [
+ "DawnTest.h",
+ "ParamGenerator.h",
+ "ToggleParser.h",
+ ]
+
+ if (dawn_enable_vulkan) {
+ deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
+
+ if (is_chromeos) {
+ sources += [
+ "white_box/VulkanImageWrappingTests.cpp",
+ "white_box/VulkanImageWrappingTests.h",
+ "white_box/VulkanImageWrappingTests_DmaBuf.cpp",
+ ]
+ } else if (is_linux) {
+ sources += [
+ "white_box/VulkanImageWrappingTests.cpp",
+ "white_box/VulkanImageWrappingTests.h",
+ "white_box/VulkanImageWrappingTests_OpaqueFD.cpp",
+ ]
+ }
+
+ if (dawn_enable_error_injection) {
+ sources += [ "white_box/VulkanErrorInjectorTests.cpp" ]
+ }
+ }
+
+ sources += [
+ "white_box/BufferAllocatedSizeTests.cpp",
+ "white_box/InternalResourceUsageTests.cpp",
+ "white_box/InternalStorageBufferBindingTests.cpp",
+ "white_box/QueryInternalShaderTests.cpp",
+ ]
+
+ if (dawn_enable_d3d12) {
+ sources += [
+ "white_box/D3D12DescriptorHeapTests.cpp",
+ "white_box/D3D12ResidencyTests.cpp",
+ "white_box/D3D12ResourceHeapTests.cpp",
+ ]
+ }
+
+ if (dawn_enable_metal) {
+ sources += [ "white_box/MetalAutoreleasePoolTests.mm" ]
+ }
+
+ if (dawn_enable_opengl) {
+ deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+ }
+
+ if (dawn_enable_opengles) {
+ sources += [ "white_box/EGLImageWrappingTests.cpp" ]
+ deps += [ "//third_party/angle:libEGL" ]
+ }
+
+ libs = []
+}
+
+dawn_test("dawn_end2end_tests") {
+ deps = [
+ ":end2end_tests_sources",
+ ":gmock_and_gtest",
+ ":white_box_tests_sources",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:static",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ sources = [
+ "DawnTest.cpp",
+ "DawnTest.h",
+ ]
+
+ libs = []
+
+ # When building inside Chromium, use their gtest main function because it is
+ # needed to run in swarming correctly.
+ if (build_with_chromium) {
+ deps += [ ":end2end_tests_main" ]
+ } else {
+ sources += [ "End2EndTestsMain.cpp" ]
+ }
+
+ if (dawn_enable_opengl) {
+ deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+ }
+
+ if (is_chromeos) {
+ libs += [ "gbm" ]
+ }
+}
+
+###############################################################################
+# Dawn perf tests
+###############################################################################
+
+dawn_test("dawn_perf_tests") {
+ deps = [
+ ":gmock_and_gtest",
+ "${dawn_root}/src/dawn:cpp",
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native",
+ "${dawn_root}/src/dawn/platform",
+ "${dawn_root}/src/dawn/utils",
+ "${dawn_root}/src/dawn/wire",
+ ]
+
+ sources = [
+ "DawnTest.cpp",
+ "DawnTest.h",
+ "ParamGenerator.h",
+ "ToggleParser.cpp",
+ "ToggleParser.h",
+ "perf_tests/BufferUploadPerf.cpp",
+ "perf_tests/DawnPerfTest.cpp",
+ "perf_tests/DawnPerfTest.h",
+ "perf_tests/DawnPerfTestPlatform.cpp",
+ "perf_tests/DawnPerfTestPlatform.h",
+ "perf_tests/DrawCallPerf.cpp",
+ "perf_tests/ShaderRobustnessPerf.cpp",
+ "perf_tests/SubresourceTrackingPerf.cpp",
+ ]
+
+ libs = []
+
+ # When building inside Chromium, use their gtest main function and the
+ # other perf test scaffolding in order to run in swarming correctly.
+ if (build_with_chromium) {
+ deps += [ ":perf_tests_main" ]
+ data_deps = [ "//testing:run_perf_test" ]
+ } else {
+ sources += [ "PerfTestsMain.cpp" ]
+ }
+
+ if (dawn_enable_metal) {
+ frameworks = [ "IOSurface.framework" ]
+ }
+
+ if (dawn_enable_opengl) {
+ deps += [ "${dawn_root}/src/dawn/utils:glfw" ]
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn/utils/BUILD.gn b/chromium/third_party/dawn/src/dawn/utils/BUILD.gn
new file mode 100644
index 00000000000..e281b417127
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/BUILD.gn
@@ -0,0 +1,193 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/scripts/dawn_features.gni")
+
+###############################################################################
+# GLFW wrapping target
+###############################################################################
+
+# GLFW does not support ChromeOS, Android or Fuchsia, so provide a small mock
+# library that can be linked into the Dawn tests on these platforms. Otherwise,
+# use the real library from third_party/.
+if (dawn_supports_glfw_for_windowing) {
+ group("glfw") {
+ public_deps = [ "${dawn_root}/third_party/gn/glfw" ]
+ }
+} else if (is_fuchsia) {
+ # The mock implementation of GLFW on Fuchsia
+ config("glfw_public_config") {
+ # Allow inclusion of <GLFW/glfw3.h>
+ include_dirs = [ "${dawn_glfw_dir}/include" ]
+
+ # The GLFW/glfw3.h header includes <GL/gl.h> by default, but the latter
+ # does not exist on Fuchsia. Defining GLFW_INCLUDE_NONE helps work around
+ # the issue, but it needs to be defined for any file that includes the
+ # header.
+ defines = [
+ "GLFW_INCLUDE_NONE",
+ "GLFW_INCLUDE_VULKAN",
+ ]
+ }
+
+ static_library("glfw") {
+ sources = [
+ # NOTE: The header below is required to pass "gn check".
+ "${dawn_glfw_dir}/include/GLFW/glfw3.h",
+ "Glfw3Fuchsia.cpp",
+ ]
+ public_configs = [ ":glfw_public_config" ]
+ deps = [ "${dawn_root}/src/dawn/common" ]
+ }
+} else {
+ # Just skip GLFW on other systems
+ group("glfw") {
+ }
+}
+
+###############################################################################
+# Utils for tests and samples
+###############################################################################
+
+static_library("utils") {
+ configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+ sources = [
+ "ComboRenderBundleEncoderDescriptor.cpp",
+ "ComboRenderBundleEncoderDescriptor.h",
+ "ComboRenderPipelineDescriptor.cpp",
+ "ComboRenderPipelineDescriptor.h",
+ "PlatformDebugLogger.h",
+ "ScopedAutoreleasePool.h",
+ "SystemUtils.cpp",
+ "SystemUtils.h",
+ "TerribleCommandBuffer.cpp",
+ "TerribleCommandBuffer.h",
+ "TestUtils.cpp",
+ "TestUtils.h",
+ "TextureUtils.cpp",
+ "TextureUtils.h",
+ "Timer.h",
+ "WGPUHelpers.cpp",
+ "WGPUHelpers.h",
+ "WireHelper.cpp",
+ "WireHelper.h",
+ ]
+ deps = [
+ "${dawn_root}/src/dawn:proc",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native:headers",
+ "${dawn_root}/src/dawn/wire",
+ "${dawn_spirv_tools_dir}:spvtools_opt",
+ ]
+ libs = []
+ frameworks = []
+
+ if (is_win && !dawn_is_winuwp) {
+ sources += [ "WindowsDebugLogger.cpp" ]
+ } else {
+ sources += [ "EmptyDebugLogger.cpp" ]
+ }
+
+ if (is_win) {
+ sources += [ "WindowsTimer.cpp" ]
+ } else if (is_mac) {
+ sources += [
+ "OSXTimer.cpp",
+ "ObjCUtils.h",
+ "ObjCUtils.mm",
+ ]
+ frameworks += [ "QuartzCore.framework" ]
+ } else {
+ sources += [ "PosixTimer.cpp" ]
+ }
+
+ if (is_mac) {
+ sources += [ "ScopedAutoreleasePool.mm" ]
+ } else {
+ sources += [ "ScopedAutoreleasePool.cpp" ]
+ }
+
+ if (dawn_supports_glfw_for_windowing) {
+ sources += [
+ "GLFWUtils.cpp",
+ "GLFWUtils.h",
+ ]
+ deps += [ ":glfw" ]
+
+ if (dawn_enable_metal) {
+ sources += [ "GLFWUtils_metal.mm" ]
+ frameworks += [ "Metal.framework" ]
+ }
+ }
+
+ public_deps = [ "${dawn_root}/include/dawn:cpp_headers" ]
+}
+
+###############################################################################
+# Dawn samples, only in standalone builds
+###############################################################################
+
+if (dawn_standalone) {
+ # Library to handle the interaction of Dawn with GLFW windows in samples
+ static_library("bindings") {
+ configs += [ "${dawn_root}/src/dawn/common:internal_config" ]
+
+ sources = [
+ "BackendBinding.cpp",
+ "BackendBinding.h",
+ ]
+
+ public_deps = [ "${dawn_root}/include/dawn:headers" ]
+
+ deps = [
+ ":glfw",
+ "${dawn_root}/src/dawn/common",
+ "${dawn_root}/src/dawn/native",
+ ]
+ libs = []
+ frameworks = []
+
+ if (dawn_enable_d3d12) {
+ sources += [ "D3D12Binding.cpp" ]
+ }
+
+ if (dawn_enable_metal) {
+ sources += [ "MetalBinding.mm" ]
+ frameworks += [
+ "Metal.framework",
+ "QuartzCore.framework",
+ ]
+
+ # Suppress warnings that Metal isn't in the deployment target of Chrome
+ if (is_mac) {
+ cflags_objcc = [ "-Wno-unguarded-availability" ]
+ }
+ }
+
+ if (dawn_enable_null) {
+ sources += [ "NullBinding.cpp" ]
+ }
+
+ if (dawn_enable_opengl) {
+ sources += [ "OpenGLBinding.cpp" ]
+ }
+
+ if (dawn_enable_vulkan) {
+ sources += [ "VulkanBinding.cpp" ]
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp
new file mode 100644
index 00000000000..f97e6b94275
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.cpp
@@ -0,0 +1,109 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Compiler.h"
+
+#include "GLFW/glfw3.h"
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+# include "dawn/native/OpenGLBackend.h"
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+
+namespace utils {
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+ BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+ BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+ BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
+#endif
+
+ BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
+ : mWindow(window), mDevice(device) {
+ }
+
+ void DiscoverAdapter(dawn::native::Instance* instance,
+ GLFWwindow* window,
+ wgpu::BackendType type) {
+ DAWN_UNUSED(type);
+ DAWN_UNUSED(window);
+
+ if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
+#if defined(DAWN_ENABLE_BACKEND_OPENGL)
+ glfwMakeContextCurrent(window);
+ auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+ if (type == wgpu::BackendType::OpenGL) {
+ dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
+ adapterOptions.getProc = getProc;
+ instance->DiscoverAdapters(&adapterOptions);
+ } else {
+ dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
+ adapterOptions.getProc = getProc;
+ instance->DiscoverAdapters(&adapterOptions);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
+ } else {
+ instance->DiscoverDefaultAdapters();
+ }
+ }
+
+ BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
+ switch (type) {
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+ case wgpu::BackendType::D3D12:
+ return CreateD3D12Binding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ case wgpu::BackendType::Metal:
+ return CreateMetalBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+ case wgpu::BackendType::Null:
+ return CreateNullBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+ case wgpu::BackendType::OpenGL:
+ return CreateOpenGLBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+ case wgpu::BackendType::OpenGLES:
+ return CreateOpenGLBinding(window, device);
+#endif
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+ case wgpu::BackendType::Vulkan:
+ return CreateVulkanBinding(window, device);
+#endif
+
+ default:
+ return nullptr;
+ }
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h
new file mode 100644
index 00000000000..352d294c919
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/BackendBinding.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_BACKENDBINDING_H_
+#define UTILS_BACKENDBINDING_H_
+
+#include "dawn/native/DawnNative.h"
+#include "dawn/webgpu_cpp.h"
+
+struct GLFWwindow;
+
+namespace utils {
+
+ class BackendBinding {
+ public:
+ virtual ~BackendBinding() = default;
+
+ virtual uint64_t GetSwapChainImplementation() = 0;
+ virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
+
+ protected:
+ BackendBinding(GLFWwindow* window, WGPUDevice device);
+
+ GLFWwindow* mWindow = nullptr;
+ WGPUDevice mDevice = nullptr;
+ };
+
+ void DiscoverAdapter(dawn::native::Instance* instance,
+ GLFWwindow* window,
+ wgpu::BackendType type);
+ BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
+
+} // namespace utils
+
+#endif // UTILS_BACKENDBINDING_H_
diff --git a/chromium/third_party/dawn/src/utils/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt
index 3ed0994f2f9..3ed0994f2f9 100644
--- a/chromium/third_party/dawn/src/utils/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/utils/CMakeLists.txt
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
new file mode 100644
index 00000000000..9c413d218ec
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
@@ -0,0 +1,28 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ComboRenderBundleEncoderDescriptor.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace utils {
+
+ ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
+ wgpu::RenderBundleEncoderDescriptor* descriptor = this;
+
+ descriptor->colorFormatsCount = 0;
+ descriptor->colorFormats = &cColorFormats[0];
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
new file mode 100644
index 00000000000..c1ef12b50f1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
@@ -0,0 +1,35 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
+#define UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Constants.h"
+
+#include <array>
+
+namespace utils {
+
+ class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
+ public:
+ ComboRenderBundleEncoderDescriptor();
+
+ std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
+ };
+
+} // namespace utils
+
+#endif // UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
new file mode 100644
index 00000000000..1114af433d1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
@@ -0,0 +1,145 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ComboRenderPipelineDescriptor.h"
+
+#include "dawn/utils/WGPUHelpers.h"
+
+namespace utils {
+
+ ComboVertexState::ComboVertexState() {
+ vertexBufferCount = 0;
+
+ // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+ wgpu::VertexAttribute vertexAttribute;
+ vertexAttribute.shaderLocation = 0;
+ vertexAttribute.offset = 0;
+ vertexAttribute.format = wgpu::VertexFormat::Float32;
+ for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+ cAttributes[i] = vertexAttribute;
+ }
+ for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+ cVertexBuffers[i].arrayStride = 0;
+ cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+ cVertexBuffers[i].attributeCount = 0;
+ cVertexBuffers[i].attributes = nullptr;
+ }
+ // cVertexBuffers[i].attributes points to somewhere in cAttributes.
+ // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
+ // cVertexBuffers[2].attributes should point to &cAttributes[5].
+ cVertexBuffers[0].attributes = &cAttributes[0];
+ }
+
+ ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
+ wgpu::RenderPipelineDescriptor* descriptor = this;
+
+ // Set defaults for the vertex state.
+ {
+ wgpu::VertexState* vertex = &descriptor->vertex;
+ vertex->module = nullptr;
+ vertex->entryPoint = "main";
+ vertex->bufferCount = 0;
+
+ // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+ for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+ cAttributes[i].shaderLocation = 0;
+ cAttributes[i].offset = 0;
+ cAttributes[i].format = wgpu::VertexFormat::Float32;
+ }
+ for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+ cBuffers[i].arrayStride = 0;
+ cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+ cBuffers[i].attributeCount = 0;
+ cBuffers[i].attributes = nullptr;
+ }
+ // cBuffers[i].attributes points to somewhere in cAttributes.
+ // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
+ // cBuffers[2].attributes should point to &cAttributes[5].
+ cBuffers[0].attributes = &cAttributes[0];
+ vertex->buffers = &cBuffers[0];
+ }
+
+ // Set the defaults for the primitive state
+ {
+ wgpu::PrimitiveState* primitive = &descriptor->primitive;
+ primitive->topology = wgpu::PrimitiveTopology::TriangleList;
+ primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
+ primitive->frontFace = wgpu::FrontFace::CCW;
+ primitive->cullMode = wgpu::CullMode::None;
+ }
+
+ // Set the defaults for the depth-stencil state
+ {
+ wgpu::StencilFaceState stencilFace;
+ stencilFace.compare = wgpu::CompareFunction::Always;
+ stencilFace.failOp = wgpu::StencilOperation::Keep;
+ stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+ stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+ cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
+ cDepthStencil.depthWriteEnabled = false;
+ cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ cDepthStencil.stencilBack = stencilFace;
+ cDepthStencil.stencilFront = stencilFace;
+ cDepthStencil.stencilReadMask = 0xff;
+ cDepthStencil.stencilWriteMask = 0xff;
+ cDepthStencil.depthBias = 0;
+ cDepthStencil.depthBiasSlopeScale = 0.0;
+ cDepthStencil.depthBiasClamp = 0.0;
+ }
+
+ // Set the defaults for the multisample state
+ {
+ wgpu::MultisampleState* multisample = &descriptor->multisample;
+ multisample->count = 1;
+ multisample->mask = 0xFFFFFFFF;
+ multisample->alphaToCoverageEnabled = false;
+ }
+
+ // Set the defaults for the fragment state
+ {
+ cFragment.module = nullptr;
+ cFragment.entryPoint = "main";
+ cFragment.targetCount = 1;
+ cFragment.targets = &cTargets[0];
+ descriptor->fragment = &cFragment;
+
+ wgpu::BlendComponent blendComponent;
+ blendComponent.srcFactor = wgpu::BlendFactor::One;
+ blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+ blendComponent.operation = wgpu::BlendOperation::Add;
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
+ cTargets[i].blend = nullptr;
+ cTargets[i].writeMask = wgpu::ColorWriteMask::All;
+
+ cBlends[i].color = blendComponent;
+ cBlends[i].alpha = blendComponent;
+ }
+ }
+ }
+
+ wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
+ wgpu::TextureFormat format) {
+ this->depthStencil = &cDepthStencil;
+ cDepthStencil.format = format;
+ return &cDepthStencil;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h
new file mode 100644
index 00000000000..1e4662f58b6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ComboRenderPipelineDescriptor.h
@@ -0,0 +1,64 @@
+// Copyright 2018 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
+#define UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Constants.h"
+
+#include <array>
+
+namespace utils {
+
+ // Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
+ class ComboVertexState {
+ public:
+ ComboVertexState();
+
+ ComboVertexState(const ComboVertexState&) = delete;
+ ComboVertexState& operator=(const ComboVertexState&) = delete;
+ ComboVertexState(ComboVertexState&&) = delete;
+ ComboVertexState& operator=(ComboVertexState&&) = delete;
+
+ uint32_t vertexBufferCount;
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+ };
+
+ class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
+ public:
+ ComboRenderPipelineDescriptor();
+
+ ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
+ ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
+ ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
+
+ wgpu::DepthStencilState* EnableDepthStencil(
+ wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
+
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+ std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
+ std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
+
+ wgpu::FragmentState cFragment;
+ wgpu::DepthStencilState cDepthStencil;
+ };
+
+} // namespace utils
+
+#endif // UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp b/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp
new file mode 100644
index 00000000000..9ed65b2b776
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/D3D12Binding.cpp
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/D3D12Backend.h"
+
+#include "GLFW/glfw3.h"
+#define GLFW_EXPOSE_NATIVE_WIN32
+#include "GLFW/glfw3native.h"
+
+#include <memory>
+
+namespace utils {
+
+ class D3D12Binding : public BackendBinding {
+ public:
+ D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+ }
+
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ HWND win32Window = glfwGetWin32Window(mWindow);
+ mSwapchainImpl =
+ dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ ASSERT(mSwapchainImpl.userData != nullptr);
+ return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+ };
+
+ BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
+ return new D3D12Binding(window, device);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp b/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp
new file mode 100644
index 00000000000..b52b38f3f8f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/EmptyDebugLogger.cpp
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/PlatformDebugLogger.h"
+
+namespace utils {
+
+ class EmptyDebugLogger : public PlatformDebugLogger {
+ public:
+ EmptyDebugLogger() = default;
+ ~EmptyDebugLogger() override = default;
+ };
+
+ PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new EmptyDebugLogger();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp
new file mode 100644
index 00000000000..de77ccd4623
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.cpp
@@ -0,0 +1,88 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/GLFWUtils.h"
+
+#include "GLFW/glfw3.h"
+#include "dawn/common/Platform.h"
+
+#include <cstdlib>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# define GLFW_EXPOSE_NATIVE_WIN32
+#elif defined(DAWN_USE_X11)
+# define GLFW_EXPOSE_NATIVE_X11
+#endif
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+ void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
+ if (type == wgpu::BackendType::OpenGL) {
+ // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+ // texture views.
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ } else if (type == wgpu::BackendType::OpenGLES) {
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+ glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+ } else {
+ // Without this GLFW will initialize a GL context on the window, which prevents using
+ // the window with other APIs (by crashing in weird ways).
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+ }
+ }
+
+ wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window) {
+ std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+ SetupWindowAndGetSurfaceDescriptorForTesting(window);
+
+ wgpu::SurfaceDescriptor descriptor;
+ descriptor.nextInChain = chainedDescriptor.get();
+ wgpu::Surface surface = instance.CreateSurface(&descriptor);
+
+ return surface;
+ }
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
+ desc->hwnd = glfwGetWin32Window(window);
+ desc->hinstance = GetModuleHandle(nullptr);
+ return std::move(desc);
+ }
+#elif defined(DAWN_USE_X11)
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
+ desc->display = glfwGetX11Display();
+ desc->window = glfwGetX11Window(window);
+ return std::move(desc);
+ }
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+ // SetupWindowAndGetSurfaceDescriptorForTesting defined in GLFWUtils_metal.mm
+#else
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(GLFWwindow*) {
+ return nullptr;
+ }
+#endif
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils.h b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h
index f2299cba9d2..f2299cba9d2 100644
--- a/chromium/third_party/dawn/src/utils/GLFWUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm
new file mode 100644
index 00000000000..b574002ce4d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/GLFWUtils_metal.mm
@@ -0,0 +1,54 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+# error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
+#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#include "dawn/utils/GLFWUtils.h"
+
+#import <QuartzCore/CAMetalLayer.h>
+#include "GLFW/glfw3.h"
+
+#include <cstdlib>
+
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ if (@available(macOS 10.11, *)) {
+ NSWindow* nsWindow = glfwGetCocoaWindow(window);
+ NSView* view = [nsWindow contentView];
+
+ // Create a CAMetalLayer that covers the whole window that will be passed to
+ // CreateSurface.
+ [view setWantsLayer:YES];
+ [view setLayer:[CAMetalLayer layer]];
+
+ // Use retina if the window was created with retina support.
+ [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
+
+ std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
+ desc->layer = [view layer];
+ return std::move(desc);
+ }
+
+ return nullptr;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp b/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp
new file mode 100644
index 00000000000..4caa7acc695
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/Glfw3Fuchsia.cpp
@@ -0,0 +1,100 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A mock GLFW implementation that supports Fuchsia, but only implements
+// the functions called from Dawn.
+
+// NOTE: This must be included before GLFW/glfw3.h because the latter will
+// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
+// the first header to do so for sanity reasons (e.g. undefining weird
+// macros on Windows and Linux).
+// clang-format off
+#include "dawn/common/vulkan_platform.h"
+#include "dawn/common/Assert.h"
+#include <GLFW/glfw3.h>
+// clang-format on
+
+#include <dlfcn.h>
+
+int glfwInit(void) {
+ return GLFW_TRUE;
+}
+
+void glfwDefaultWindowHints(void) {
+}
+
+void glfwWindowHint(int hint, int value) {
+ DAWN_UNUSED(hint);
+ DAWN_UNUSED(value);
+}
+
+struct GLFWwindow {
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddress = nullptr;
+ void* vulkan_loader = nullptr;
+
+ GLFWwindow() {
+ vulkan_loader = ::dlopen("libvulkan.so", RTLD_NOW);
+ ASSERT(vulkan_loader != nullptr);
+ GetInstanceProcAddress = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ dlsym(vulkan_loader, "vkGetInstanceProcAddr"));
+ ASSERT(GetInstanceProcAddress != nullptr);
+ }
+
+ ~GLFWwindow() {
+ if (vulkan_loader) {
+ ::dlclose(vulkan_loader);
+ }
+ vulkan_loader = nullptr;
+ }
+};
+
+GLFWwindow* glfwCreateWindow(int width,
+ int height,
+ const char* title,
+ GLFWmonitor* monitor,
+ GLFWwindow* share) {
+ ASSERT(monitor == nullptr);
+ ASSERT(share == nullptr);
+ DAWN_UNUSED(width);
+ DAWN_UNUSED(height);
+ DAWN_UNUSED(title);
+ return new GLFWwindow();
+}
+
+VkResult glfwCreateWindowSurface(VkInstance instance,
+ GLFWwindow* window,
+ const VkAllocationCallbacks* allocator,
+ VkSurfaceKHR* surface) {
+ // IMPORTANT: This assumes that the VkInstance was created with a Fuchsia
+ // swapchain layer enabled, as well as the corresponding extension that
+ // is queried here to perform the surface creation. Dawn should do all
+ // required steps in VulkanInfo.cpp, VulkanFunctions.cpp and BackendVk.cpp.
+
+ auto vkCreateImagePipeSurfaceFUCHSIA = reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
+ window->GetInstanceProcAddress(instance, "vkCreateImagePipeSurfaceFUCHSIA"));
+ ASSERT(vkCreateImagePipeSurfaceFUCHSIA != nullptr);
+ if (!vkCreateImagePipeSurfaceFUCHSIA) {
+ *surface = VK_NULL_HANDLE;
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ const struct VkImagePipeSurfaceCreateInfoFUCHSIA create_info = {
+ VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA,
+ nullptr, // pNext
+ 0, // flags, ignored for now
+ ZX_HANDLE_INVALID, // imagePipeHandle, a null handle matches the framebuffer.
+ };
+
+ return vkCreateImagePipeSurfaceFUCHSIA(instance, &create_info, nullptr, surface);
+}
diff --git a/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm b/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm
new file mode 100644
index 00000000000..b35245cf739
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/MetalBinding.mm
@@ -0,0 +1,135 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/native/MetalBackend.h"
+
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include "GLFW/glfw3.h"
+#include "GLFW/glfw3native.h"
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace utils {
+ class SwapChainImplMTL {
+ public:
+ using WSIContext = DawnWSIContextMetal;
+
+ SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {
+ }
+
+ ~SwapChainImplMTL() {
+ [mCurrentTexture release];
+ [mCurrentDrawable release];
+ }
+
+ void Init(DawnWSIContextMetal* ctx) {
+ mMtlDevice = ctx->device;
+ mCommandQueue = ctx->queue;
+ }
+
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
+ uint32_t width,
+ uint32_t height) {
+ if (format != WGPUTextureFormat_BGRA8Unorm) {
+ return "unsupported format";
+ }
+ ASSERT(width > 0);
+ ASSERT(height > 0);
+
+ NSView* contentView = [mNsWindow contentView];
+ [contentView setWantsLayer:YES];
+
+ CGSize size = {};
+ size.width = width;
+ size.height = height;
+
+ mLayer = [CAMetalLayer layer];
+ [mLayer setDevice:mMtlDevice];
+ [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
+ [mLayer setDrawableSize:size];
+
+ constexpr uint32_t kFramebufferOnlyTextureUsages =
+ WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
+ bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
+ if (hasOnlyFramebufferUsages) {
+ [mLayer setFramebufferOnly:YES];
+ }
+
+ [contentView setLayer:mLayer];
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+ [mCurrentDrawable release];
+ mCurrentDrawable = [mLayer nextDrawable];
+ [mCurrentDrawable retain];
+
+ [mCurrentTexture release];
+ mCurrentTexture = mCurrentDrawable.texture;
+ [mCurrentTexture retain];
+
+ nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ DawnSwapChainError Present() {
+ id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
+ [commandBuffer presentDrawable:mCurrentDrawable];
+ [commandBuffer commit];
+
+ return DAWN_SWAP_CHAIN_NO_ERROR;
+ }
+
+ private:
+ id mNsWindow = nil;
+ id<MTLDevice> mMtlDevice = nil;
+ id<MTLCommandQueue> mCommandQueue = nil;
+
+ CAMetalLayer* mLayer = nullptr;
+ id<CAMetalDrawable> mCurrentDrawable = nil;
+ id<MTLTexture> mCurrentTexture = nil;
+ };
+
+ class MetalBinding : public BackendBinding {
+ public:
+ MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+ }
+
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl = CreateSwapChainImplementation(
+ new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_BGRA8Unorm;
+ }
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+ };
+
+ BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
+ return new MetalBinding(window, device);
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp
new file mode 100644
index 00000000000..c33b6dda40f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/NullBinding.cpp
@@ -0,0 +1,47 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/NullBackend.h"
+
+#include <memory>
+
+namespace utils {
+
+ class NullBinding : public BackendBinding {
+ public:
+ NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+ }
+
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_RGBA8Unorm;
+ }
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+ };
+
+ BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
+ return new NullBinding(window, device);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp
new file mode 100644
index 00000000000..818b27bf717
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/OSXTimer.cpp
@@ -0,0 +1,77 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <CoreServices/CoreServices.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+
+namespace utils {
+
+ class OSXTimer : public Timer {
+ public:
+ OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
+ }
+
+ ~OSXTimer() override = default;
+
+ void Start() override {
+ mStartTime = mach_absolute_time();
+ // Cache secondCoeff
+ GetSecondCoeff();
+ mRunning = true;
+ }
+
+ void Stop() override {
+ mStopTime = mach_absolute_time();
+ mRunning = false;
+ }
+
+ double GetElapsedTime() const override {
+ if (mRunning) {
+ return mSecondCoeff * (mach_absolute_time() - mStartTime);
+ } else {
+ return mSecondCoeff * (mStopTime - mStartTime);
+ }
+ }
+
+ double GetAbsoluteTime() override {
+ return GetSecondCoeff() * mach_absolute_time();
+ }
+
+ private:
+ double GetSecondCoeff() {
+ // If this is the first time we've run, get the timebase.
+ if (mSecondCoeff == 0.0) {
+ mach_timebase_info_data_t timebaseInfo;
+ mach_timebase_info(&timebaseInfo);
+
+ mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
+ }
+
+ return mSecondCoeff;
+ }
+
+ bool mRunning;
+ uint64_t mStartTime;
+ uint64_t mStopTime;
+ double mSecondCoeff;
+ };
+
+ Timer* CreateTimer() {
+ return new OSXTimer();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ObjCUtils.h b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h
index 17b3956a165..17b3956a165 100644
--- a/chromium/third_party/dawn/src/utils/ObjCUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm
new file mode 100644
index 00000000000..c006976409f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ObjCUtils.mm
@@ -0,0 +1,25 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ObjCUtils.h"
+
+#include <QuartzCore/CALayer.h>
+
+namespace utils {
+
+ void* CreateDummyCALayer() {
+ return [CALayer layer];
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp
new file mode 100644
index 00000000000..35972afc648
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/OpenGLBinding.cpp
@@ -0,0 +1,55 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Platform.h"
+#include "dawn/common/SwapChainUtils.h"
+#include "dawn/dawn_wsi.h"
+#include "dawn/native/OpenGLBackend.h"
+
+#include <cstdio>
+#include "GLFW/glfw3.h"
+
+namespace utils {
+
+ class OpenGLBinding : public BackendBinding {
+ public:
+ OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+ }
+
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
+ mDevice,
+ [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
+ mWindow);
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+ };
+
+ BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
+ return new OpenGLBinding(window, device);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/PlatformDebugLogger.h b/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h
index 33c46dec44b..33c46dec44b 100644
--- a/chromium/third_party/dawn/src/utils/PlatformDebugLogger.h
+++ b/chromium/third_party/dawn/src/dawn/utils/PlatformDebugLogger.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp
new file mode 100644
index 00000000000..18eb5e6bf95
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/PosixTimer.cpp
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <stdint.h>
+#include <time.h>
+
+namespace utils {
+
+ namespace {
+
+ uint64_t GetCurrentTimeNs() {
+ struct timespec currentTime;
+ clock_gettime(CLOCK_MONOTONIC, &currentTime);
+ return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
+ }
+
+ } // anonymous namespace
+
+ class PosixTimer : public Timer {
+ public:
+ PosixTimer() : Timer(), mRunning(false) {
+ }
+
+ ~PosixTimer() override = default;
+
+ void Start() override {
+ mStartTimeNs = GetCurrentTimeNs();
+ mRunning = true;
+ }
+
+ void Stop() override {
+ mStopTimeNs = GetCurrentTimeNs();
+ mRunning = false;
+ }
+
+ double GetElapsedTime() const override {
+ uint64_t endTimeNs;
+ if (mRunning) {
+ endTimeNs = GetCurrentTimeNs();
+ } else {
+ endTimeNs = mStopTimeNs;
+ }
+
+ return (endTimeNs - mStartTimeNs) * 1e-9;
+ }
+
+ double GetAbsoluteTime() override {
+ return GetCurrentTimeNs() * 1e-9;
+ }
+
+ private:
+ bool mRunning;
+ uint64_t mStartTimeNs;
+ uint64_t mStopTimeNs;
+ };
+
+ Timer* CreateTimer() {
+ return new PosixTimer();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp
new file mode 100644
index 00000000000..2f5f050a44b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.cpp
@@ -0,0 +1,34 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ScopedAutoreleasePool.h"
+
+#include "dawn/common/Compiler.h"
+
+namespace utils {
+
+ ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
+ DAWN_UNUSED(mPool);
+ }
+
+ ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
+
+ ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+ }
+
+ ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+ return *this;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h
new file mode 100644
index 00000000000..bd00a1a196f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_SCOPEDAUTORELEASEPOOL_H_
+#define UTILS_SCOPEDAUTORELEASEPOOL_H_
+
+#include "dawn/common/Compiler.h"
+
+#include <cstddef>
+
+namespace utils {
+
+ /**
+ * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
+ * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
+ * is a no-op.
+ *
+ * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
+ * expects a pool to always be available in each thread. If a pool is not available, then
+ * autoreleased objects will never be released and will leak.
+ *
+ * In long-running blocks of code or loops, it is important to periodically create and drain
+ * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
+ * per-test. In graphics applications it's advised to create an autoreleasepool around the
+ * frame loop. Ex.)
+ * void frame() {
+ * // Any protocol objects will be reclaimed when this object falls out of scope.
+ * utils::ScopedAutoreleasePool pool;
+ *
+ * // do rendering ...
+ * }
+ */
+ class [[nodiscard]] ScopedAutoreleasePool {
+ public:
+ ScopedAutoreleasePool();
+ ~ScopedAutoreleasePool();
+
+ ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
+ ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
+
+ ScopedAutoreleasePool(ScopedAutoreleasePool &&);
+ ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
+
+ private:
+ void* mPool = nullptr;
+ };
+
+} // namespace utils
+
+#endif // UTILS_SCOPEDAUTORELEASEPOOL_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm
new file mode 100644
index 00000000000..c4cb9a285a2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/ScopedAutoreleasePool.mm
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/ScopedAutoreleasePool.h"
+
+#import <Foundation/Foundation.h>
+
+namespace utils {
+
+ ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {
+ }
+
+ ScopedAutoreleasePool::~ScopedAutoreleasePool() {
+ if (mPool != nullptr) {
+ [static_cast<NSAutoreleasePool*>(mPool) release];
+ mPool = nullptr;
+ }
+ }
+
+ ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+ mPool = rhs.mPool;
+ rhs.mPool = nullptr;
+ }
+
+ ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+ if (&rhs != this) {
+ mPool = rhs.mPool;
+ rhs.mPool = nullptr;
+ }
+ return *this;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp
new file mode 100644
index 00000000000..9010e2b20d3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.cpp
@@ -0,0 +1,39 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Platform.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include <Windows.h>
+#elif defined(DAWN_PLATFORM_POSIX)
+# include <unistd.h>
+#else
+# error "Unsupported platform."
+#endif
+
+namespace utils {
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ void USleep(unsigned int usecs) {
+ Sleep(static_cast<DWORD>(usecs / 1000));
+ }
+#elif defined(DAWN_PLATFORM_POSIX)
+ void USleep(unsigned int usecs) {
+ usleep(usecs);
+ }
+#else
+# error "Implement USleep for your platform."
+#endif
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/SystemUtils.h b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h
index 1f42cc539a6..1f42cc539a6 100644
--- a/chromium/third_party/dawn/src/utils/SystemUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/SystemUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp
new file mode 100644
index 00000000000..b99243b435f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.cpp
@@ -0,0 +1,59 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/TerribleCommandBuffer.h"
+
+#include "dawn/common/Assert.h"
+
+namespace utils {
+
+ TerribleCommandBuffer::TerribleCommandBuffer() {
+ }
+
+ TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
+ : mHandler(handler) {
+ }
+
+ void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
+ mHandler = handler;
+ }
+
+ size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
+ return sizeof(mBuffer);
+ }
+
+ void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
+ // Note: This returns non-null even if size is zero.
+ if (size > sizeof(mBuffer)) {
+ return nullptr;
+ }
+ char* result = &mBuffer[mOffset];
+ if (sizeof(mBuffer) - size < mOffset) {
+ if (!Flush()) {
+ return nullptr;
+ }
+ return GetCmdSpace(size);
+ }
+
+ mOffset += size;
+ return result;
+ }
+
+ bool TerribleCommandBuffer::Flush() {
+ bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
+ mOffset = 0;
+ return success;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h
new file mode 100644
index 00000000000..6960b2b8856
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/TerribleCommandBuffer.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TERRIBLE_COMMAND_BUFFER_H_
+#define UTILS_TERRIBLE_COMMAND_BUFFER_H_
+
+#include "dawn/wire/Wire.h"
+
+namespace utils {
+
+ class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
+ public:
+ TerribleCommandBuffer();
+ TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
+
+ void SetHandler(dawn::wire::CommandHandler* handler);
+
+ size_t GetMaximumAllocationSize() const override;
+
+ void* GetCmdSpace(size_t size) override;
+ bool Flush() override;
+
+ private:
+ dawn::wire::CommandHandler* mHandler = nullptr;
+ size_t mOffset = 0;
+ char mBuffer[1000000];
+ };
+
+} // namespace utils
+
+#endif // UTILS_TERRIBLE_COMMAND_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp
new file mode 100644
index 00000000000..31535f26440
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/TestUtils.cpp
@@ -0,0 +1,181 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/TestUtils.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Constants.h"
+#include "dawn/common/Math.h"
+#include "dawn/utils/TextureUtils.h"
+#include "dawn/utils/WGPUHelpers.h"
+
+#include <vector>
+
+namespace utils {
+
+ uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+ const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
+ const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+ ASSERT(width % blockWidth == 0);
+ return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
+ }
+
+ TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ wgpu::TextureDimension dimension,
+ uint32_t rowsPerImage) {
+ // Compressed texture formats not supported in this function yet.
+ ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
+
+ TextureDataCopyLayout layout;
+
+ layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
+ std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
+ textureSizeAtLevel0.depthOrArrayLayers};
+
+ if (dimension == wgpu::TextureDimension::e3D) {
+ layout.mipSize.depthOrArrayLayers =
+ std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
+ }
+
+ layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
+
+ if (rowsPerImage == wgpu::kCopyStrideUndefined) {
+ rowsPerImage = layout.mipSize.height;
+ }
+ layout.rowsPerImage = rowsPerImage;
+
+ uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+ layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+
+ layout.byteLength =
+ RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
+
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+ layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+ layout.texelBlockCount = layout.byteLength / bytesPerTexel;
+
+ return layout;
+ }
+
+ uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat) {
+ uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
+ uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
+ uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
+ ASSERT(copyExtent.width % blockWidth == 0);
+ uint32_t widthInBlocks = copyExtent.width / blockWidth;
+ ASSERT(copyExtent.height % blockHeight == 0);
+ uint32_t heightInBlocks = copyExtent.height / blockHeight;
+ return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
+ copyExtent.depthOrArrayLayers, blockSize);
+ }
+
+ uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ uint64_t widthInBlocks,
+ uint64_t heightInBlocks,
+ uint64_t depth,
+ uint64_t bytesPerBlock) {
+ if (depth == 0) {
+ return 0;
+ }
+
+ uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
+ uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
+ if (heightInBlocks != 0) {
+ uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
+ uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
+ requiredBytesInCopy += lastImageBytes;
+ }
+ return requiredBytesInCopy;
+ }
+
+ uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat) {
+ return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
+ utils::GetTexelBlockSizeInBytes(textureFormat);
+ }
+
+ void UnalignDynamicUploader(wgpu::Device device) {
+ std::vector<uint8_t> data = {1};
+
+ wgpu::TextureDescriptor descriptor = {};
+ descriptor.size = {1, 1, 1};
+ descriptor.format = wgpu::TextureFormat::R8Unorm;
+ descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+ wgpu::ImageCopyTexture imageCopyTexture =
+ utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+ wgpu::TextureDataLayout textureDataLayout =
+ utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+ wgpu::Extent3D copyExtent = {1, 1, 1};
+
+ // WriteTexture with exactly 1 byte of data.
+ device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
+ &copyExtent);
+ }
+
+ uint32_t VertexFormatSize(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Snorm8x2:
+ return 2;
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Sint32:
+ return 4;
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x4:
+ case wgpu::VertexFormat::Float16x4:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Sint32x2:
+ return 8;
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Sint32x3:
+ return 12;
+ case wgpu::VertexFormat::Float32x4:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32x4:
+ return 16;
+ case wgpu::VertexFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.h b/chromium/third_party/dawn/src/dawn/utils/TestUtils.h
index 02b0dafb31a..02b0dafb31a 100644
--- a/chromium/third_party/dawn/src/utils/TestUtils.h
+++ b/chromium/third_party/dawn/src/dawn/utils/TestUtils.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp
new file mode 100644
index 00000000000..099cad24b88
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.cpp
@@ -0,0 +1,707 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "TextureUtils.h"
+
+namespace utils {
+ bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth32Float:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
+ if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
+ IsASTCTextureFormat(textureFormat)) {
+ return false;
+ }
+
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return false;
+
+ default:
+ return true;
+ }
+ }
+
+ uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ return 1u;
+
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ return 2u;
+
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return 4u;
+
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ return 8u;
+
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return 16u;
+
+ case wgpu::TextureFormat::Depth16Unorm:
+ return 2u;
+
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32Float:
+ return 4u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ return 8u;
+
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return 16u;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ return 8u;
+
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 16u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 16u;
+
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 4u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Depth24UnormStencil8:
+ case wgpu::TextureFormat::Depth32FloatStencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 4u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
+ // Block size of a multi-planar format depends on aspect.
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ case wgpu::TextureFormat::Undefined:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return "f32";
+
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "u32";
+
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "i32";
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ return 1u;
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ return 2u;
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return 4u;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::RGBA8Unorm:
+ return "rgba8unorm";
+ case wgpu::TextureFormat::RGBA8Snorm:
+ return "rgba8snorm";
+ case wgpu::TextureFormat::RGBA8Uint:
+ return "rgba8uint";
+ case wgpu::TextureFormat::RGBA8Sint:
+ return "rgba8sint";
+ case wgpu::TextureFormat::RGBA16Uint:
+ return "rgba16uint";
+ case wgpu::TextureFormat::RGBA16Sint:
+ return "rgba16sint";
+ case wgpu::TextureFormat::RGBA16Float:
+ return "rgba16float";
+ case wgpu::TextureFormat::R32Uint:
+ return "r32uint";
+ case wgpu::TextureFormat::R32Sint:
+ return "r32sint";
+ case wgpu::TextureFormat::R32Float:
+ return "r32float";
+ case wgpu::TextureFormat::RG32Uint:
+ return "rg32uint";
+ case wgpu::TextureFormat::RG32Sint:
+ return "rg32sint";
+ case wgpu::TextureFormat::RG32Float:
+ return "rg32float";
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "rgba32uint";
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "rgba32sint";
+ case wgpu::TextureFormat::RGBA32Float:
+ return "rgba32float";
+
+ // The below do not currently exist in the WGSL spec, but are used
+ // for tests that expect compilation failure.
+ case wgpu::TextureFormat::R8Unorm:
+ return "r8unorm";
+ case wgpu::TextureFormat::R8Snorm:
+ return "r8snorm";
+ case wgpu::TextureFormat::R8Uint:
+ return "r8uint";
+ case wgpu::TextureFormat::R8Sint:
+ return "r8sint";
+ case wgpu::TextureFormat::R16Uint:
+ return "r16uint";
+ case wgpu::TextureFormat::R16Sint:
+ return "r16sint";
+ case wgpu::TextureFormat::R16Float:
+ return "r16float";
+ case wgpu::TextureFormat::RG8Unorm:
+ return "rg8unorm";
+ case wgpu::TextureFormat::RG8Snorm:
+ return "rg8snorm";
+ case wgpu::TextureFormat::RG8Uint:
+ return "rg8uint";
+ case wgpu::TextureFormat::RG8Sint:
+ return "rg8sint";
+ case wgpu::TextureFormat::RG16Uint:
+ return "rg16uint";
+ case wgpu::TextureFormat::RG16Sint:
+ return "rg16sint";
+ case wgpu::TextureFormat::RG16Float:
+ return "rg16float";
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ return "rgb10a2unorm";
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ return "rg11b10ufloat";
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ wgpu::TextureDimension ViewDimensionToTextureDimension(
+ const wgpu::TextureViewDimension dimension) {
+ switch (dimension) {
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return wgpu::TextureDimension::e2D;
+ case wgpu::TextureViewDimension::e3D:
+ return wgpu::TextureDimension::e3D;
+ // TODO(crbug.com/dawn/814): Implement for 1D texture.
+ case wgpu::TextureViewDimension::e1D:
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h
new file mode 100644
index 00000000000..b75903b7e34
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/TextureUtils.h
@@ -0,0 +1,248 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TEXTURE_UTILS_H_
+#define UTILS_TEXTURE_UTILS_H_
+
+#include <array>
+
+#include <dawn/webgpu_cpp.h>
+
+#include "dawn/common/Assert.h"
+
+namespace utils {
+ // TODO(dawn:666): Add Stencil8 format when it's implemented.
+ static constexpr std::array<wgpu::TextureFormat, 94> kAllTextureFormats = {
+ wgpu::TextureFormat::R8Unorm,
+ wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint,
+ wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint,
+ wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float,
+ wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm,
+ wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint,
+ wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint,
+ wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint,
+ wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float,
+ wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb,
+ wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint,
+ wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm,
+ wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm,
+ wgpu::TextureFormat::RG11B10Ufloat,
+ wgpu::TextureFormat::RGB9E5Ufloat,
+ wgpu::TextureFormat::RG32Float,
+ wgpu::TextureFormat::RG32Uint,
+ wgpu::TextureFormat::RG32Sint,
+ wgpu::TextureFormat::RGBA16Uint,
+ wgpu::TextureFormat::RGBA16Sint,
+ wgpu::TextureFormat::RGBA16Float,
+ wgpu::TextureFormat::RGBA32Float,
+ wgpu::TextureFormat::RGBA32Uint,
+ wgpu::TextureFormat::RGBA32Sint,
+ wgpu::TextureFormat::Depth16Unorm,
+ wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus,
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+ wgpu::TextureFormat::BC1RGBAUnorm,
+ wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm,
+ wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm,
+ wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm,
+ wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm,
+ wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat,
+ wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm,
+ wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8Unorm,
+ wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm,
+ wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm,
+ wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm,
+ wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm,
+ wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm,
+ wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm,
+ wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm,
+ wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm,
+ wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm,
+ wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm,
+ wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm,
+ wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm,
+ wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm,
+ wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm,
+ wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm,
+ wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm,
+ wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm,
+ wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm,
+ wgpu::TextureFormat::ASTC12x12UnormSrgb};
+
+ static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
+ wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint, wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float, wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm, wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint, wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint, wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float, wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint, wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Ufloat,
+ wgpu::TextureFormat::RGB9E5Ufloat, wgpu::TextureFormat::RG32Float,
+ wgpu::TextureFormat::RG32Uint, wgpu::TextureFormat::RG32Sint,
+ wgpu::TextureFormat::RGBA16Uint, wgpu::TextureFormat::RGBA16Sint,
+ wgpu::TextureFormat::RGBA16Float, wgpu::TextureFormat::RGBA32Float,
+ wgpu::TextureFormat::RGBA32Uint, wgpu::TextureFormat::RGBA32Sint,
+ wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+ };
+
+ static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
+
+ static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm};
+
+ static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
+ };
+
+ static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb};
+ static_assert(kCompressedFormats.size() ==
+ kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
+ "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
+
+ // TODO(dawn:666): Add Stencil8 format when it's implemented.
+ static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
+ wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
+ };
+ static constexpr std::array<wgpu::TextureFormat, 3> kStencilFormats = {
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+ };
+ static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::Depth24UnormStencil8,
+ wgpu::TextureFormat::Depth32FloatStencil8,
+ };
+
+ bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+
+ bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
+ bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
+ bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
+
+ bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
+
+ bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
+
+ uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+ uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
+ uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
+
+ const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
+ const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
+ uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
+
+ wgpu::TextureDimension ViewDimensionToTextureDimension(
+ const wgpu::TextureViewDimension dimension);
+} // namespace utils
+
+#endif
diff --git a/chromium/third_party/dawn/src/utils/Timer.h b/chromium/third_party/dawn/src/dawn/utils/Timer.h
index 86587ddba06..86587ddba06 100644
--- a/chromium/third_party/dawn/src/utils/Timer.h
+++ b/chromium/third_party/dawn/src/dawn/utils/Timer.h
diff --git a/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp b/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp
new file mode 100644
index 00000000000..fc94090a0b2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/VulkanBinding.cpp
@@ -0,0 +1,57 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/BackendBinding.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/native/VulkanBackend.h"
+
+// Include GLFW after VulkanBackend so that it declares the Vulkan-specific functions
+#include "GLFW/glfw3.h"
+
+#include <memory>
+
+namespace utils {
+
+ class VulkanBinding : public BackendBinding {
+ public:
+ VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+ }
+
+ uint64_t GetSwapChainImplementation() override {
+ if (mSwapchainImpl.userData == nullptr) {
+ VkSurfaceKHR surface = VK_NULL_HANDLE;
+ if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
+ nullptr, &surface) != VK_SUCCESS) {
+ ASSERT(false);
+ }
+
+ mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
+ }
+ return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+ }
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ ASSERT(mSwapchainImpl.userData != nullptr);
+ return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+ }
+
+ private:
+ DawnSwapChainImplementation mSwapchainImpl = {};
+ };
+
+ BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
+ return new VulkanBinding(window, device);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp
new file mode 100644
index 00000000000..238d4039494
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.cpp
@@ -0,0 +1,374 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/WGPUHelpers.h"
+
+#include "dawn/common/Constants.h"
+#include "dawn/common/Log.h"
+
+#include "spirv-tools/optimizer.hpp"
+
+#include <cstring>
+#include <iomanip>
+#include <limits>
+#include <mutex>
+#include <sstream>
+
+namespace utils {
+ wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
+ // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
+ // aren't RAII, we don't return directly on success and instead always go through the code
+ // path that destroys the SPIRV-Tools objects.
+ wgpu::ShaderModule result = nullptr;
+
+ spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+ ASSERT(context != nullptr);
+
+ spv_binary spirv = nullptr;
+ spv_diagnostic diagnostic = nullptr;
+ if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
+ ASSERT(spirv != nullptr);
+ ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
+
+ wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
+ spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
+ spirvDesc.code = spirv->code;
+
+ wgpu::ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &spirvDesc;
+ result = device.CreateShaderModule(&descriptor);
+ } else {
+ ASSERT(diagnostic != nullptr);
+ dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
+ << diagnostic->position.line + 1 << ":"
+ << diagnostic->position.column + 1 << ": " << diagnostic->error;
+ }
+
+ spvDiagnosticDestroy(diagnostic);
+ spvBinaryDestroy(spirv);
+ spvContextDestroy(context);
+
+ return result;
+ }
+
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
+ wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = source;
+ wgpu::ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &wgslDesc;
+ return device.CreateShaderModule(&descriptor);
+ }
+
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ const void* data,
+ uint64_t size,
+ wgpu::BufferUsage usage) {
+ wgpu::BufferDescriptor descriptor;
+ descriptor.size = size;
+ descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+ device.GetQueue().WriteBuffer(buffer, 0, data, size);
+ return buffer;
+ }
+
+ ComboRenderPassDescriptor::ComboRenderPassDescriptor(
+ std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil) {
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+ cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
+ cColorAttachments[i].clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
+ }
+
+ cDepthStencilAttachmentInfo.clearDepth = 1.0f;
+ cDepthStencilAttachmentInfo.clearStencil = 0;
+ cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+ cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+ colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
+ uint32_t colorAttachmentIndex = 0;
+ for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
+ if (colorAttachment.Get() != nullptr) {
+ cColorAttachments[colorAttachmentIndex].view = colorAttachment;
+ }
+ ++colorAttachmentIndex;
+ }
+ colorAttachments = cColorAttachments.data();
+
+ if (depthStencil.Get() != nullptr) {
+ cDepthStencilAttachmentInfo.view = depthStencil;
+ depthStencilAttachment = &cDepthStencilAttachmentInfo;
+ } else {
+ depthStencilAttachment = nullptr;
+ }
+ }
+
+ ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
+ *this = other;
+ }
+
+ const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
+ const ComboRenderPassDescriptor& otherRenderPass) {
+ cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
+ cColorAttachments = otherRenderPass.cColorAttachments;
+ colorAttachmentCount = otherRenderPass.colorAttachmentCount;
+
+ colorAttachments = cColorAttachments.data();
+
+ if (otherRenderPass.depthStencilAttachment != nullptr) {
+ // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
+ depthStencilAttachment = &cDepthStencilAttachmentInfo;
+ } else {
+ depthStencilAttachment = nullptr;
+ }
+
+ return *this;
+ }
+
+ BasicRenderPass::BasicRenderPass()
+ : width(0),
+ height(0),
+ color(nullptr),
+ colorFormat(wgpu::TextureFormat::RGBA8Unorm),
+ renderPassInfo({}) {
+ }
+
+ BasicRenderPass::BasicRenderPass(uint32_t texWidth,
+ uint32_t texHeight,
+ wgpu::Texture colorAttachment,
+ wgpu::TextureFormat textureFormat)
+ : width(texWidth),
+ height(texHeight),
+ color(colorAttachment),
+ colorFormat(textureFormat),
+ renderPassInfo({colorAttachment.CreateView()}) {
+ }
+
+ BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
+ uint32_t width,
+ uint32_t height,
+ wgpu::TextureFormat format) {
+ DAWN_ASSERT(width > 0 && height > 0);
+
+ wgpu::TextureDescriptor descriptor;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
+ descriptor.size.width = width;
+ descriptor.size.height = height;
+ descriptor.size.depthOrArrayLayers = 1;
+ descriptor.sampleCount = 1;
+ descriptor.format = format;
+ descriptor.mipLevelCount = 1;
+ descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture color = device.CreateTexture(&descriptor);
+
+ return BasicRenderPass(width, height, color);
+ }
+
+ wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ wgpu::ImageCopyBuffer imageCopyBuffer = {};
+ imageCopyBuffer.buffer = buffer;
+ imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
+
+ return imageCopyBuffer;
+ }
+
+ wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
+ uint32_t mipLevel,
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect) {
+ wgpu::ImageCopyTexture imageCopyTexture;
+ imageCopyTexture.texture = texture;
+ imageCopyTexture.mipLevel = mipLevel;
+ imageCopyTexture.origin = origin;
+ imageCopyTexture.aspect = aspect;
+
+ return imageCopyTexture;
+ }
+
+ wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ wgpu::TextureDataLayout textureDataLayout;
+ textureDataLayout.offset = offset;
+ textureDataLayout.bytesPerRow = bytesPerRow;
+ textureDataLayout.rowsPerImage = rowsPerImage;
+
+ return textureDataLayout;
+ }
+
+ wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout) {
+ wgpu::PipelineLayoutDescriptor descriptor;
+ if (bindGroupLayout != nullptr) {
+ descriptor.bindGroupLayoutCount = 1;
+ descriptor.bindGroupLayouts = bindGroupLayout;
+ } else {
+ descriptor.bindGroupLayoutCount = 0;
+ descriptor.bindGroupLayouts = nullptr;
+ }
+ return device.CreatePipelineLayout(&descriptor);
+ }
+
+ wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls) {
+ wgpu::PipelineLayoutDescriptor descriptor;
+ descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
+ descriptor.bindGroupLayouts = bgls.data();
+ return device.CreatePipelineLayout(&descriptor);
+ }
+
+ wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
+ std::vector<wgpu::BindGroupLayoutEntry> entries;
+ for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+ entries.push_back(entry);
+ }
+
+ wgpu::BindGroupLayoutDescriptor descriptor;
+ descriptor.entryCount = static_cast<uint32_t>(entries.size());
+ descriptor.entries = entries.data();
+ return device.CreateBindGroupLayout(&descriptor);
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset,
+ uint64_t bufferMinBindingSize) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ buffer.type = bufferType;
+ buffer.hasDynamicOffset = bufferHasDynamicOffset;
+ buffer.minBindingSize = bufferMinBindingSize;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ sampler.type = samplerType;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension textureViewDimension,
+ bool textureMultisampled) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ texture.sampleType = textureSampleType;
+ texture.viewDimension = textureViewDimension;
+ texture.multisampled = textureMultisampled;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension textureViewDimension) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ storageTexture.access = storageTextureAccess;
+ storageTexture.format = format;
+ storageTexture.viewDimension = textureViewDimension;
+ }
+
+ // ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
+ // of declaring a new one every time it's needed.
+ wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::ExternalTextureBindingLayout* bindingLayout) {
+ binding = entryBinding;
+ visibility = entryVisibility;
+ nextInChain = bindingLayout;
+ }
+
+ BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+ const wgpu::BindGroupLayoutEntry& entry)
+ : wgpu::BindGroupLayoutEntry(entry) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::Sampler& sampler)
+ : binding(binding), sampler(sampler) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::TextureView& textureView)
+ : binding(binding), textureView(textureView) {
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(
+ uint32_t binding,
+ const wgpu::ExternalTexture& externalTexture)
+ : binding(binding) {
+ externalTextureBindingEntry.externalTexture = externalTexture;
+ }
+
+ BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+ const wgpu::Buffer& buffer,
+ uint64_t offset,
+ uint64_t size)
+ : binding(binding), buffer(buffer), offset(offset), size(size) {
+ }
+
+ wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+ wgpu::BindGroupEntry result;
+
+ result.binding = binding;
+ result.sampler = sampler;
+ result.textureView = textureView;
+ result.buffer = buffer;
+ result.offset = offset;
+ result.size = size;
+ if (externalTextureBindingEntry.externalTexture != nullptr) {
+ result.nextInChain = &externalTextureBindingEntry;
+ }
+
+ return result;
+ }
+
+ wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+ std::vector<wgpu::BindGroupEntry> entries;
+ for (const BindingInitializationHelper& helper : entriesInitializer) {
+ entries.push_back(helper.GetAsBinding());
+ }
+
+ wgpu::BindGroupDescriptor descriptor;
+ descriptor.layout = layout;
+ descriptor.entryCount = entries.size();
+ descriptor.entries = entries.data();
+
+ return device.CreateBindGroup(&descriptor);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h
new file mode 100644
index 00000000000..f08c142c189
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/WGPUHelpers.h
@@ -0,0 +1,180 @@
+// Copyright 2017 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_DAWNHELPERS_H_
+#define UTILS_DAWNHELPERS_H_
+
+#include <dawn/webgpu_cpp.h>
+
+#include <array>
+#include <initializer_list>
+#include <vector>
+
+#include "dawn/common/Constants.h"
+#include "dawn/utils/TextureUtils.h"
+
+namespace utils {
+
+ enum Expectation { Success, Failure };
+
+ wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
+
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ const void* data,
+ uint64_t size,
+ wgpu::BufferUsage usage);
+
+ template <typename T>
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ wgpu::BufferUsage usage,
+ std::initializer_list<T> data) {
+ return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
+ }
+
+ wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+ wgpu::ImageCopyTexture CreateImageCopyTexture(
+ wgpu::Texture texture,
+ uint32_t level,
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
+ wgpu::TextureDataLayout CreateTextureDataLayout(
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+
+ struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
+ public:
+ ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil = wgpu::TextureView());
+
+ ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
+ const ComboRenderPassDescriptor& operator=(
+ const ComboRenderPassDescriptor& otherRenderPass);
+
+ std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
+ wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
+ };
+
+ struct BasicRenderPass {
+ public:
+ BasicRenderPass();
+ BasicRenderPass(uint32_t width,
+ uint32_t height,
+ wgpu::Texture color,
+ wgpu::TextureFormat texture = kDefaultColorFormat);
+
+ static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+ uint32_t width;
+ uint32_t height;
+ wgpu::Texture color;
+ wgpu::TextureFormat colorFormat;
+ utils::ComboRenderPassDescriptor renderPassInfo;
+ };
+ BasicRenderPass CreateBasicRenderPass(
+ const wgpu::Device& device,
+ uint32_t width,
+ uint32_t height,
+ wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
+
+ wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout);
+
+ wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+ std::vector<wgpu::BindGroupLayout> bgls);
+
+ extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
+
+ // Helpers to make creating bind group layouts look nicer:
+ //
+ // utils::MakeBindGroupLayout(device, {
+ // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+ // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+ // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+ // });
+
+ struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::BufferBindingType bufferType,
+ bool bufferHasDynamicOffset = false,
+ uint64_t bufferMinBindingSize = 0);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::SamplerBindingType samplerType);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::TextureSampleType textureSampleType,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+ bool textureMultisampled = false);
+ BindingLayoutEntryInitializationHelper(
+ uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::StorageTextureAccess storageTextureAccess,
+ wgpu::TextureFormat format,
+ wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+ BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+ wgpu::ShaderStage entryVisibility,
+ wgpu::ExternalTextureBindingLayout* bindingLayout);
+
+ BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
+ };
+
+ wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
+
+ // Helpers to make creating bind groups look nicer:
+ //
+ // utils::MakeBindGroup(device, layout, {
+ // {0, mySampler},
+ // {1, myBuffer, offset, size},
+ // {3, myTextureView}
+ // });
+
+ // Structure with one constructor per-type of bindings, so that the initializer_list accepts
+ // bindings with the right type and no extra information.
+ struct BindingInitializationHelper {
+ BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
+ BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
+ BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
+ BindingInitializationHelper(uint32_t binding,
+ const wgpu::Buffer& buffer,
+ uint64_t offset = 0,
+ uint64_t size = wgpu::kWholeSize);
+
+ wgpu::BindGroupEntry GetAsBinding() const;
+
+ uint32_t binding;
+ wgpu::Sampler sampler;
+ wgpu::TextureView textureView;
+ wgpu::Buffer buffer;
+ wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
+ uint64_t offset = 0;
+ uint64_t size = 0;
+ };
+
+ wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
+ std::initializer_list<BindingInitializationHelper> entriesInitializer);
+
+} // namespace utils
+
+#endif // UTILS_DAWNHELPERS_H_
diff --git a/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp b/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp
new file mode 100644
index 00000000000..159c71a940c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/WindowsDebugLogger.cpp
@@ -0,0 +1,111 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/PlatformDebugLogger.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/windows_with_undefs.h"
+
+#include <array>
+#include <thread>
+
+namespace utils {
+
+ class WindowsDebugLogger : public PlatformDebugLogger {
+ public:
+ WindowsDebugLogger() : PlatformDebugLogger() {
+ if (IsDebuggerPresent()) {
+ // This condition is true when running inside Visual Studio or some other debugger.
+ // Messages are already printed there so we don't need to do anything.
+ return;
+ }
+
+ mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
+ ASSERT(mShouldExitHandle != nullptr);
+
+ mThread = std::thread(
+ [](HANDLE shouldExit) {
+ // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
+ // for the layout of this struct.
+ struct {
+ DWORD process_id;
+ char data[4096 - sizeof(DWORD)];
+ }* dbWinBuffer = nullptr;
+
+ HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
+ 0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
+ ASSERT(file != nullptr);
+ ASSERT(file != INVALID_HANDLE_VALUE);
+
+ dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
+ MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
+ ASSERT(dbWinBuffer != nullptr);
+
+ HANDLE dbWinBufferReady =
+ CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
+ ASSERT(dbWinBufferReady != nullptr);
+
+ HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
+ ASSERT(dbWinDataReady != nullptr);
+
+ std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
+ while (true) {
+ SetEvent(dbWinBufferReady);
+ DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
+ FALSE, INFINITE);
+ if (wait == WAIT_OBJECT_0) {
+ break;
+ }
+ ASSERT(wait == WAIT_OBJECT_0 + 1);
+ fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
+ dbWinBuffer->data);
+ fflush(stderr);
+ }
+
+ CloseHandle(dbWinDataReady);
+ CloseHandle(dbWinBufferReady);
+ UnmapViewOfFile(dbWinBuffer);
+ CloseHandle(file);
+ },
+ mShouldExitHandle);
+ }
+
+ ~WindowsDebugLogger() override {
+ if (IsDebuggerPresent()) {
+ // This condition is true when running inside Visual Studio or some other debugger.
+ // Messages are already printed there so we don't need to do anything.
+ return;
+ }
+
+ if (mShouldExitHandle != nullptr) {
+ BOOL result = SetEvent(mShouldExitHandle);
+ ASSERT(result != 0);
+ CloseHandle(mShouldExitHandle);
+ }
+
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+ }
+
+ private:
+ std::thread mThread;
+ HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
+ };
+
+ PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new WindowsDebugLogger();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp b/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp
new file mode 100644
index 00000000000..ca165d02754
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/WindowsTimer.cpp
@@ -0,0 +1,89 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/Timer.h"
+
+#include <windows.h>
+
+namespace utils {
+
+ class WindowsTimer : public Timer {
+ public:
+ WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
+ }
+
+ ~WindowsTimer() override = default;
+
+ void Start() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+ mStartTime = curTime.QuadPart;
+
+ // Cache the frequency
+ GetFrequency();
+
+ mRunning = true;
+ }
+
+ void Stop() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+ mStopTime = curTime.QuadPart;
+
+ mRunning = false;
+ }
+
+ double GetElapsedTime() const override {
+ LONGLONG endTime;
+ if (mRunning) {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+ endTime = curTime.QuadPart;
+ } else {
+ endTime = mStopTime;
+ }
+
+ return static_cast<double>(endTime - mStartTime) / mFrequency;
+ }
+
+ double GetAbsoluteTime() override {
+ LARGE_INTEGER curTime;
+ QueryPerformanceCounter(&curTime);
+
+ return static_cast<double>(curTime.QuadPart) / GetFrequency();
+ }
+
+ private:
+ LONGLONG GetFrequency() {
+ if (mFrequency == 0) {
+ LARGE_INTEGER frequency = {};
+ QueryPerformanceFrequency(&frequency);
+
+ mFrequency = frequency.QuadPart;
+ }
+
+ return mFrequency;
+ }
+
+ bool mRunning;
+ LONGLONG mStartTime;
+ LONGLONG mStopTime;
+ LONGLONG mFrequency;
+ };
+
+ Timer* CreateTimer() {
+ return new WindowsTimer();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp b/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp
new file mode 100644
index 00000000000..73eed81899d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/utils/WireHelper.cpp
@@ -0,0 +1,178 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/utils/WireHelper.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/common/SystemUtils.h"
+#include "dawn/dawn_proc.h"
+#include "dawn/native/DawnNative.h"
+#include "dawn/utils/TerribleCommandBuffer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <cstring>
+#include <fstream>
+#include <iomanip>
+#include <set>
+#include <sstream>
+
+namespace utils {
+
+ namespace {
+
+ class WireServerTraceLayer : public dawn::wire::CommandHandler {
+ public:
+ WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
+ : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
+ const char* sep = GetPathSeparator();
+ if (mDir.size() > 0 && mDir.back() != *sep) {
+ mDir += sep;
+ }
+ }
+
+ void BeginWireTrace(const char* name) {
+ std::string filename = name;
+ // Replace slashes in gtest names with underscores so everything is in one
+ // directory.
+ std::replace(filename.begin(), filename.end(), '/', '_');
+ std::replace(filename.begin(), filename.end(), '\\', '_');
+
+ // Prepend the filename with the directory.
+ filename = mDir + filename;
+
+ ASSERT(!mFile.is_open());
+ mFile.open(filename,
+ std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
+
+ // Write the initial 8 bytes. This means the fuzzer should never inject an
+ // error.
+ const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
+ mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex),
+ sizeof(injectedErrorIndex));
+ }
+
+ const volatile char* HandleCommands(const volatile char* commands,
+ size_t size) override {
+ if (mFile.is_open()) {
+ mFile.write(const_cast<const char*>(commands), size);
+ }
+ return mHandler->HandleCommands(commands, size);
+ }
+
+ private:
+ std::string mDir;
+ dawn::wire::CommandHandler* mHandler;
+ std::ofstream mFile;
+ };
+
+ class WireHelperDirect : public WireHelper {
+ public:
+ WireHelperDirect() {
+ dawnProcSetProcs(&dawn::native::GetProcs());
+ }
+
+ std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+ ASSERT(backendDevice != nullptr);
+ return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
+ }
+
+ void BeginWireTrace(const char* name) override {
+ }
+
+ bool FlushClient() override {
+ return true;
+ }
+
+ bool FlushServer() override {
+ return true;
+ }
+ };
+
+ class WireHelperProxy : public WireHelper {
+ public:
+ explicit WireHelperProxy(const char* wireTraceDir) {
+ mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+ mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+ dawn::wire::WireServerDescriptor serverDesc = {};
+ serverDesc.procs = &dawn::native::GetProcs();
+ serverDesc.serializer = mS2cBuf.get();
+
+ mWireServer.reset(new dawn::wire::WireServer(serverDesc));
+ mC2sBuf->SetHandler(mWireServer.get());
+
+ if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
+ mWireServerTraceLayer.reset(
+ new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
+ mC2sBuf->SetHandler(mWireServerTraceLayer.get());
+ }
+
+ dawn::wire::WireClientDescriptor clientDesc = {};
+ clientDesc.serializer = mC2sBuf.get();
+
+ mWireClient.reset(new dawn::wire::WireClient(clientDesc));
+ mS2cBuf->SetHandler(mWireClient.get());
+ dawnProcSetProcs(&dawn::wire::client::GetProcs());
+ }
+
+ std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+ ASSERT(backendDevice != nullptr);
+
+ auto reservation = mWireClient->ReserveDevice();
+ mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
+ dawn::native::GetProcs().deviceRelease(backendDevice);
+
+ return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
+ }
+
+ void BeginWireTrace(const char* name) override {
+ if (mWireServerTraceLayer) {
+ return mWireServerTraceLayer->BeginWireTrace(name);
+ }
+ }
+
+ bool FlushClient() override {
+ return mC2sBuf->Flush();
+ }
+
+ bool FlushServer() override {
+ return mS2cBuf->Flush();
+ }
+
+ private:
+ std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+ std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
+ std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
+ std::unique_ptr<dawn::wire::WireServer> mWireServer;
+ std::unique_ptr<dawn::wire::WireClient> mWireClient;
+ };
+
+ } // anonymous namespace
+
+ std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
+ if (useWire) {
+ return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
+ } else {
+ return std::unique_ptr<WireHelper>(new WireHelperDirect());
+ }
+ }
+
+ WireHelper::~WireHelper() {
+ dawnProcSetProcs(nullptr);
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/WireHelper.h b/chromium/third_party/dawn/src/dawn/utils/WireHelper.h
index 78aa8026161..78aa8026161 100644
--- a/chromium/third_party/dawn/src/utils/WireHelper.h
+++ b/chromium/third_party/dawn/src/dawn/utils/WireHelper.h
diff --git a/chromium/third_party/dawn/src/dawn/wire/BUILD.gn b/chromium/third_party/dawn/src/dawn/wire/BUILD.gn
new file mode 100644
index 00000000000..bff2136e0f7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/BUILD.gn
@@ -0,0 +1,111 @@
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import("../../../scripts/dawn_overrides_with_defaults.gni")
+
+import("${dawn_root}/generator/dawn_generator.gni")
+import("${dawn_root}/scripts/dawn_component.gni")
+
+# Public dawn wire headers so they can be publicly visible for dependencies of
+# dawn/wire
+source_set("headers") {
+ public_deps = [ "${dawn_root}/include/dawn:headers" ]
+ all_dependent_configs = [ "${dawn_root}/include/dawn:public" ]
+ sources = [
+ "${dawn_root}/include/dawn/wire/Wire.h",
+ "${dawn_root}/include/dawn/wire/WireClient.h",
+ "${dawn_root}/include/dawn/wire/WireServer.h",
+ "${dawn_root}/include/dawn/wire/dawn_wire_export.h",
+ ]
+}
+
+dawn_json_generator("gen") {
+ target = "wire"
+ outputs = [
+ "src/dawn/wire/ObjectType_autogen.h",
+ "src/dawn/wire/WireCmd_autogen.h",
+ "src/dawn/wire/WireCmd_autogen.cpp",
+ "src/dawn/wire/client/ApiObjects_autogen.h",
+ "src/dawn/wire/client/ApiProcs_autogen.cpp",
+ "src/dawn/wire/client/ClientBase_autogen.h",
+ "src/dawn/wire/client/ClientHandlers_autogen.cpp",
+ "src/dawn/wire/client/ClientPrototypes_autogen.inc",
+ "src/dawn/wire/server/ServerBase_autogen.h",
+ "src/dawn/wire/server/ServerDoers_autogen.cpp",
+ "src/dawn/wire/server/ServerHandlers_autogen.cpp",
+ "src/dawn/wire/server/ServerPrototypes_autogen.inc",
+ ]
+}
+
+dawn_component("wire") {
+ DEFINE_PREFIX = "DAWN_WIRE"
+
+ deps = [
+ ":gen",
+ "${dawn_root}/src/dawn/common",
+ ]
+
+ configs = [ "${dawn_root}/src/dawn/common:internal_config" ]
+ sources = get_target_outputs(":gen")
+ sources += [
+ "BufferConsumer.h",
+ "BufferConsumer_impl.h",
+ "ChunkedCommandHandler.cpp",
+ "ChunkedCommandHandler.h",
+ "ChunkedCommandSerializer.cpp",
+ "ChunkedCommandSerializer.h",
+ "SupportedFeatures.cpp",
+ "SupportedFeatures.h",
+ "Wire.cpp",
+ "WireClient.cpp",
+ "WireDeserializeAllocator.cpp",
+ "WireDeserializeAllocator.h",
+ "WireResult.h",
+ "WireServer.cpp",
+ "client/Adapter.cpp",
+ "client/Adapter.h",
+ "client/ApiObjects.h",
+ "client/Buffer.cpp",
+ "client/Buffer.h",
+ "client/Client.cpp",
+ "client/Client.h",
+ "client/ClientDoers.cpp",
+ "client/ClientInlineMemoryTransferService.cpp",
+ "client/Device.cpp",
+ "client/Device.h",
+ "client/Instance.cpp",
+ "client/Instance.h",
+ "client/LimitsAndFeatures.cpp",
+ "client/LimitsAndFeatures.h",
+ "client/ObjectAllocator.h",
+ "client/Queue.cpp",
+ "client/Queue.h",
+ "client/RequestTracker.h",
+ "client/ShaderModule.cpp",
+ "client/ShaderModule.h",
+ "server/ObjectStorage.h",
+ "server/Server.cpp",
+ "server/Server.h",
+ "server/ServerAdapter.cpp",
+ "server/ServerBuffer.cpp",
+ "server/ServerDevice.cpp",
+ "server/ServerInlineMemoryTransferService.cpp",
+ "server/ServerInstance.cpp",
+ "server/ServerQueue.cpp",
+ "server/ServerShaderModule.cpp",
+ ]
+
+ # Make headers publicly visible
+ public_deps = [ ":headers" ]
+}
diff --git a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h
new file mode 100644
index 00000000000..b11a68a7bfd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer.h
@@ -0,0 +1,85 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_H_
+#define DAWNWIRE_BUFFERCONSUMER_H_
+
+#include "dawn/wire/WireResult.h"
+
+#include <cstddef>
+
+namespace dawn::wire {
+
+ // BufferConsumer is a utility class that allows reading bytes from a buffer
+ // while simultaneously decrementing the amount of remaining space by exactly
+ // the amount read. It helps prevent bugs where incrementing a pointer and
+ // decrementing a size value are not kept in sync.
+ // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
+ template <typename BufferT>
+ class BufferConsumer {
+ static_assert(sizeof(BufferT) == 1,
+ "BufferT must be 1-byte, but may have const/volatile qualifiers.");
+
+ public:
+ BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
+ }
+
+ BufferT* Buffer() const {
+ return mBuffer;
+ }
+ size_t AvailableSize() const {
+ return mSize;
+ }
+
+ protected:
+ template <typename T, typename N>
+ WireResult NextN(N count, T** data);
+
+ template <typename T>
+ WireResult Next(T** data);
+
+ template <typename T>
+ WireResult Peek(T** data);
+
+ private:
+ BufferT* mBuffer;
+ size_t mSize;
+ };
+
+ class SerializeBuffer : public BufferConsumer<char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Next;
+ using BufferConsumer::NextN;
+ };
+
+ class DeserializeBuffer : public BufferConsumer<const volatile char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Peek;
+
+ template <typename T, typename N>
+ WireResult ReadN(N count, const volatile T** data) {
+ return NextN(count, data);
+ }
+
+ template <typename T>
+ WireResult Read(const volatile T** data) {
+ return Next(data);
+ }
+ };
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_BUFFERCONSUMER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h
new file mode 100644
index 00000000000..eef5d726edd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/BufferConsumer_impl.h
@@ -0,0 +1,73 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+#define DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+
+#include "dawn/wire/BufferConsumer.h"
+
+#include <limits>
+#include <type_traits>
+
+namespace dawn::wire {
+
+ template <typename BufferT>
+ template <typename T>
+ WireResult BufferConsumer<BufferT>::Peek(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ return WireResult::Success;
+ }
+
+ template <typename BufferT>
+ template <typename T>
+ WireResult BufferConsumer<BufferT>::Next(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += sizeof(T);
+ mSize -= sizeof(T);
+ return WireResult::Success;
+ }
+
+ template <typename BufferT>
+ template <typename T, typename N>
+ WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
+ static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
+
+ constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+ if (count > kMaxCountWithoutOverflows) {
+ return WireResult::FatalError;
+ }
+
+ // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
+ size_t totalSize = sizeof(T) * count;
+ if (totalSize > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += totalSize;
+ mSize -= totalSize;
+ return WireResult::Success;
+ }
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_BUFFERCONSUMER_IMPL_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt
new file mode 100644
index 00000000000..e490a4d8608
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/CMakeLists.txt
@@ -0,0 +1,83 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+ TARGET "wire"
+ PRINT_NAME "Dawn wire"
+ RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
+)
+
+add_library(dawn_wire ${DAWN_DUMMY_FILE})
+
+target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")
+if(BUILD_SHARED_LIBS)
+ target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_SHARED_LIBRARY")
+endif()
+
+target_sources(dawn_wire PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/wire/Wire.h"
+ "${DAWN_INCLUDE_DIR}/dawn/wire/WireClient.h"
+ "${DAWN_INCLUDE_DIR}/dawn/wire/WireServer.h"
+ "${DAWN_INCLUDE_DIR}/dawn/wire/dawn_wire_export.h"
+ ${DAWN_WIRE_GEN_SOURCES}
+ "BufferConsumer.h"
+ "BufferConsumer_impl.h"
+ "ChunkedCommandHandler.cpp"
+ "ChunkedCommandHandler.h"
+ "ChunkedCommandSerializer.cpp"
+ "ChunkedCommandSerializer.h"
+ "SupportedFeatures.cpp"
+ "SupportedFeatures.h"
+ "Wire.cpp"
+ "WireClient.cpp"
+ "WireDeserializeAllocator.cpp"
+ "WireDeserializeAllocator.h"
+ "WireResult.h"
+ "WireServer.cpp"
+ "client/Adapter.cpp"
+ "client/Adapter.h"
+ "client/ApiObjects.h"
+ "client/Buffer.cpp"
+ "client/Buffer.h"
+ "client/Client.cpp"
+ "client/Client.h"
+ "client/ClientDoers.cpp"
+ "client/ClientInlineMemoryTransferService.cpp"
+ "client/Device.cpp"
+ "client/Device.h"
+ "client/Instance.cpp"
+ "client/Instance.h"
+ "client/LimitsAndFeatures.cpp"
+ "client/LimitsAndFeatures.h"
+ "client/ObjectAllocator.h"
+ "client/Queue.cpp"
+ "client/Queue.h"
+ "client/RequestTracker.h"
+ "client/ShaderModule.cpp"
+ "client/ShaderModule.h"
+ "server/ObjectStorage.h"
+ "server/Server.cpp"
+ "server/Server.h"
+ "server/ServerAdapter.cpp"
+ "server/ServerBuffer.cpp"
+ "server/ServerDevice.cpp"
+ "server/ServerInlineMemoryTransferService.cpp"
+ "server/ServerInstance.cpp"
+ "server/ServerQueue.cpp"
+ "server/ServerShaderModule.cpp"
+)
+target_link_libraries(dawn_wire
+ PUBLIC dawn_headers
+ PRIVATE dawn_common dawn_internal_config
+)
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp
new file mode 100644
index 00000000000..81136867a58
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.cpp
@@ -0,0 +1,79 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/ChunkedCommandHandler.h"
+
+#include "dawn/common/Alloc.h"
+
+#include <algorithm>
+#include <cstring>
+
+namespace dawn::wire {
+
+ ChunkedCommandHandler::~ChunkedCommandHandler() = default;
+
+ const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
+ size_t size) {
+ if (mChunkedCommandRemainingSize > 0) {
+ // If there is a chunked command in flight, append the command data.
+ // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
+ // in-flight chunked command, and then pass the rest along to a second call to
+ // |HandleCommandsImpl|.
+ size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
+
+ memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
+ const_cast<const char*>(commands), chunkSize);
+ mChunkedCommandPutOffset += chunkSize;
+ mChunkedCommandRemainingSize -= chunkSize;
+
+ commands += chunkSize;
+ size -= chunkSize;
+
+ if (mChunkedCommandRemainingSize == 0) {
+ // Once the chunked command is complete, pass the data to the command handler
+            // implementation.
+ auto chunkedCommandData = std::move(mChunkedCommandData);
+ if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) ==
+ nullptr) {
+ // |HandleCommandsImpl| returns nullptr on error. Forward any errors
+ // out.
+ return nullptr;
+ }
+ }
+ }
+
+ return HandleCommandsImpl(commands, size);
+ }
+
+ ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
+ const volatile char* commands,
+ size_t commandSize,
+ size_t initialSize) {
+ ASSERT(!mChunkedCommandData);
+
+ // Reserve space for all the command data we're expecting, and copy the initial data
+ // to the start of the memory.
+ mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
+ if (!mChunkedCommandData) {
+ return ChunkedCommandsResult::Error;
+ }
+
+ memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
+ mChunkedCommandPutOffset = initialSize;
+ mChunkedCommandRemainingSize = commandSize - initialSize;
+
+ return ChunkedCommandsResult::Consumed;
+ }
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h
new file mode 100644
index 00000000000..162fecac4ce
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandHandler.h
@@ -0,0 +1,71 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
+#define DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/Wire.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <cstdint>
+#include <memory>
+
+namespace dawn::wire {
+
+ class ChunkedCommandHandler : public CommandHandler {
+ public:
+ const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
+ ~ChunkedCommandHandler() override;
+
+ protected:
+ enum class ChunkedCommandsResult {
+ Passthrough,
+ Consumed,
+ Error,
+ };
+
+        // Returns |Consumed| if the commands were entirely consumed into the chunked command
+        // buffer and should be handled later once we receive all the command data.
+        // Returns |Passthrough| if commands should be handled immediately; |Error| on failure.
+ ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
+ uint64_t commandSize64 =
+ reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
+
+ if (commandSize64 > std::numeric_limits<size_t>::max()) {
+ return ChunkedCommandsResult::Error;
+ }
+ size_t commandSize = static_cast<size_t>(commandSize64);
+ if (size < commandSize) {
+ return BeginChunkedCommandData(commands, commandSize, size);
+ }
+ return ChunkedCommandsResult::Passthrough;
+ }
+
+ private:
+ virtual const volatile char* HandleCommandsImpl(const volatile char* commands,
+ size_t size) = 0;
+
+ ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
+ size_t commandSize,
+ size_t initialSize);
+
+ size_t mChunkedCommandRemainingSize = 0;
+ size_t mChunkedCommandPutOffset = 0;
+ std::unique_ptr<char[]> mChunkedCommandData;
+ };
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp
new file mode 100644
index 00000000000..b2e4a56d9ad
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.cpp
@@ -0,0 +1,38 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/ChunkedCommandSerializer.h"
+
+namespace dawn::wire {
+
+ ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
+ : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {
+ }
+
+ void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
+ size_t remainingSize) {
+ while (remainingSize > 0) {
+ size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
+ void* dst = mSerializer->GetCmdSpace(chunkSize);
+ if (dst == nullptr) {
+ return;
+ }
+ memcpy(dst, allocatedBuffer, chunkSize);
+
+ allocatedBuffer += chunkSize;
+ remainingSize -= chunkSize;
+ }
+ }
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h
new file mode 100644
index 00000000000..92b8a445e21
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/ChunkedCommandSerializer.h
@@ -0,0 +1,114 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
+#define DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/Wire.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <algorithm>
+#include <cstring>
+#include <memory>
+
+namespace dawn::wire {
+
+ class ChunkedCommandSerializer {
+ public:
+ ChunkedCommandSerializer(CommandSerializer* serializer);
+
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ SerializeCommandImpl(
+ cmd,
+ [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
+ return cmd.Serialize(requiredSize, serializeBuffer);
+ },
+ extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+ }
+
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
+ SerializeCommand(cmd, objectIdProvider, 0,
+ [](SerializeBuffer*) { return WireResult::Success; });
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ const ObjectIdProvider& objectIdProvider,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ SerializeCommandImpl(
+ cmd,
+ [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
+ SerializeBuffer* serializeBuffer) {
+ return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
+ },
+ extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+ }
+
+ private:
+ template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
+ void SerializeCommandImpl(const Cmd& cmd,
+ SerializeCmdFn&& SerializeCmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ size_t commandSize = cmd.GetRequiredSize();
+ size_t requiredSize = commandSize + extraSize;
+
+ if (requiredSize <= mMaxAllocationSize) {
+ char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
+ if (allocatedBuffer != nullptr) {
+ SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+ mSerializer->OnSerializeError();
+ }
+ }
+ return;
+ }
+
+ auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
+ if (!cmdSpace) {
+ return;
+ }
+ SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+ mSerializer->OnSerializeError();
+ return;
+ }
+ SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+ }
+
+ void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
+
+ CommandSerializer* mSerializer;
+ size_t mMaxAllocationSize;
+ };
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp
new file mode 100644
index 00000000000..2d5a9f8125b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.cpp
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire {
+
+ // Note: Upon updating this list, please also update serialization/deserialization
+ // of limit structs on Adapter/Device initialization.
+ bool IsFeatureSupported(WGPUFeatureName feature) {
+ switch (feature) {
+ case WGPUFeatureName_Undefined:
+ case WGPUFeatureName_Force32:
+ case WGPUFeatureName_DawnNative:
+ return false;
+ case WGPUFeatureName_Depth24UnormStencil8:
+ case WGPUFeatureName_Depth32FloatStencil8:
+ case WGPUFeatureName_TimestampQuery:
+ case WGPUFeatureName_PipelineStatisticsQuery:
+ case WGPUFeatureName_TextureCompressionBC:
+ case WGPUFeatureName_TextureCompressionETC2:
+ case WGPUFeatureName_TextureCompressionASTC:
+ case WGPUFeatureName_IndirectFirstInstance:
+ case WGPUFeatureName_DepthClamping:
+ case WGPUFeatureName_DawnShaderFloat16:
+ case WGPUFeatureName_DawnInternalUsages:
+ case WGPUFeatureName_DawnMultiPlanarFormats:
+ return true;
+ }
+
+        // Catch-all for unsupported features.
+ // "default:" is not used so we get compiler errors for
+ // newly added, unhandled features, but still catch completely
+ // unknown enums.
+ return false;
+ }
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h
new file mode 100644
index 00000000000..9c173e1633d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/SupportedFeatures.h
@@ -0,0 +1,26 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SUPPORTEDFEATURES_H_
+#define DAWNWIRE_SUPPORTEDFEATURES_H_
+
+#include <dawn/webgpu.h>
+
+namespace dawn::wire {
+
+ bool IsFeatureSupported(WGPUFeatureName feature);
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_SUPPORTEDFEATURES_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/Wire.cpp b/chromium/third_party/dawn/src/dawn/wire/Wire.cpp
new file mode 100644
index 00000000000..af3e6be57a6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/Wire.cpp
@@ -0,0 +1,28 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/Wire.h"
+
+namespace dawn::wire {
+
+ CommandSerializer::CommandSerializer() = default;
+ CommandSerializer::~CommandSerializer() = default;
+
+ void CommandSerializer::OnSerializeError() {
+ }
+
+ CommandHandler::CommandHandler() = default;
+ CommandHandler::~CommandHandler() = default;
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp
new file mode 100644
index 00000000000..0446da89240
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/WireClient.cpp
@@ -0,0 +1,82 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire {
+
+ WireClient::WireClient(const WireClientDescriptor& descriptor)
+ : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {
+ }
+
+ WireClient::~WireClient() {
+ mImpl.reset();
+ }
+
+ const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
+ return mImpl->HandleCommands(commands, size);
+ }
+
+ ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
+ return mImpl->ReserveTexture(device);
+ }
+
+ ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
+ return mImpl->ReserveSwapChain(device);
+ }
+
+ ReservedDevice WireClient::ReserveDevice() {
+ return mImpl->ReserveDevice();
+ }
+
+ ReservedInstance WireClient::ReserveInstance() {
+ return mImpl->ReserveInstance();
+ }
+
+ void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
+ mImpl->ReclaimTextureReservation(reservation);
+ }
+
+ void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ mImpl->ReclaimSwapChainReservation(reservation);
+ }
+
+ void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+ mImpl->ReclaimDeviceReservation(reservation);
+ }
+
+ void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+ mImpl->ReclaimInstanceReservation(reservation);
+ }
+
+ void WireClient::Disconnect() {
+ mImpl->Disconnect();
+ }
+
+ namespace client {
+ MemoryTransferService::MemoryTransferService() = default;
+
+ MemoryTransferService::~MemoryTransferService() = default;
+
+ MemoryTransferService::ReadHandle::ReadHandle() = default;
+
+ MemoryTransferService::ReadHandle::~ReadHandle() = default;
+
+ MemoryTransferService::WriteHandle::WriteHandle() = default;
+
+ MemoryTransferService::WriteHandle::~WriteHandle() = default;
+ } // namespace client
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp
new file mode 100644
index 00000000000..e0a3432867b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireDeserializeAllocator.h"
+
+#include <algorithm>
+
+namespace dawn::wire {
+ WireDeserializeAllocator::WireDeserializeAllocator() {
+ Reset();
+ }
+
+ WireDeserializeAllocator::~WireDeserializeAllocator() {
+ Reset();
+ }
+
+ void* WireDeserializeAllocator::GetSpace(size_t size) {
+ // Return space in the current buffer if possible first.
+ if (mRemainingSize >= size) {
+ char* buffer = mCurrentBuffer;
+ mCurrentBuffer += size;
+ mRemainingSize -= size;
+ return buffer;
+ }
+
+ // Otherwise allocate a new buffer and try again.
+ size_t allocationSize = std::max(size, size_t(2048));
+ char* allocation = static_cast<char*>(malloc(allocationSize));
+ if (allocation == nullptr) {
+ return nullptr;
+ }
+
+ mAllocations.push_back(allocation);
+ mCurrentBuffer = allocation;
+ mRemainingSize = allocationSize;
+ return GetSpace(size);
+ }
+
+ void WireDeserializeAllocator::Reset() {
+ for (auto allocation : mAllocations) {
+ free(allocation);
+ }
+ mAllocations.clear();
+
+ // The initial buffer is the inline buffer so that some allocations can be skipped
+ mCurrentBuffer = mStaticBuffer;
+ mRemainingSize = sizeof(mStaticBuffer);
+ }
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h
new file mode 100644
index 00000000000..cc2ad7bb6f4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/WireDeserializeAllocator.h
@@ -0,0 +1,43 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
+#define DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
+
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <vector>
+
+namespace dawn::wire {
+    // A really really simple implementation of the DeserializeAllocator. Its main feature
+ // is that it has some inline storage so as to avoid allocations for the majority of
+ // commands.
+ class WireDeserializeAllocator : public DeserializeAllocator {
+ public:
+ WireDeserializeAllocator();
+ virtual ~WireDeserializeAllocator();
+
+ void* GetSpace(size_t size) override;
+
+ void Reset();
+
+ private:
+ size_t mRemainingSize = 0;
+ char* mCurrentBuffer = nullptr;
+ char mStaticBuffer[2048];
+ std::vector<char*> mAllocations;
+ };
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireResult.h b/chromium/third_party/dawn/src/dawn/wire/WireResult.h
new file mode 100644
index 00000000000..4025c2cf49a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/WireResult.h
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRERESULT_H_
+#define DAWNWIRE_WIRERESULT_H_
+
+#include "dawn/common/Compiler.h"
+
+namespace dawn::wire {
+
+ enum class [[nodiscard]] WireResult{
+ Success,
+ FatalError,
+ };
+
+// Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
+#define WIRE_TRY(EXPR) \
+ do { \
+ WireResult exprResult = EXPR; \
+ if (DAWN_UNLIKELY(exprResult != WireResult::Success)) { \
+ return exprResult; \
+ } \
+ } while (0)
+
+} // namespace dawn::wire
+
+#endif // DAWNWIRE_WIRERESULT_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp
new file mode 100644
index 00000000000..bf9b0a11fd0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/WireServer.cpp
@@ -0,0 +1,83 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire {
+
+ WireServer::WireServer(const WireServerDescriptor& descriptor)
+ : mImpl(new server::Server(*descriptor.procs,
+ descriptor.serializer,
+ descriptor.memoryTransferService)) {
+ }
+
+ WireServer::~WireServer() {
+ mImpl.reset();
+ }
+
+ const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
+ return mImpl->HandleCommands(commands, size);
+ }
+
+ bool WireServer::InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
+ }
+
+ bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
+ }
+
+ bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+ return mImpl->InjectDevice(device, id, generation);
+ }
+
+ bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+ return mImpl->InjectInstance(instance, id, generation);
+ }
+
+ WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
+ return mImpl->GetDevice(id, generation);
+ }
+
+ namespace server {
+ MemoryTransferService::MemoryTransferService() = default;
+
+ MemoryTransferService::~MemoryTransferService() = default;
+
+ MemoryTransferService::ReadHandle::ReadHandle() = default;
+
+ MemoryTransferService::ReadHandle::~ReadHandle() = default;
+
+ MemoryTransferService::WriteHandle::WriteHandle() = default;
+
+ MemoryTransferService::WriteHandle::~WriteHandle() = default;
+
+ void MemoryTransferService::WriteHandle::SetTarget(void* data) {
+ mTargetData = data;
+ }
+ void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
+ mDataLength = dataLength;
+ }
+ } // namespace server
+
+} // namespace dawn::wire
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp
new file mode 100644
index 00000000000..b2dcc87f63d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.cpp
@@ -0,0 +1,133 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Adapter.h"
+
+#include "dawn/common/Log.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+ Adapter::~Adapter() {
+ mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+ request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
+ "Adapter destroyed before callback", request->userdata);
+ });
+ }
+
+ void Adapter::CancelCallbacksForDisconnect() {
+ mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+ request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
+ request->userdata);
+ });
+ }
+
+ bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+ return mLimitsAndFeatures.GetLimits(limits);
+ }
+
+ bool Adapter::HasFeature(WGPUFeatureName feature) const {
+ return mLimitsAndFeatures.HasFeature(feature);
+ }
+
+ size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
+ return mLimitsAndFeatures.EnumerateFeatures(features);
+ }
+
+ void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
+ return mLimitsAndFeatures.SetLimits(limits);
+ }
+
+ void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+ }
+
+ void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
+ mProperties = *properties;
+ mProperties.nextInChain = nullptr;
+ }
+
+ void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+ *properties = mProperties;
+ }
+
+ void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
+ return;
+ }
+
+ auto* allocation = client->DeviceAllocator().New(client);
+ uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
+
+ AdapterRequestDeviceCmd cmd;
+ cmd.adapterId = this->id;
+ cmd.requestSerial = serial;
+ cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+ cmd.descriptor = descriptor;
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
+ uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ // May have been deleted or recreated so this isn't an error.
+ if (adapter == nullptr) {
+ return true;
+ }
+ return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits,
+ featuresCount, features);
+ }
+
+ bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ RequestDeviceData request;
+ if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
+
+ // If the return status is a failure we should give a null device to the callback and
+ // free the allocation.
+ if (status != WGPURequestDeviceStatus_Success) {
+ client->DeviceAllocator().Free(device);
+ request.callback(status, nullptr, message, request.userdata);
+ return true;
+ }
+
+ device->SetLimits(limits);
+ device->SetFeatures(features, featuresCount);
+
+ request.callback(status, ToAPI(device), message, request.userdata);
+ return true;
+ }
+
+ WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
+ dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
+ return nullptr;
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h
new file mode 100644
index 00000000000..8753843f8f7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Adapter.h
@@ -0,0 +1,70 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_ADAPTER_H_
+#define DAWNWIRE_CLIENT_ADAPTER_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/LimitsAndFeatures.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+ class Adapter final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+
+ ~Adapter();
+ void CancelCallbacksForDisconnect() override;
+
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+ void SetProperties(const WGPUAdapterProperties* properties);
+ void GetProperties(WGPUAdapterProperties* properties) const;
+ void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+
+ bool OnRequestDeviceCallback(uint64_t requestSerial,
+ WGPURequestDeviceStatus status,
+ const char* message,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features);
+
+        // Unimplementable. Only available in dawn_native.
+ WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
+
+ private:
+ LimitsAndFeatures mLimitsAndFeatures;
+ WGPUAdapterProperties mProperties;
+
+ struct RequestDeviceData {
+ WGPURequestDeviceCallback callback = nullptr;
+ ObjectId deviceObjectId;
+ void* userdata = nullptr;
+ };
+ RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_ADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h b/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h
new file mode 100644
index 00000000000..080da482b7e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ApiObjects.h
@@ -0,0 +1,29 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_APIOBJECTS_H_
+#define DAWNWIRE_CLIENT_APIOBJECTS_H_
+
+#include "dawn/wire/client/ObjectBase.h"
+
+#include "dawn/wire/client/Adapter.h"
+#include "dawn/wire/client/Buffer.h"
+#include "dawn/wire/client/Device.h"
+#include "dawn/wire/client/Instance.h"
+#include "dawn/wire/client/Queue.h"
+#include "dawn/wire/client/ShaderModule.h"
+
+#include "dawn/wire/client/ApiObjects_autogen.h"
+
+#endif // DAWNWIRE_CLIENT_APIOBJECTS_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp
new file mode 100644
index 00000000000..21db737a088
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.cpp
@@ -0,0 +1,406 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Buffer.h"
+
+#include "dawn/wire/BufferConsumer_impl.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+ // static
+ WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
+ Client* wireClient = device->client;
+
+ bool mappable =
+ (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
+ descriptor->mappedAtCreation;
+ if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
+ return device->CreateErrorBuffer();
+ }
+
+ std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+
+ DeviceCreateBufferCmd cmd;
+ cmd.deviceId = device->id;
+ cmd.descriptor = descriptor;
+ cmd.readHandleCreateInfoLength = 0;
+ cmd.readHandleCreateInfo = nullptr;
+ cmd.writeHandleCreateInfoLength = 0;
+ cmd.writeHandleCreateInfo = nullptr;
+
+ if (mappable) {
+ if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
+ // Create the read handle on buffer creation.
+ readHandle.reset(
+ wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
+ if (readHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory,
+ "Failed to create buffer mapping");
+ return device->CreateErrorBuffer();
+ }
+ cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
+ }
+
+ if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 ||
+ descriptor->mappedAtCreation) {
+ // Create the write handle on buffer creation.
+ writeHandle.reset(
+ wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
+ if (writeHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory,
+ "Failed to create buffer mapping");
+ return device->CreateErrorBuffer();
+ }
+ cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+ }
+ }
+
+ // Create the buffer and send the creation command.
+ // This must happen after any potential device->CreateErrorBuffer()
+ // as server expects allocating ids to be monotonically increasing
+ auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
+ Buffer* buffer = bufferObjectAndSerial->object.get();
+ buffer->mDevice = device;
+ buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
+ buffer->mSize = descriptor->size;
+ buffer->mDestructWriteHandleOnUnmap = false;
+
+ if (descriptor->mappedAtCreation) {
+ // If the buffer is mapped at creation, a write handle is created and will be
+ // destructed on unmap if the buffer doesn't have MapWrite usage
+ // The buffer is mapped right now.
+ buffer->mMapState = MapState::MappedAtCreation;
+
+ // This flag is for write handle created by mappedAtCreation
+ // instead of MapWrite usage. We don't have such a case for read handle
+ buffer->mDestructWriteHandleOnUnmap =
+ (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
+
+ buffer->mMapOffset = 0;
+ buffer->mMapSize = buffer->mSize;
+ ASSERT(writeHandle != nullptr);
+ buffer->mMappedData = writeHandle->GetData();
+ }
+
+ cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+
+ wireClient->SerializeCommand(
+ cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
+ [&](SerializeBuffer* serializeBuffer) {
+ if (readHandle != nullptr) {
+ char* readHandleBuffer;
+ WIRE_TRY(
+ serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
+ // Serialize the ReadHandle into the space after the command.
+ readHandle->SerializeCreate(readHandleBuffer);
+ buffer->mReadHandle = std::move(readHandle);
+ }
+ if (writeHandle != nullptr) {
+ char* writeHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(cmd.writeHandleCreateInfoLength,
+ &writeHandleBuffer));
+ // Serialize the WriteHandle into the space after the command.
+ writeHandle->SerializeCreate(writeHandleBuffer);
+ buffer->mWriteHandle = std::move(writeHandle);
+ }
+
+ return WireResult::Success;
+ });
+ return ToAPI(buffer);
+ }
+
+ // static
+ WGPUBuffer Buffer::CreateError(Device* device) {
+ auto* allocation = device->client->BufferAllocator().New(device->client);
+ allocation->object->mDevice = device;
+ allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
+
+ DeviceCreateErrorBufferCmd cmd;
+ cmd.self = ToAPI(device);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+ device->client->SerializeCommand(cmd);
+
+ return ToAPI(allocation->object.get());
+ }
+
+ Buffer::~Buffer() {
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+ FreeMappedData();
+ }
+
+ void Buffer::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+ }
+
+ void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
+ mRequests.CloseAll([status](MapRequestData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
+ }
+ });
+ }
+
+ void Buffer::MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
+ }
+
+ // Handle the defaulting of size required by WebGPU.
+ if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
+ size = mSize - offset;
+ }
+
+ // Create the request structure that will hold information while this mapping is
+ // in flight.
+ MapRequestData request = {};
+ request.callback = callback;
+ request.userdata = userdata;
+ request.offset = offset;
+ request.size = size;
+ if (mode & WGPUMapMode_Read) {
+ request.type = MapRequestType::Read;
+ } else if (mode & WGPUMapMode_Write) {
+ request.type = MapRequestType::Write;
+ }
+
+ uint64_t serial = mRequests.Add(std::move(request));
+
+ // Serialize the command to send to the server.
+ BufferMapAsyncCmd cmd;
+ cmd.bufferId = this->id;
+ cmd.requestSerial = serial;
+ cmd.mode = mode;
+ cmd.offset = offset;
+ cmd.size = size;
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo) {
+ MapRequestData request;
+ if (!mRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ auto FailRequest = [&request]() -> bool {
+ if (request.callback != nullptr) {
+ request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
+ }
+ return false;
+ };
+
+ // Take into account the client-side status of the request if the server says it is a
+ // success.
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+ status = request.clientStatus;
+ }
+
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+ switch (request.type) {
+ case MapRequestType::Read: {
+ if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
+ // This is the size of data deserialized from the command stream, which must
+ // be CPU-addressable.
+ return FailRequest();
+ }
+
+ // Validate to prevent bad map request; buffer destroyed during map request
+ if (mReadHandle == nullptr) {
+ return FailRequest();
+ }
+ // Update user map data with server returned data
+ if (!mReadHandle->DeserializeDataUpdate(
+ readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
+ request.offset, request.size)) {
+ return FailRequest();
+ }
+ mMapState = MapState::MappedForRead;
+ mMappedData = const_cast<void*>(mReadHandle->GetData());
+ break;
+ }
+ case MapRequestType::Write: {
+ if (mWriteHandle == nullptr) {
+ return FailRequest();
+ }
+ mMapState = MapState::MappedForWrite;
+ mMappedData = mWriteHandle->GetData();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ mMapOffset = request.offset;
+ mMapSize = request.size;
+ }
+
+ if (request.callback) {
+ request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
+ }
+
+ return true;
+ }
+
+ void* Buffer::GetMappedRange(size_t offset, size_t size) {
+ if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
+ }
+ return static_cast<uint8_t*>(mMappedData) + offset;
+ }
+
+ const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
+ if (!(IsMappedForWriting() || IsMappedForReading()) ||
+ !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
+ }
+ return static_cast<uint8_t*>(mMappedData) + offset;
+ }
+
+ void Buffer::Unmap() {
+ // Invalidate the local pointer, and cancel all other in-flight requests that would
+ // turn into errors anyway (you can't double map). This prevents race when the following
+ // happens, where the application code would have unmapped a buffer but still receive a
+ // callback:
+ // - Client -> Server: MapRequest1, Unmap, MapRequest2
+ // - Server -> Client: Result of MapRequest1
+ // - Unmap locally on the client
+ // - Server -> Client: Result of MapRequest2
+
+ // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
+ if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
+ mWriteHandle != nullptr) {
+ // Writes need to be flushed before Unmap is sent. Unmap calls all associated
+ // in-flight callbacks which may read the updated data.
+
+ // Get the serialization size of data update writes.
+ size_t writeDataUpdateInfoLength =
+ mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
+
+ BufferUpdateMappedDataCmd cmd;
+ cmd.bufferId = id;
+ cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
+ cmd.writeDataUpdateInfo = nullptr;
+ cmd.offset = mMapOffset;
+ cmd.size = mMapSize;
+
+ client->SerializeCommand(
+ cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+ char* writeHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
+
+ // Serialize flush metadata into the space after the command.
+ // This closes the handle for writing.
+ mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
+
+ return WireResult::Success;
+ });
+
+ // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
+ // for mappedAtCreation usage. It is destroyed on unmap after flush to server
+ // instead of at buffer destruction.
+ if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
+ mWriteHandle = nullptr;
+ if (mReadHandle) {
+ // If it's both mappedAtCreation and MapRead we need to reset
+ // mMappedData to readHandle's GetData(). This could be changed to
+ // merging read/write handle in future
+ mMappedData = const_cast<void*>(mReadHandle->GetData());
+ }
+ }
+ }
+
+ // Free map access tokens
+ mMapState = MapState::Unmapped;
+ mMapOffset = 0;
+ mMapSize = 0;
+
+ // Tag all mapping requests still in flight as unmapped before callback.
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
+ }
+ });
+
+ BufferUnmapCmd cmd;
+ cmd.self = ToAPI(this);
+ client->SerializeCommand(cmd);
+ }
+
+ void Buffer::Destroy() {
+ // Remove the current mapping and destroy Read/WriteHandles.
+ FreeMappedData();
+
+ // Tag all mapping requests still in flight as destroyed before callback.
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
+ }
+ });
+
+ BufferDestroyCmd cmd;
+ cmd.self = ToAPI(this);
+ client->SerializeCommand(cmd);
+ }
+
+ bool Buffer::IsMappedForReading() const {
+ return mMapState == MapState::MappedForRead;
+ }
+
+ bool Buffer::IsMappedForWriting() const {
+ return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
+ }
+
+ bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
+ if (offset % 8 != 0 || size % 4 != 0) {
+ return false;
+ }
+
+ if (size > mMapSize || offset < mMapOffset) {
+ return false;
+ }
+
+ size_t offsetInMappedRange = offset - mMapOffset;
+ return offsetInMappedRange <= mMapSize - size;
+ }
+
+ void Buffer::FreeMappedData() {
+#if defined(DAWN_ENABLE_ASSERTS)
+ // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
+ // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
+ // interaction of mapping and GC.
+ if (mMappedData) {
+ memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
+ }
+#endif // defined(DAWN_ENABLE_ASSERTS)
+
+ mMapOffset = 0;
+ mMapSize = 0;
+ mReadHandle = nullptr;
+ mWriteHandle = nullptr;
+ mMappedData = nullptr;
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h
new file mode 100644
index 00000000000..186c82bd591
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Buffer.h
@@ -0,0 +1,109 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_BUFFER_H_
+#define DAWNWIRE_CLIENT_BUFFER_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+ class Device;
+
+ class Buffer final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+
+ static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
+ static WGPUBuffer CreateError(Device* device);
+
+ ~Buffer();
+
+ bool OnMapAsyncCallback(uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo);
+ void MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* GetMappedRange(size_t offset, size_t size);
+ const void* GetConstMappedRange(size_t offset, size_t size);
+ void Unmap();
+
+ void Destroy();
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
+
+ bool IsMappedForReading() const;
+ bool IsMappedForWriting() const;
+ bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+
+ void FreeMappedData();
+
+ Device* mDevice;
+
+ enum class MapRequestType { None, Read, Write };
+
+ enum class MapState {
+ Unmapped,
+ MappedForRead,
+ MappedForWrite,
+ MappedAtCreation,
+ };
+
+ // We want to defer all the validation to the server, which means we could have multiple
+ // map request in flight at a single time and need to track them separately.
+ // On well-behaved applications, only one request should exist at a single time.
+ struct MapRequestData {
+ WGPUBufferMapCallback callback = nullptr;
+ void* userdata = nullptr;
+ size_t offset = 0;
+ size_t size = 0;
+
+ // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
+ // precedence over the success value returned from the server. However Error statuses
+ // from the server take precedence over the client-side status.
+ WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
+
+ MapRequestType type = MapRequestType::None;
+ };
+ RequestTracker<MapRequestData> mRequests;
+ uint64_t mSize = 0;
+
+ // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
+ // requests.
+ // TODO(enga): Use a tagged pointer to save space.
+ std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
+ MapState mMapState = MapState::Unmapped;
+ bool mDestructWriteHandleOnUnmap = false;
+
+ void* mMappedData = nullptr;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
+
+ std::weak_ptr<bool> mDeviceIsAlive;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp
new file mode 100644
index 00000000000..5db8444b271
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Client.cpp
@@ -0,0 +1,171 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Client.h"
+
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+ namespace {
+
+ class NoopCommandSerializer final : public CommandSerializer {
+ public:
+ static NoopCommandSerializer* GetInstance() {
+ static NoopCommandSerializer gNoopCommandSerializer;
+ return &gNoopCommandSerializer;
+ }
+
+ ~NoopCommandSerializer() = default;
+
+ size_t GetMaximumAllocationSize() const final {
+ return 0;
+ }
+ void* GetCmdSpace(size_t size) final {
+ return nullptr;
+ }
+ bool Flush() final {
+ return false;
+ }
+ };
+
+ } // anonymous namespace
+
+ Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
+ : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
+ if (mMemoryTransferService == nullptr) {
+ // If a MemoryTransferService is not provided, fall back to inline memory.
+ mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+ mMemoryTransferService = mOwnedMemoryTransferService.get();
+ }
+ }
+
+ Client::~Client() {
+ DestroyAllObjects();
+ }
+
+ void Client::DestroyAllObjects() {
+ for (auto& objectList : mObjects) {
+ ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
+ if (objectType == ObjectType::Device) {
+ continue;
+ }
+ while (!objectList.empty()) {
+ ObjectBase* object = objectList.head()->value();
+
+ DestroyObjectCmd cmd;
+ cmd.objectType = objectType;
+ cmd.objectId = object->id;
+ SerializeCommand(cmd);
+ FreeObject(objectType, object);
+ }
+ }
+
+ while (!mObjects[ObjectType::Device].empty()) {
+ ObjectBase* object = mObjects[ObjectType::Device].head()->value();
+
+ DestroyObjectCmd cmd;
+ cmd.objectType = ObjectType::Device;
+ cmd.objectId = object->id;
+ SerializeCommand(cmd);
+ FreeObject(ObjectType::Device, object);
+ }
+ }
+
+ ReservedTexture Client::ReserveTexture(WGPUDevice device) {
+ auto* allocation = TextureAllocator().New(this);
+
+ ReservedTexture result;
+ result.texture = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ result.deviceId = FromAPI(device)->id;
+ result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+ return result;
+ }
+
+ ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
+ auto* allocation = SwapChainAllocator().New(this);
+
+ ReservedSwapChain result;
+ result.swapchain = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ result.deviceId = FromAPI(device)->id;
+ result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+ return result;
+ }
+
+ ReservedDevice Client::ReserveDevice() {
+ auto* allocation = DeviceAllocator().New(this);
+
+ ReservedDevice result;
+ result.device = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ return result;
+ }
+
+ ReservedInstance Client::ReserveInstance() {
+ auto* allocation = InstanceAllocator().New(this);
+
+ ReservedInstance result;
+ result.instance = ToAPI(allocation->object.get());
+ result.id = allocation->object->id;
+ result.generation = allocation->generation;
+ return result;
+ }
+
+ void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
+ TextureAllocator().Free(FromAPI(reservation.texture));
+ }
+
+ void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+ SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+ }
+
+ void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+ DeviceAllocator().Free(FromAPI(reservation.device));
+ }
+
+ void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+ InstanceAllocator().Free(FromAPI(reservation.instance));
+ }
+
+ void Client::Disconnect() {
+ mDisconnected = true;
+ mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
+
+ auto& deviceList = mObjects[ObjectType::Device];
+ {
+ for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
+ device = device->next()) {
+ static_cast<Device*>(device->value())
+ ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
+ }
+ }
+ for (auto& objectList : mObjects) {
+ for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
+ object = object->next()) {
+ object->value()->CancelCallbacksForDisconnect();
+ }
+ }
+ }
+
+ bool Client::IsDisconnected() const {
+ return mDisconnected;
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Client.h b/chromium/third_party/dawn/src/dawn/wire/client/Client.h
new file mode 100644
index 00000000000..fc84b3b87df
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Client.h
@@ -0,0 +1,95 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_CLIENT_H_
+#define DAWNWIRE_CLIENT_CLIENT_H_
+
+#include <dawn/webgpu.h>
+#include <dawn/wire/Wire.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/common/NonCopyable.h"
+#include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireDeserializeAllocator.h"
+#include "dawn/wire/client/ClientBase_autogen.h"
+
+namespace dawn::wire::client {
+
+ class Device;
+ class MemoryTransferService;
+
+ class Client : public ClientBase {
+ public:
+ Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
+ ~Client() override;
+
+ // ChunkedCommandHandler implementation
+ const volatile char* HandleCommandsImpl(const volatile char* commands,
+ size_t size) override;
+
+ MemoryTransferService* GetMemoryTransferService() const {
+ return mMemoryTransferService;
+ }
+
+ ReservedTexture ReserveTexture(WGPUDevice device);
+ ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+ ReservedDevice ReserveDevice();
+ ReservedInstance ReserveInstance();
+
+ void ReclaimTextureReservation(const ReservedTexture& reservation);
+ void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+ void ReclaimDeviceReservation(const ReservedDevice& reservation);
+ void ReclaimInstanceReservation(const ReservedInstance& reservation);
+
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ mSerializer.SerializeCommand(cmd, *this);
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
+ }
+
+ void Disconnect();
+ bool IsDisconnected() const;
+
+ template <typename T>
+ void TrackObject(T* object) {
+ mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
+ }
+
+ private:
+ void DestroyAllObjects();
+
+#include "dawn/wire/client/ClientPrototypes_autogen.inc"
+
+ ChunkedCommandSerializer mSerializer;
+ WireDeserializeAllocator mAllocator;
+ MemoryTransferService* mMemoryTransferService = nullptr;
+ std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+
+ PerObjectType<LinkedList<ObjectBase>> mObjects;
+ bool mDisconnected = false;
+ };
+
+ std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_CLIENT_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp
new file mode 100644
index 00000000000..7b99dc60b47
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientDoers.cpp
@@ -0,0 +1,133 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+#include <limits>
+
+namespace dawn::wire::client {
+
+ bool Client::DoDeviceUncapturedErrorCallback(Device* device,
+ WGPUErrorType errorType,
+ const char* message) {
+ switch (errorType) {
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
+ break;
+ default:
+ return false;
+ }
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
+ return true;
+ }
+ device->HandleError(errorType, message);
+ return true;
+ }
+
+ bool Client::DoDeviceLoggingCallback(Device* device,
+ WGPULoggingType loggingType,
+ const char* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
+ return true;
+ }
+ device->HandleLogging(loggingType, message);
+ return true;
+ }
+
+ bool Client::DoDeviceLostCallback(Device* device,
+ WGPUDeviceLostReason reason,
+ char const* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
+ return true;
+ }
+ device->HandleDeviceLost(reason, message);
+ return true;
+ }
+
+ bool Client::DoDevicePopErrorScopeCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUErrorType errorType,
+ const char* message) {
+ if (device == nullptr) {
+ // The device might have been deleted or recreated so this isn't an error.
+ return true;
+ }
+ return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
+ }
+
+ bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
+ uint64_t requestSerial,
+ uint32_t status,
+ uint64_t readDataUpdateInfoLength,
+ const uint8_t* readDataUpdateInfo) {
+ // The buffer might have been deleted or recreated so this isn't an error.
+ if (buffer == nullptr) {
+ return true;
+ }
+ return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
+ readDataUpdateInfo);
+ }
+
+ bool Client::DoQueueWorkDoneCallback(Queue* queue,
+ uint64_t requestSerial,
+ WGPUQueueWorkDoneStatus status) {
+ // The queue might have been deleted or recreated so this isn't an error.
+ if (queue == nullptr) {
+ return true;
+ }
+ return queue->OnWorkDoneCallback(requestSerial, status);
+ }
+
+ bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ // The device might have been deleted or recreated so this isn't an error.
+ if (device == nullptr) {
+ return true;
+ }
+ return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+ }
+
+ bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
+ uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ // The device might have been deleted or recreated so this isn't an error.
+ if (device == nullptr) {
+ return true;
+ }
+ return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+ }
+
+ bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
+ uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ // The shader module might have been deleted or recreated so this isn't an error.
+ if (shaderModule == nullptr) {
+ return true;
+ }
+ return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
new file mode 100644
index 00000000000..e04ce803f1e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
@@ -0,0 +1,131 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Alloc.h"
+#include "dawn/common/Assert.h"
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+#include <cstring>
+
+namespace dawn::wire::client {
+
+ class InlineMemoryTransferService : public MemoryTransferService {
+ class ReadHandleImpl : public ReadHandle {
+ public:
+ explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+ : mStagingData(std::move(stagingData)), mSize(size) {
+ }
+
+ ~ReadHandleImpl() override = default;
+
+ size_t SerializeCreateSize() override {
+ return 0;
+ }
+
+ void SerializeCreate(void*) override {
+ }
+
+ const void* GetData() override {
+ return mStagingData.get();
+ }
+
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override {
+ if (deserializeSize != size || deserializePointer == nullptr) {
+ return false;
+ }
+
+ if (offset > mSize || size > mSize - offset) {
+ return false;
+ }
+
+ void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
+ memcpy(start, deserializePointer, size);
+ return true;
+ }
+
+ private:
+ std::unique_ptr<uint8_t[]> mStagingData;
+ size_t mSize;
+ };
+
+ class WriteHandleImpl : public WriteHandle {
+ public:
+ explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+ : mStagingData(std::move(stagingData)), mSize(size) {
+ }
+
+ ~WriteHandleImpl() override = default;
+
+ size_t SerializeCreateSize() override {
+ return 0;
+ }
+
+ void SerializeCreate(void*) override {
+ }
+
+ void* GetData() override {
+ return mStagingData.get();
+ }
+
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+ ASSERT(offset <= mSize);
+ ASSERT(size <= mSize - offset);
+ return size;
+ }
+
+ void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
+ ASSERT(mStagingData != nullptr);
+ ASSERT(serializePointer != nullptr);
+ ASSERT(offset <= mSize);
+ ASSERT(size <= mSize - offset);
+ memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
+ }
+
+ private:
+ std::unique_ptr<uint8_t[]> mStagingData;
+ size_t mSize;
+ };
+
+ public:
+ InlineMemoryTransferService() {
+ }
+ ~InlineMemoryTransferService() override = default;
+
+ ReadHandle* CreateReadHandle(size_t size) override {
+ auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+ if (stagingData) {
+ return new ReadHandleImpl(std::move(stagingData), size);
+ }
+ return nullptr;
+ }
+
+ WriteHandle* CreateWriteHandle(size_t size) override {
+ auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+ if (stagingData) {
+ memset(stagingData.get(), 0, size);
+ return new WriteHandleImpl(std::move(stagingData), size);
+ }
+ return nullptr;
+ }
+ };
+
+ std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+ return std::make_unique<InlineMemoryTransferService>();
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
new file mode 100644
index 00000000000..44ca3edc2d2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
@@ -0,0 +1,105 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/ClientMemoryTransferService_mock.h"
+
+#include <cstdio>
+#include "dawn/common/Assert.h"
+
+namespace dawn::wire::client {
+
+ MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+ : ReadHandle(), mService(service) {
+ }
+
+ MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+ mService->OnReadHandleDestroy(this);
+ }
+
+ size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
+ return mService->OnReadHandleSerializeCreateSize(this);
+ }
+
+ void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
+ mService->OnReadHandleSerializeCreate(this, serializePointer);
+ }
+
+ const void* MockMemoryTransferService::MockReadHandle::GetData() {
+ return mService->OnReadHandleGetData(this);
+ }
+
+ bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
+ const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return mService->OnReadHandleDeserializeDataUpdate(
+ this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
+ size);
+ }
+
+ MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+ : WriteHandle(), mService(service) {
+ }
+
+ MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+ mService->OnWriteHandleDestroy(this);
+ }
+
+ size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
+ return mService->OnWriteHandleSerializeCreateSize(this);
+ }
+
+ void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
+ mService->OnWriteHandleSerializeCreate(this, serializePointer);
+ }
+
+ void* MockMemoryTransferService::MockWriteHandle::GetData() {
+ return mService->OnWriteHandleGetData(this);
+ }
+
+ size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
+ size_t size) {
+ return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
+ }
+
+ void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
+ size_t offset,
+ size_t size) {
+ mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
+ }
+
+ MockMemoryTransferService::MockMemoryTransferService() = default;
+ MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+ MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(
+ size_t size) {
+ return OnCreateReadHandle(size);
+ }
+
+ MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(
+ size_t size) {
+ return OnCreateWriteHandle(size);
+ }
+
+ MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+ return new MockReadHandle(this);
+ }
+
+ MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+ return new MockWriteHandle(this);
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h
new file mode 100644
index 00000000000..0974f40a772
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ClientMemoryTransferService_mock.h
@@ -0,0 +1,99 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
+#define DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
+
+#include <gmock/gmock.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+ class MockMemoryTransferService : public MemoryTransferService {
+ public:
+ class MockReadHandle : public ReadHandle {
+ public:
+ explicit MockReadHandle(MockMemoryTransferService* service);
+ ~MockReadHandle() override;
+
+ size_t SerializeCreateSize() override;
+ void SerializeCreate(void* serializePointer) override;
+ const void* GetData() override;
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override;
+
+ private:
+ MockMemoryTransferService* mService;
+ };
+
+ class MockWriteHandle : public WriteHandle {
+ public:
+ explicit MockWriteHandle(MockMemoryTransferService* service);
+ ~MockWriteHandle() override;
+
+ size_t SerializeCreateSize() override;
+ void SerializeCreate(void* serializePointer) override;
+ void* GetData() override;
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+ void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
+
+ private:
+ MockMemoryTransferService* mService;
+ };
+
+ MockMemoryTransferService();
+ ~MockMemoryTransferService() override;
+
+ ReadHandle* CreateReadHandle(size_t) override;
+ WriteHandle* CreateWriteHandle(size_t) override;
+
+ MockReadHandle* NewReadHandle();
+ MockWriteHandle* NewWriteHandle();
+
+ MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
+ MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
+
+ MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
+ MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
+ MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
+ MOCK_METHOD(bool,
+ OnReadHandleDeserializeDataUpdate,
+ (const ReadHandle*,
+ const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size));
+ MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
+
+ MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
+ MOCK_METHOD(void,
+ OnWriteHandleSerializeCreate,
+ (const void* WriteHandle, void* serializePointer));
+ MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
+ MOCK_METHOD(size_t,
+ OnWriteHandleSizeOfSerializeDataUpdate,
+ (const void* WriteHandle, size_t offset, size_t size));
+ MOCK_METHOD(size_t,
+ OnWriteHandleSerializeDataUpdate,
+ (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
+ MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp
new file mode 100644
index 00000000000..c3866a322e0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Device.cpp
@@ -0,0 +1,342 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Device.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Log.h"
+#include "dawn/wire/client/ApiObjects_autogen.h"
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/ObjectAllocator.h"
+
+namespace dawn::wire::client {
+
+ Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
+ : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
+#if defined(DAWN_ENABLE_ASSERTS)
+ mErrorCallback = [](WGPUErrorType, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+ "probably not intended. If you really want to ignore errors "
+ "and suppress this message, set the callback to null.";
+ }
+ };
+
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+ static bool calledOnce = false;
+ if (!calledOnce) {
+ calledOnce = true;
+ dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+ "intended. If you really want to ignore device lost "
+ "and suppress this message, set the callback to null.";
+ }
+ };
+#endif // DAWN_ENABLE_ASSERTS
+ }
+
+ Device::~Device() {
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
+ request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(
+ WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", request->userdata);
+ } else {
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(
+ WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", request->userdata);
+ }
+ });
+ }
+
+ bool Device::GetLimits(WGPUSupportedLimits* limits) const {
+ return mLimitsAndFeatures.GetLimits(limits);
+ }
+
+ bool Device::HasFeature(WGPUFeatureName feature) const {
+ return mLimitsAndFeatures.HasFeature(feature);
+ }
+
+ size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
+ return mLimitsAndFeatures.EnumerateFeatures(features);
+ }
+
+ void Device::SetLimits(const WGPUSupportedLimits* limits) {
+ return mLimitsAndFeatures.SetLimits(limits);
+ }
+
+ void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+ }
+
+ void Device::HandleError(WGPUErrorType errorType, const char* message) {
+ if (mErrorCallback) {
+ mErrorCallback(errorType, message, mErrorUserdata);
+ }
+ }
+
+ void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
+ if (mLoggingCallback) {
+ // Since client always run in single thread, calling the callback directly is safe.
+ mLoggingCallback(loggingType, message, mLoggingUserdata);
+ }
+ }
+
+ void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
+ if (mDeviceLostCallback && !mDidRunLostCallback) {
+ mDidRunLostCallback = true;
+ mDeviceLostCallback(reason, message, mDeviceLostUserdata);
+ }
+ }
+
+ void Device::CancelCallbacksForDisconnect() {
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(
+ WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
+ request->userdata);
+ } else {
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+ nullptr, "Device lost",
+ request->userdata);
+ }
+ });
+ }
+
+ std::weak_ptr<bool> Device::GetAliveWeakPtr() {
+ return mIsAlive;
+ }
+
+ void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
+ mErrorCallback = errorCallback;
+ mErrorUserdata = errorUserdata;
+ }
+
+ void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
+ mLoggingCallback = callback;
+ mLoggingUserdata = userdata;
+ }
+
+ void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
+ mDeviceLostCallback = callback;
+ mDeviceLostUserdata = userdata;
+ }
+
+ void Device::PushErrorScope(WGPUErrorFilter filter) {
+ mErrorScopeStackSize++;
+
+ DevicePushErrorScopeCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.filter = filter;
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
+ if (mErrorScopeStackSize == 0) {
+ return false;
+ }
+ mErrorScopeStackSize--;
+
+ if (client->IsDisconnected()) {
+ callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
+ return true;
+ }
+
+ uint64_t serial = mErrorScopes.Add({callback, userdata});
+
+ DevicePopErrorScopeCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.requestSerial = serial;
+
+ client->SerializeCommand(cmd);
+
+ return true;
+ }
+
+ bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
+ WGPUErrorType type,
+ const char* message) {
+ switch (type) {
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
+ break;
+ default:
+ return false;
+ }
+
+ ErrorScopeData request;
+ if (!mErrorScopes.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ request.callback(type, message, request.userdata);
+ return true;
+ }
+
+ void Device::InjectError(WGPUErrorType type, const char* message) {
+ DeviceInjectErrorCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.type = type;
+ cmd.message = message;
+ client->SerializeCommand(cmd);
+ }
+
+ WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+ }
+
+ WGPUBuffer Device::CreateErrorBuffer() {
+ return Buffer::CreateError(this);
+ }
+
+ WGPUQueue Device::GetQueue() {
+ // The queue is lazily created because if a Device is created by
+ // Reserve/Inject, we cannot send the GetQueue message until
+ // it has been injected on the Server. It cannot happen immediately
+ // on construction.
+ if (mQueue == nullptr) {
+ // Get the primary queue for this device.
+ auto* allocation = client->QueueAllocator().New(client);
+ mQueue = allocation->object.get();
+
+ DeviceGetQueueCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+
+ client->SerializeCommand(cmd);
+ }
+
+ mQueue->refcount++;
+ return ToAPI(mQueue);
+ }
+
+ void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "GPU device disconnected", userdata);
+ }
+
+ auto* allocation = client->ComputePipelineAllocator().New(client);
+
+ CreatePipelineAsyncRequest request = {};
+ request.createComputePipelineAsyncCallback = callback;
+ request.userdata = userdata;
+ request.pipelineObjectID = allocation->object->id;
+
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+ DeviceCreateComputePipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
+ cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ auto pipelineAllocation =
+ client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
+
+ // If the return status is a failure we should give a null pipeline to the callback and
+ // free the allocation.
+ if (status != WGPUCreatePipelineAsyncStatus_Success) {
+ client->ComputePipelineAllocator().Free(pipelineAllocation);
+ request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
+ return true;
+ }
+
+ WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
+ request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+ return true;
+ }
+
+ void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "GPU device disconnected", userdata);
+ }
+
+ auto* allocation = client->RenderPipelineAllocator().New(client);
+
+ CreatePipelineAsyncRequest request = {};
+ request.createRenderPipelineAsyncCallback = callback;
+ request.userdata = userdata;
+ request.pipelineObjectID = allocation->object->id;
+
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+ DeviceCreateRenderPipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
+ cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ auto pipelineAllocation =
+ client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+
+ // If the return status is a failure we should give a null pipeline to the callback and
+ // free the allocation.
+ if (status != WGPUCreatePipelineAsyncStatus_Success) {
+ client->RenderPipelineAllocator().Free(pipelineAllocation);
+ request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
+ return true;
+ }
+
+ WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
+ request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+ return true;
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Device.h b/chromium/third_party/dawn/src/dawn/wire/client/Device.h
new file mode 100644
index 00000000000..bb6a96dc543
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Device.h
@@ -0,0 +1,112 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_DEVICE_H_
+#define DAWNWIRE_CLIENT_DEVICE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/ApiObjects_autogen.h"
+#include "dawn/wire/client/LimitsAndFeatures.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+#include <memory>
+
+namespace dawn::wire::client {
+
+ class Client;
+ class Queue;
+
+ class Device final : public ObjectBase {
+ public:
+ Device(Client* client, uint32_t refcount, uint32_t id);
+ ~Device();
+
+ void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
+ void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
+ void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
+ void InjectError(WGPUErrorType type, const char* message);
+ void PushErrorScope(WGPUErrorFilter filter);
+ bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
+ WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
+ WGPUBuffer CreateErrorBuffer();
+ WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
+ void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void HandleError(WGPUErrorType errorType, const char* message);
+ void HandleLogging(WGPULoggingType loggingType, const char* message);
+ void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
+ bool OnPopErrorScopeCallback(uint64_t requestSerial,
+ WGPUErrorType type,
+ const char* message);
+ bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message);
+ bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+ WGPUCreatePipelineAsyncStatus status,
+ const char* message);
+
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+
+ WGPUQueue GetQueue();
+
+ void CancelCallbacksForDisconnect() override;
+
+ std::weak_ptr<bool> GetAliveWeakPtr();
+
+ private:
+ LimitsAndFeatures mLimitsAndFeatures;
+ struct ErrorScopeData {
+ WGPUErrorCallback callback = nullptr;
+ void* userdata = nullptr;
+ };
+ RequestTracker<ErrorScopeData> mErrorScopes;
+ uint64_t mErrorScopeStackSize = 0;
+
+ struct CreatePipelineAsyncRequest {
+ WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
+ WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
+ void* userdata = nullptr;
+ ObjectId pipelineObjectID;
+ };
+ RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
+
+ WGPUErrorCallback mErrorCallback = nullptr;
+ WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
+ WGPULoggingCallback mLoggingCallback = nullptr;
+ bool mDidRunLostCallback = false;
+ void* mErrorUserdata = nullptr;
+ void* mDeviceLostUserdata = nullptr;
+ void* mLoggingUserdata = nullptr;
+
+ Queue* mQueue = nullptr;
+
+ std::shared_ptr<bool> mIsAlive;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_DEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp
new file mode 100644
index 00000000000..de27d4740e8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Instance.cpp
@@ -0,0 +1,101 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Instance.h"
+
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+ Instance::~Instance() {
+ mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+ request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
+ "Instance destroyed before callback", request->userdata);
+ });
+ }
+
+ void Instance::CancelCallbacksForDisconnect() {
+ mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+ request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
+ request->userdata);
+ });
+ }
+
+ void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
+ return;
+ }
+
+ auto* allocation = client->AdapterAllocator().New(client);
+ uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
+
+ InstanceRequestAdapterCmd cmd;
+ cmd.instanceId = this->id;
+ cmd.requestSerial = serial;
+ cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+ cmd.options = options;
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
+ uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ // May have been deleted or recreated so this isn't an error.
+ if (instance == nullptr) {
+ return true;
+ }
+ return instance->OnRequestAdapterCallback(requestSerial, status, message, properties,
+ limits, featuresCount, features);
+ }
+
+ bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features) {
+ RequestAdapterData request;
+ if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
+
+ // If the return status is a failure we should give a null adapter to the callback and
+ // free the allocation.
+ if (status != WGPURequestAdapterStatus_Success) {
+ client->AdapterAllocator().Free(adapter);
+ request.callback(status, nullptr, message, request.userdata);
+ return true;
+ }
+
+ adapter->SetProperties(properties);
+ adapter->SetLimits(limits);
+ adapter->SetFeatures(features, featuresCount);
+
+ request.callback(status, ToAPI(adapter), message, request.userdata);
+ return true;
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Instance.h b/chromium/third_party/dawn/src/dawn/wire/client/Instance.h
new file mode 100644
index 00000000000..9c4cfc97224
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Instance.h
@@ -0,0 +1,56 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_INSTANCE_H_
+#define DAWNWIRE_CLIENT_INSTANCE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+ class Instance final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+
+ ~Instance();
+ void CancelCallbacksForDisconnect() override;
+
+ void RequestAdapter(const WGPURequestAdapterOptions* options,
+ WGPURequestAdapterCallback callback,
+ void* userdata);
+ bool OnRequestAdapterCallback(uint64_t requestSerial,
+ WGPURequestAdapterStatus status,
+ const char* message,
+ const WGPUAdapterProperties* properties,
+ const WGPUSupportedLimits* limits,
+ uint32_t featuresCount,
+ const WGPUFeatureName* features);
+
+ private:
+ struct RequestAdapterData {
+ WGPURequestAdapterCallback callback = nullptr;
+ ObjectId adapterObjectId;
+ void* userdata = nullptr;
+ };
+ RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_INSTANCE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp
new file mode 100644
index 00000000000..a2c753c9cc2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.cpp
@@ -0,0 +1,63 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/LimitsAndFeatures.h"
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire::client {
+
+ bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ *limits = mLimits;
+ return true;
+ }
+
+ bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
+ return mFeatures.count(feature) != 0;
+ }
+
+ size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
+ if (features != nullptr) {
+ for (WGPUFeatureName f : mFeatures) {
+ *features = f;
+ ++features;
+ }
+ }
+ return mFeatures.size();
+ }
+
+ void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
+ ASSERT(limits != nullptr);
+ mLimits = *limits;
+ mLimits.nextInChain = nullptr;
+ }
+
+ void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+ ASSERT(features != nullptr || featuresCount == 0);
+ for (uint32_t i = 0; i < featuresCount; ++i) {
+ // Filter out features that the server supports, but the client does not.
+ // (Could be different versions)
+ if (!IsFeatureSupported(features[i])) {
+ continue;
+ }
+ mFeatures.insert(features[i]);
+ }
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h
new file mode 100644
index 00000000000..e6c07e5487a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/LimitsAndFeatures.h
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
+#define DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
+
+#include <dawn/webgpu.h>
+
+#include <unordered_set>
+
+namespace dawn::wire::client {
+
+ class LimitsAndFeatures {
+ public:
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+ bool HasFeature(WGPUFeatureName feature) const;
+ size_t EnumerateFeatures(WGPUFeatureName* features) const;
+
+ void SetLimits(const WGPUSupportedLimits* limits);
+ void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+
+ private:
+ WGPUSupportedLimits mLimits;
+ std::unordered_set<WGPUFeatureName> mFeatures;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_LIMITSANDFEATURES_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h b/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h
new file mode 100644
index 00000000000..b14e91e803e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ObjectAllocator.h
@@ -0,0 +1,110 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
+#define DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/Compiler.h"
+#include "dawn/wire/WireCmd_autogen.h"
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+namespace dawn::wire::client {
+
+ template <typename T>
+ class ObjectAllocator {
+ public:
+ struct ObjectAndSerial {
+ ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
+ : object(std::move(object)), generation(generation) {
+ }
+ std::unique_ptr<T> object;
+ uint32_t generation;
+ };
+
+ ObjectAllocator() {
+ // ID 0 is nullptr
+ mObjects.emplace_back(nullptr, 0);
+ }
+
+ template <typename Client>
+ ObjectAndSerial* New(Client* client) {
+ uint32_t id = GetNewId();
+ auto object = std::make_unique<T>(client, 1, id);
+ client->TrackObject(object.get());
+
+ if (id >= mObjects.size()) {
+ ASSERT(id == mObjects.size());
+ mObjects.emplace_back(std::move(object), 0);
+ } else {
+ ASSERT(mObjects[id].object == nullptr);
+
+ mObjects[id].generation++;
+ // The generation should never overflow. We don't recycle ObjectIds that would
+ // overflow their next generation.
+ ASSERT(mObjects[id].generation != 0);
+
+ mObjects[id].object = std::move(object);
+ }
+
+ return &mObjects[id];
+ }
+ void Free(T* obj) {
+ ASSERT(obj->IsInList());
+ if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
+ // Only recycle this ObjectId if the generation won't overflow on the next
+ // allocation.
+ FreeId(obj->id);
+ }
+ mObjects[obj->id].object = nullptr;
+ }
+
+ T* GetObject(uint32_t id) {
+ if (id >= mObjects.size()) {
+ return nullptr;
+ }
+ return mObjects[id].object.get();
+ }
+
+ uint32_t GetGeneration(uint32_t id) {
+ if (id >= mObjects.size()) {
+ return 0;
+ }
+ return mObjects[id].generation;
+ }
+
+ private:
+ uint32_t GetNewId() {
+ if (mFreeIds.empty()) {
+ return mCurrentId++;
+ }
+ uint32_t id = mFreeIds.back();
+ mFreeIds.pop_back();
+ return id;
+ }
+ void FreeId(uint32_t id) {
+ mFreeIds.push_back(id);
+ }
+
+ // 0 is an ID reserved to represent nullptr
+ uint32_t mCurrentId = 1;
+ std::vector<uint32_t> mFreeIds;
+ std::vector<ObjectAndSerial> mObjects;
+ };
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h
new file mode 100644
index 00000000000..8a4c04d7faa
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ObjectBase.h
@@ -0,0 +1,51 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_OBJECTBASE_H_
+#define DAWNWIRE_CLIENT_OBJECTBASE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/common/LinkedList.h"
+#include "dawn/wire/ObjectType_autogen.h"
+
+namespace dawn::wire::client {
+
+ class Client;
+
+ // All objects on the client side have:
+ // - A pointer to the Client to get where to serialize commands
+ // - The external reference count
+ // - An ID that is used to refer to this object when talking with the server side
+ // - A next/prev pointer. They are part of a linked list of objects of the same type.
+ struct ObjectBase : public LinkNode<ObjectBase> {
+ ObjectBase(Client* client, uint32_t refcount, uint32_t id)
+ : client(client), refcount(refcount), id(id) {
+ }
+
+ ~ObjectBase() {
+ RemoveFromList();
+ }
+
+ virtual void CancelCallbacksForDisconnect() {
+ }
+
+ Client* const client;
+ uint32_t refcount;
+ const uint32_t id;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_OBJECTBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp
new file mode 100644
index 00000000000..37d97d7c501
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Queue.cpp
@@ -0,0 +1,98 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/Queue.h"
+
+#include "dawn/wire/client/Client.h"
+#include "dawn/wire/client/Device.h"
+
+namespace dawn::wire::client {
+
+ Queue::~Queue() {
+ ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
+ }
+
+ bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
+ OnWorkDoneData request;
+ if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ request.callback(status, request.userdata);
+ return true;
+ }
+
+ void Queue::OnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
+ return;
+ }
+
+ uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
+
+ QueueOnSubmittedWorkDoneCmd cmd;
+ cmd.queueId = this->id;
+ cmd.signalValue = signalValue;
+ cmd.requestSerial = serial;
+
+ client->SerializeCommand(cmd);
+ }
+
+ void Queue::WriteBuffer(WGPUBuffer cBuffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ Buffer* buffer = FromAPI(cBuffer);
+
+ QueueWriteBufferCmd cmd;
+ cmd.queueId = id;
+ cmd.bufferId = buffer->id;
+ cmd.bufferOffset = bufferOffset;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.size = size;
+
+ client->SerializeCommand(cmd);
+ }
+
+ void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ QueueWriteTextureCmd cmd;
+ cmd.queueId = id;
+ cmd.destination = destination;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.dataSize = dataSize;
+ cmd.dataLayout = dataLayout;
+ cmd.writeSize = writeSize;
+
+ client->SerializeCommand(cmd);
+ }
+
+ void Queue::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
+ }
+
+ void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
+ mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
+ }
+ });
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/Queue.h b/chromium/third_party/dawn/src/dawn/wire/client/Queue.h
new file mode 100644
index 00000000000..d205387deb1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/Queue.h
@@ -0,0 +1,57 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_QUEUE_H_
+#define DAWNWIRE_CLIENT_QUEUE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/WireClient.h"
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+ class Queue final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+ ~Queue();
+
+ bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
+
+ // Dawn API
+ void OnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata);
+ void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
+ void WriteTexture(const WGPUImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize);
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
+
+ struct OnWorkDoneData {
+ WGPUQueueWorkDoneCallback callback = nullptr;
+ void* userdata = nullptr;
+ };
+ RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_QUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h b/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h
new file mode 100644
index 00000000000..c57ae926239
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/RequestTracker.h
@@ -0,0 +1,82 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+#define DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+
+#include "dawn/common/Assert.h"
+#include "dawn/common/NonCopyable.h"
+
+#include <cstdint>
+#include <map>
+
+namespace dawn::wire::client {
+
+ class Device;
+ class MemoryTransferService;
+
+ template <typename Request>
+ class RequestTracker : NonCopyable {
+ public:
+ ~RequestTracker() {
+ ASSERT(mRequests.empty());
+ }
+
+ uint64_t Add(Request&& request) {
+ mSerial++;
+ mRequests.emplace(mSerial, request);
+ return mSerial;
+ }
+
+ bool Acquire(uint64_t serial, Request* request) {
+ auto it = mRequests.find(serial);
+ if (it == mRequests.end()) {
+ return false;
+ }
+ *request = std::move(it->second);
+ mRequests.erase(it);
+ return true;
+ }
+
+ template <typename CloseFunc>
+ void CloseAll(CloseFunc&& closeFunc) {
+ // Call closeFunc on all requests while handling reentrancy where the callback of some
+ // requests may add some additional requests. We guarantee all callbacks for requests
+ // are called exactly onces, so keep closing new requests if the first batch added more.
+ // It is fine to loop infinitely here if that's what the application makes use do.
+ while (!mRequests.empty()) {
+ // Move mRequests to a local variable so that further reentrant modifications of
+ // mRequests don't invalidate the iterators.
+ auto allRequests = std::move(mRequests);
+ for (auto& [_, request] : allRequests) {
+ closeFunc(&request);
+ }
+ }
+ }
+
+ template <typename F>
+ void ForAll(F&& f) {
+ for (auto& [_, request] : mRequests) {
+ f(&request);
+ }
+ }
+
+ private:
+ uint64_t mSerial;
+ std::map<uint64_t, Request> mRequests;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_REQUESTTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp
new file mode 100644
index 00000000000..ce25ef77767
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.cpp
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/client/ShaderModule.h"
+
+#include "dawn/wire/client/Client.h"
+
+namespace dawn::wire::client {
+
+ ShaderModule::~ShaderModule() {
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
+ }
+
+ void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
+ return;
+ }
+
+ uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
+
+ ShaderModuleGetCompilationInfoCmd cmd;
+ cmd.shaderModuleId = this->id;
+ cmd.requestSerial = serial;
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ CompilationInfoRequest request;
+ if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
+ return false;
+ }
+
+ request.callback(status, info, request.userdata);
+ return true;
+ }
+
+ void ShaderModule::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
+ }
+
+ void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
+ mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, nullptr, request->userdata);
+ }
+ });
+ }
+
+} // namespace dawn::wire::client
diff --git a/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h
new file mode 100644
index 00000000000..fba76b41d81
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/client/ShaderModule.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_SHADER_MODULE_H_
+#define DAWNWIRE_CLIENT_SHADER_MODULE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn/wire/client/ObjectBase.h"
+#include "dawn/wire/client/RequestTracker.h"
+
+namespace dawn::wire::client {
+
+ class ShaderModule final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+ ~ShaderModule();
+
+ void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
+ bool GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info);
+
+ private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
+
+ struct CompilationInfoRequest {
+ WGPUCompilationInfoCallback callback = nullptr;
+ void* userdata = nullptr;
+ };
+ RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+ };
+
+} // namespace dawn::wire::client
+
+#endif // DAWNWIRE_CLIENT_SHADER_MODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h b/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h
new file mode 100644
index 00000000000..b9cba68239c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ObjectStorage.h
@@ -0,0 +1,228 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_OBJECTSTORAGE_H_
+#define DAWNWIRE_SERVER_OBJECTSTORAGE_H_
+
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/WireServer.h"
+
+#include <algorithm>
+#include <map>
+#include <unordered_set>
+
+namespace dawn::wire::server {
+
+ struct DeviceInfo {
+ std::unordered_set<uint64_t> childObjectTypesAndIds;
+ Server* server;
+ ObjectHandle self;
+ };
+
+ // Whether this object has been allocated, or reserved for async object creation.
+ // Used by the KnownObjects queries
+ enum class AllocationState : uint32_t {
+ Free,
+ Reserved,
+ Allocated,
+ };
+
+ template <typename T>
+ struct ObjectDataBase {
+ // The backend-provided handle and generation to this object.
+ T handle;
+ uint32_t generation = 0;
+
+ AllocationState state;
+
+ // This points to an allocation that is owned by the device.
+ DeviceInfo* deviceInfo = nullptr;
+ };
+
+ // Stores what the backend knows about the type.
+ template <typename T>
+ struct ObjectData : public ObjectDataBase<T> {};
+
+ enum class BufferMapWriteState { Unmapped, Mapped, MapError };
+
+ template <>
+ struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
+ // TODO(enga): Use a tagged pointer to save space.
+ std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
+ std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
+ BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
+ WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
+ // Indicate if writeHandle needs to be destroyed on unmap
+ bool mappedAtCreation = false;
+ };
+
+ // Pack the ObjectType and ObjectId as a single value for storage in
+ // an std::unordered_set. This lets us avoid providing our own hash and
+ // equality comparison operators.
+ inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
+ static_assert(sizeof(ObjectType) * 8 <= 32);
+ static_assert(sizeof(ObjectId) * 8 <= 32);
+ return (static_cast<uint64_t>(type) << 32) + id;
+ }
+
+ inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
+ ObjectType type = static_cast<ObjectType>(payload >> 32);
+ ObjectId id = payload & 0xFFFFFFFF;
+ return std::make_pair(type, id);
+ }
+
+ template <>
+ struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
+ // Store |info| as a separate allocation so that its address does not move.
+ // The pointer to |info| is stored in device child objects.
+ std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
+ };
+
+ // Keeps track of the mapping between client IDs and backend objects.
+ template <typename T>
+ class KnownObjects {
+ public:
+ using Data = ObjectData<T>;
+
+ KnownObjects() {
+ // Reserve ID 0 so that it can be used to represent nullptr for optional object values
+ // in the wire format. However don't tag it as allocated so that it is an error to ask
+ // KnownObjects for ID 0.
+ Data reservation;
+ reservation.handle = nullptr;
+ reservation.state = AllocationState::Free;
+ mKnown.push_back(std::move(reservation));
+ }
+
+ // Get a backend objects for a given client ID.
+ // Returns nullptr if the ID hasn't previously been allocated.
+ const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
+ if (id >= mKnown.size()) {
+ return nullptr;
+ }
+
+ const Data* data = &mKnown[id];
+
+ if (data->state != expected) {
+ return nullptr;
+ }
+
+ return data;
+ }
+ Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
+ if (id >= mKnown.size()) {
+ return nullptr;
+ }
+
+ Data* data = &mKnown[id];
+
+ if (data->state != expected) {
+ return nullptr;
+ }
+
+ return data;
+ }
+
+ // Allocates the data for a given ID and returns it.
+ // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
+ // reserved for nullptr). Invalidates all the Data*
+ Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
+ if (id == 0 || id > mKnown.size()) {
+ return nullptr;
+ }
+
+ Data data;
+ data.state = state;
+ data.handle = nullptr;
+
+ if (id >= mKnown.size()) {
+ mKnown.push_back(std::move(data));
+ return &mKnown.back();
+ }
+
+ if (mKnown[id].state != AllocationState::Free) {
+ return nullptr;
+ }
+
+ mKnown[id] = std::move(data);
+ return &mKnown[id];
+ }
+
+ // Marks an ID as deallocated
+ void Free(uint32_t id) {
+ ASSERT(id < mKnown.size());
+ mKnown[id].state = AllocationState::Free;
+ }
+
+ std::vector<T> AcquireAllHandles() {
+ std::vector<T> objects;
+ for (Data& data : mKnown) {
+ if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+ objects.push_back(data.handle);
+ data.state = AllocationState::Free;
+ data.handle = nullptr;
+ }
+ }
+
+ return objects;
+ }
+
+ std::vector<T> GetAllHandles() {
+ std::vector<T> objects;
+ for (Data& data : mKnown) {
+ if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+ objects.push_back(data.handle);
+ }
+ }
+
+ return objects;
+ }
+
+ private:
+ std::vector<Data> mKnown;
+ };
+
+ // ObjectIds are lost in deserialization. Store the ids of deserialized
+ // objects here so they can be used in command handlers. This is useful
+ // for creating ReturnWireCmds which contain client ids
+ template <typename T>
+ class ObjectIdLookupTable {
+ public:
+ void Store(T key, ObjectId id) {
+ mTable[key] = id;
+ }
+
+ // Return the cached ObjectId, or 0 (null handle)
+ ObjectId Get(T key) const {
+ const auto it = mTable.find(key);
+ if (it != mTable.end()) {
+ return it->second;
+ }
+ return 0;
+ }
+
+ void Remove(T key) {
+ auto it = mTable.find(key);
+ if (it != mTable.end()) {
+ mTable.erase(it);
+ }
+ }
+
+ private:
+ std::map<T, ObjectId> mTable;
+ };
+
+} // namespace dawn::wire::server
+
+#endif // DAWNWIRE_SERVER_OBJECTSTORAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp
new file mode 100644
index 00000000000..b0d4ba2534f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/Server.cpp
@@ -0,0 +1,213 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+#include "dawn/wire/WireServer.h"
+
+namespace dawn::wire::server {
+
+ Server::Server(const DawnProcTable& procs,
+ CommandSerializer* serializer,
+ MemoryTransferService* memoryTransferService)
+ : mSerializer(serializer),
+ mProcs(procs),
+ mMemoryTransferService(memoryTransferService),
+ mIsAlive(std::make_shared<bool>(true)) {
+ if (mMemoryTransferService == nullptr) {
+ // If a MemoryTransferService is not provided, fallback to inline memory.
+ mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+ mMemoryTransferService = mOwnedMemoryTransferService.get();
+ }
+ }
+
+ Server::~Server() {
+ // Un-set the error and lost callbacks since we cannot forward them
+ // after the server has been destroyed.
+ for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
+ ClearDeviceCallbacks(device);
+ }
+ DestroyAllObjects(mProcs);
+ }
+
+ bool Server::InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ ASSERT(texture != nullptr);
+ ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr || device->generation != deviceGeneration) {
+ return false;
+ }
+
+ ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
+ }
+
+ data->handle = texture;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+ data->deviceInfo = device->info.get();
+
+ if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
+ return false;
+ }
+
+ // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.textureReference(texture);
+
+ return true;
+ }
+
+ bool Server::InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration) {
+ ASSERT(swapchain != nullptr);
+ ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr || device->generation != deviceGeneration) {
+ return false;
+ }
+
+ ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
+ }
+
+ data->handle = swapchain;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+ data->deviceInfo = device->info.get();
+
+ if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
+ return false;
+ }
+
+        // The swap chain is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.swapChainReference(swapchain);
+
+ return true;
+ }
+
+ bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+ ASSERT(device != nullptr);
+ ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
+ }
+
+ data->handle = device;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+ data->info->server = this;
+ data->info->self = ObjectHandle{id, generation};
+
+ // The device is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.deviceReference(device);
+
+ // Set callbacks to forward errors to the client.
+ SetForwardingDeviceCallbacks(data);
+ return true;
+ }
+
+ bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+ ASSERT(instance != nullptr);
+ ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
+ if (data == nullptr) {
+ return false;
+ }
+
+ data->handle = instance;
+ data->generation = generation;
+ data->state = AllocationState::Allocated;
+
+ // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
+ // message from the client. Add a reference to counterbalance the eventual release.
+ mProcs.instanceReference(instance);
+
+ return true;
+ }
+
+ WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
+ ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
+ if (data == nullptr || data->generation != generation) {
+ return nullptr;
+ }
+ return data->handle;
+ }
+
+ void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
+ // Note: these callbacks are manually inlined here since they do not acquire and
+ // free their userdata. Also unlike other callbacks, these are cleared and unset when
+ // the server is destroyed, so we don't need to check if the server is still alive
+ // inside them.
+ mProcs.deviceSetUncapturedErrorCallback(
+ deviceObject->handle,
+ [](WGPUErrorType type, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnUncapturedError(info->self, type, message);
+ },
+ deviceObject->info.get());
+        // Set a callback to post warnings and other information to the client.
+        // Almost the same as UncapturedError.
+ mProcs.deviceSetLoggingCallback(
+ deviceObject->handle,
+ [](WGPULoggingType type, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnLogging(info->self, type, message);
+ },
+ deviceObject->info.get());
+ mProcs.deviceSetDeviceLostCallback(
+ deviceObject->handle,
+ [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
+ DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+ info->server->OnDeviceLost(info->self, reason, message);
+ },
+ deviceObject->info.get());
+ }
+
+ void Server::ClearDeviceCallbacks(WGPUDevice device) {
+ // Un-set the error and lost callbacks since we cannot forward them
+ // after the server has been destroyed.
+ mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
+ mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
+ mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
+ }
+
+ bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+ auto [_, inserted] = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
+ if (!inserted) {
+ // An object of this type and id already exists.
+ return false;
+ }
+ return true;
+ }
+
+ bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+ auto& children = info->childObjectTypesAndIds;
+ auto it = children.find(PackObjectTypeAndId(type, id));
+ if (it == children.end()) {
+ // An object of this type and id was already deleted.
+ return false;
+ }
+ children.erase(it);
+ return true;
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/Server.h b/chromium/third_party/dawn/src/dawn/wire/server/Server.h
new file mode 100644
index 00000000000..9c7a02ac92b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/Server.h
@@ -0,0 +1,243 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_SERVER_H_
+#define DAWNWIRE_SERVER_SERVER_H_
+
+#include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/server/ServerBase_autogen.h"
+
+namespace dawn::wire::server {
+
+ class Server;
+ class MemoryTransferService;
+
+ // CallbackUserdata and its derived classes are intended to be created by
+ // Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
+ // callbacks.
+ // It contains a pointer back to the Server so that the callback can call the
+ // Server to perform operations like serialization, and it contains a weak pointer
+ // |serverIsAlive|. If the weak pointer has expired, it means the server has
+ // been destroyed and the callback must not use the Server pointer.
+ // To assist with checking |serverIsAlive| and lifetime management of the userdata,
+ // |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
+ // return early if |serverIsAlive| has expired, and then forward the arguments
+ // to userdata->server->MyCallbackHandler.
+ //
+ // Example Usage:
+ //
+ // struct MyUserdata : CallbackUserdata { uint32_t foo; };
+ //
+ // auto userdata = MakeUserdata<MyUserdata>();
+ // userdata->foo = 2;
+ //
+ // callMyCallbackHandler(
+ // ForwardToServer<&Server::MyCallbackHandler>,
+ // userdata.release());
+ //
+ // void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
+ struct CallbackUserdata {
+ Server* const server;
+ std::weak_ptr<bool> const serverIsAlive;
+
+ CallbackUserdata() = delete;
+ CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
+ : server(server), serverIsAlive(serverIsAlive) {
+ }
+ };
+
+ template <auto F>
+ struct ForwardToServerHelper {
+ template <typename _>
+ struct ExtractedTypes;
+
+ // An internal structure used to unpack the various types that compose the type of F
+ template <typename Return, typename Class, typename Userdata, typename... Args>
+ struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
+ using UntypedCallback = Return (*)(Args..., void*);
+ static Return Callback(Args... args, void* userdata) {
+ // Acquire the userdata, and cast it to UserdataT.
+ std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
+ if (data->serverIsAlive.expired()) {
+ // Do nothing if the server has already been destroyed.
+ return;
+ }
+ // Forward the arguments and the typed userdata to the Server:: member function.
+ (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
+ }
+ };
+
+ static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
+ return ExtractedTypes<decltype(F)>::Callback;
+ }
+ };
+
+ template <auto F>
+ constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
+
+ struct MapUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle buffer;
+ WGPUBuffer bufferObj;
+ uint64_t requestSerial;
+ uint64_t offset;
+ uint64_t size;
+ WGPUMapModeFlags mode;
+ };
+
+ struct ErrorScopeUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle device;
+ uint64_t requestSerial;
+ };
+
+ struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle shaderModule;
+ uint64_t requestSerial;
+ };
+
+ struct QueueWorkDoneUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle queue;
+ uint64_t requestSerial;
+ };
+
+ struct CreatePipelineAsyncUserData : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle device;
+ uint64_t requestSerial;
+ ObjectId pipelineObjectID;
+ };
+
+ struct RequestAdapterUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle instance;
+ uint64_t requestSerial;
+ ObjectId adapterObjectId;
+ };
+
+ struct RequestDeviceUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle adapter;
+ uint64_t requestSerial;
+ ObjectId deviceObjectId;
+ };
+
+ class Server : public ServerBase {
+ public:
+ Server(const DawnProcTable& procs,
+ CommandSerializer* serializer,
+ MemoryTransferService* memoryTransferService);
+ ~Server() override;
+
+ // ChunkedCommandHandler implementation
+ const volatile char* HandleCommandsImpl(const volatile char* commands,
+ size_t size) override;
+
+ bool InjectTexture(WGPUTexture texture,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
+ bool InjectSwapChain(WGPUSwapChain swapchain,
+ uint32_t id,
+ uint32_t generation,
+ uint32_t deviceId,
+ uint32_t deviceGeneration);
+
+ bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+
+ bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+
+ WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+
+ template <typename T,
+ typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
+ std::unique_ptr<T> MakeUserdata() {
+ return std::unique_ptr<T>(new T(this, mIsAlive));
+ }
+
+ private:
+ template <typename Cmd>
+ void SerializeCommand(const Cmd& cmd) {
+ mSerializer.SerializeCommand(cmd);
+ }
+
+ template <typename Cmd, typename ExtraSizeSerializeFn>
+ void SerializeCommand(const Cmd& cmd,
+ size_t extraSize,
+ ExtraSizeSerializeFn&& SerializeExtraSize) {
+ mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
+ }
+
+ void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
+ void ClearDeviceCallbacks(WGPUDevice device);
+
+ // Error callbacks
+ void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
+ void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
+ void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
+ void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+ WGPUErrorType type,
+ const char* message);
+ void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
+ void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
+ void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPUComputePipeline pipeline,
+ const char* message);
+ void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPURenderPipeline pipeline,
+ const char* message);
+ void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info);
+ void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
+ WGPURequestAdapterStatus status,
+ WGPUAdapter adapter,
+ const char* message);
+ void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
+ WGPURequestDeviceStatus status,
+ WGPUDevice device,
+ const char* message);
+
+#include "dawn/wire/server/ServerPrototypes_autogen.inc"
+
+ WireDeserializeAllocator mAllocator;
+ ChunkedCommandSerializer mSerializer;
+ DawnProcTable mProcs;
+ std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+ MemoryTransferService* mMemoryTransferService = nullptr;
+
+ std::shared_ptr<bool> mIsAlive;
+ };
+
+ bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+ bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+
+ std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+
+} // namespace dawn::wire::server
+
+#endif // DAWNWIRE_SERVER_SERVER_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp
new file mode 100644
index 00000000000..05184552ef5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerAdapter.cpp
@@ -0,0 +1,110 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include "dawn/wire/SupportedFeatures.h"
+
+namespace dawn::wire::server {
+
+ bool Server::DoAdapterRequestDevice(ObjectId adapterId,
+ uint64_t requestSerial,
+ ObjectHandle deviceHandle,
+ const WGPUDeviceDescriptor* descriptor) {
+ auto* adapter = AdapterObjects().Get(adapterId);
+ if (adapter == nullptr) {
+ return false;
+ }
+
+ auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
+
+ resultData->generation = deviceHandle.generation;
+
+ auto userdata = MakeUserdata<RequestDeviceUserdata>();
+ userdata->adapter = ObjectHandle{adapterId, adapter->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->deviceObjectId = deviceHandle.id;
+
+ mProcs.adapterRequestDevice(adapter->handle, descriptor,
+ ForwardToServer<&Server::OnRequestDeviceCallback>,
+ userdata.release());
+ return true;
+ }
+
+ void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
+ WGPURequestDeviceStatus status,
+ WGPUDevice device,
+ const char* message) {
+ auto* deviceObject = DeviceObjects().Get(data->deviceObjectId, AllocationState::Reserved);
+ // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+ // they move from Reserved to Allocated, or if they are destroyed here.
+ ASSERT(deviceObject != nullptr);
+
+ ReturnAdapterRequestDeviceCallbackCmd cmd = {};
+ cmd.adapter = data->adapter;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.message = message;
+
+ if (status != WGPURequestDeviceStatus_Success) {
+ // Free the ObjectId which will make it unusable.
+ DeviceObjects().Free(data->deviceObjectId);
+ ASSERT(device == nullptr);
+ SerializeCommand(cmd);
+ return;
+ }
+
+ std::vector<WGPUFeatureName> features;
+
+ size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
+ features.resize(featuresCount);
+ mProcs.deviceEnumerateFeatures(device, features.data());
+
+ // The client should only be able to request supported features, so all enumerated
+ // features that were enabled must also be supported by the wire.
+ // Note: We fail the callback here, instead of immediately upon receiving
+ // the request to preserve callback ordering.
+ for (WGPUFeatureName f : features) {
+ if (!IsFeatureSupported(f)) {
+ // Release the device.
+ mProcs.deviceRelease(device);
+ // Free the ObjectId which will make it unusable.
+ DeviceObjects().Free(data->deviceObjectId);
+
+ cmd.status = WGPURequestDeviceStatus_Error;
+ cmd.message = "Requested feature not supported.";
+ SerializeCommand(cmd);
+ return;
+ }
+ }
+
+ cmd.featuresCount = features.size();
+ cmd.features = features.data();
+
+ WGPUSupportedLimits limits = {};
+ mProcs.deviceGetLimits(device, &limits);
+ cmd.limits = &limits;
+
+ // Assign the handle and allocated status if the device is created successfully.
+ deviceObject->state = AllocationState::Allocated;
+ deviceObject->handle = device;
+ SetForwardingDeviceCallbacks(deviceObject);
+
+ SerializeCommand(cmd);
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp
new file mode 100644
index 00000000000..44664da8193
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerBuffer.cpp
@@ -0,0 +1,282 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/BufferConsumer_impl.h"
+#include "dawn/wire/WireCmd_autogen.h"
+#include "dawn/wire/server/Server.h"
+
+#include <memory>
+
+namespace dawn::wire::server {
+
+ bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
+ auto* buffer = BufferObjects().Get(cmd.selfId);
+ DAWN_ASSERT(buffer != nullptr);
+
+ if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
+            // This indicates the writeHandle is for mappedAtCreation only, so it is destroyed
+            // on unmap. The writeHandle could already have been deleted if the buffer was
+            // destroyed, so we don't assert that it is non-null.
+ buffer->writeHandle = nullptr;
+ }
+
+ buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+ return true;
+ }
+
+ bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
+ // Destroying a buffer does an implicit unmapping.
+ auto* buffer = BufferObjects().Get(cmd.selfId);
+ DAWN_ASSERT(buffer != nullptr);
+
+ // The buffer was destroyed. Clear the Read/WriteHandle.
+ buffer->readHandle = nullptr;
+ buffer->writeHandle = nullptr;
+ buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+ return true;
+ }
+
+ bool Server::DoBufferMapAsync(ObjectId bufferId,
+ uint64_t requestSerial,
+ WGPUMapModeFlags mode,
+ uint64_t offset64,
+ uint64_t size64) {
+ // These requests are just forwarded to the buffer, with userdata containing what the
+ // client will require in the return command.
+
+ // The null object isn't valid as `self`
+ if (bufferId == 0) {
+ return false;
+ }
+
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (buffer == nullptr) {
+ return false;
+ }
+
+ std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
+ userdata->buffer = ObjectHandle{bufferId, buffer->generation};
+ userdata->bufferObj = buffer->handle;
+ userdata->requestSerial = requestSerial;
+ userdata->mode = mode;
+
+ // Make sure that the deserialized offset and size are no larger than
+ // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
+ // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
+ // client does the default size computation, we should always have a valid actual size here
+ // in server. All other invalid actual size can be caught by dawn native side validation.
+ if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
+ OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
+ return true;
+ }
+
+ size_t offset = static_cast<size_t>(offset64);
+ size_t size = static_cast<size_t>(size64);
+
+ userdata->offset = offset;
+ userdata->size = size;
+
+ mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
+ ForwardToServer<&Server::OnBufferMapAsyncCallback>,
+ userdata.release());
+
+ return true;
+ }
+
+ bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
+ const WGPUBufferDescriptor* descriptor,
+ ObjectHandle bufferResult,
+ uint64_t readHandleCreateInfoLength,
+ const uint8_t* readHandleCreateInfo,
+ uint64_t writeHandleCreateInfoLength,
+ const uint8_t* writeHandleCreateInfo) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
+ }
+
+ // Create and register the buffer object.
+ auto* resultData = BufferObjects().Allocate(bufferResult.id);
+ if (resultData == nullptr) {
+ return false;
+ }
+ resultData->generation = bufferResult.generation;
+ resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
+ resultData->deviceInfo = device->info.get();
+ resultData->usage = descriptor->usage;
+ resultData->mappedAtCreation = descriptor->mappedAtCreation;
+ if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
+ return false;
+ }
+
+ // isReadMode and isWriteMode could be true at the same time if usage contains
+ // WGPUMapMode_Read and buffer is mappedAtCreation
+ bool isReadMode = descriptor->usage & WGPUMapMode_Read;
+ bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
+
+ // This is the size of data deserialized from the command stream to create the read/write
+ // handle, which must be CPU-addressable.
+ if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+ writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+ readHandleCreateInfoLength >
+ std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
+ return false;
+ }
+
+ if (isWriteMode) {
+ MemoryTransferService::WriteHandle* writeHandle = nullptr;
+ // Deserialize metadata produced from the client to create a companion server handle.
+ if (!mMemoryTransferService->DeserializeWriteHandle(
+ writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
+ &writeHandle)) {
+ return false;
+ }
+ ASSERT(writeHandle != nullptr);
+ resultData->writeHandle.reset(writeHandle);
+ writeHandle->SetDataLength(descriptor->size);
+
+ if (descriptor->mappedAtCreation) {
+ void* mapping =
+ mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
+ if (mapping == nullptr) {
+ // A zero mapping is used to indicate an allocation error of an error buffer.
+ // This is a valid case and isn't fatal. Remember the buffer is an error so as
+ // to skip subsequent mapping operations.
+ resultData->mapWriteState = BufferMapWriteState::MapError;
+ return true;
+ }
+ ASSERT(mapping != nullptr);
+ writeHandle->SetTarget(mapping);
+
+ resultData->mapWriteState = BufferMapWriteState::Mapped;
+ }
+ }
+
+ if (isReadMode) {
+ MemoryTransferService::ReadHandle* readHandle = nullptr;
+ // Deserialize metadata produced from the client to create a companion server handle.
+ if (!mMemoryTransferService->DeserializeReadHandle(
+ readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
+ &readHandle)) {
+ return false;
+ }
+ ASSERT(readHandle != nullptr);
+
+ resultData->readHandle.reset(readHandle);
+ }
+
+ return true;
+ }
+
+ bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
+ uint64_t writeDataUpdateInfoLength,
+ const uint8_t* writeDataUpdateInfo,
+ uint64_t offset,
+ uint64_t size) {
+ // The null object isn't valid as `self`
+ if (bufferId == 0) {
+ return false;
+ }
+
+ if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
+ offset > std::numeric_limits<size_t>::max() ||
+ size > std::numeric_limits<size_t>::max()) {
+ return false;
+ }
+
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (buffer == nullptr) {
+ return false;
+ }
+ switch (buffer->mapWriteState) {
+ case BufferMapWriteState::Unmapped:
+ return false;
+ case BufferMapWriteState::MapError:
+ // The buffer is mapped but there was an error allocating mapped data.
+ // Do not perform the memcpy.
+ return true;
+ case BufferMapWriteState::Mapped:
+ break;
+ }
+ if (!buffer->writeHandle) {
+ // This check is performed after the check for the MapError state. It is permissible
+ // to Unmap and attempt to update mapped data of an error buffer.
+ return false;
+ }
+
+ // Deserialize the flush info and flush updated data from the handle into the target
+ // of the handle. The target is set via WriteHandle::SetTarget.
+ return buffer->writeHandle->DeserializeDataUpdate(
+ writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
+ static_cast<size_t>(offset), static_cast<size_t>(size));
+ }
+
+ void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
+ // Skip sending the callback if the buffer has already been destroyed.
+ auto* bufferData = BufferObjects().Get(data->buffer.id);
+ if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
+ return;
+ }
+
+ bool isRead = data->mode & WGPUMapMode_Read;
+ bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
+
+ ReturnBufferMapAsyncCallbackCmd cmd;
+ cmd.buffer = data->buffer;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.readDataUpdateInfoLength = 0;
+ cmd.readDataUpdateInfo = nullptr;
+
+ const void* readData = nullptr;
+ if (isSuccess) {
+ if (isRead) {
+ // Get the serialization size of the message to initialize ReadHandle data.
+ readData =
+ mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
+ cmd.readDataUpdateInfoLength =
+ bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
+ } else {
+ ASSERT(data->mode & WGPUMapMode_Write);
+ // The in-flight map request returned successfully.
+ bufferData->mapWriteState = BufferMapWriteState::Mapped;
+ // Set the target of the WriteHandle to the mapped buffer data.
+                // The writeHandle target always refers to the buffer base address,
+                // but we call getMappedRange exactly with the range of data that is potentially
+ // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
+ // subset of the buffer is actually mapped) in case the implementation does some
+ // range tracking.
+ bufferData->writeHandle->SetTarget(
+ static_cast<uint8_t*>(
+ mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size)) -
+ data->offset);
+ }
+ }
+
+ SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+ if (isSuccess && isRead) {
+ char* readHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
+ // The in-flight map request returned successfully.
+ bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
+ readHandleBuffer);
+ }
+ return WireResult::Success;
+ });
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp
new file mode 100644
index 00000000000..b04afc575f4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerDevice.cpp
@@ -0,0 +1,204 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+ namespace {
+
+ template <ObjectType objectType, typename Pipeline>
+ void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
+ WGPUCreatePipelineAsyncStatus status,
+ Pipeline pipeline,
+ CreatePipelineAsyncUserData* data) {
+ // May be null if the device was destroyed. Device destruction destroys child
+ // objects on the wire.
+ auto* pipelineObject =
+ knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
+ // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+ // they move from Reserved to Allocated, or if they are destroyed here.
+ ASSERT(pipelineObject != nullptr);
+
+ if (status == WGPUCreatePipelineAsyncStatus_Success) {
+ // Assign the handle and allocated status if the pipeline is created successfully.
+ pipelineObject->state = AllocationState::Allocated;
+ pipelineObject->handle = pipeline;
+
+ // This should be impossible to fail. It would require a command to be sent that
+ // creates a duplicate ObjectId, which would fail validation.
+ bool success = TrackDeviceChild(pipelineObject->deviceInfo, objectType,
+ data->pipelineObjectID);
+ ASSERT(success);
+ } else {
+ // Otherwise, free the ObjectId which will make it unusable.
+ knownObjects->Free(data->pipelineObjectID);
+ ASSERT(pipeline == nullptr);
+ }
+ }
+
+ } // anonymous namespace
+
+ void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
+ ReturnDeviceUncapturedErrorCallbackCmd cmd;
+ cmd.device = device;
+ cmd.type = type;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+ void Server::OnDeviceLost(ObjectHandle device,
+ WGPUDeviceLostReason reason,
+ const char* message) {
+ ReturnDeviceLostCallbackCmd cmd;
+ cmd.device = device;
+ cmd.reason = reason;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+ void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
+ ReturnDeviceLoggingCallbackCmd cmd;
+ cmd.device = device;
+ cmd.type = type;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+ bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
+ }
+
+ auto userdata = MakeUserdata<ErrorScopeUserdata>();
+ userdata->requestSerial = requestSerial;
+ userdata->device = ObjectHandle{deviceId, device->generation};
+
+ ErrorScopeUserdata* unownedUserdata = userdata.release();
+ bool success = mProcs.devicePopErrorScope(
+ device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>, unownedUserdata);
+ if (!success) {
+ delete unownedUserdata;
+ }
+ return success;
+ }
+
+ void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+ WGPUErrorType type,
+ const char* message) {
+ ReturnDevicePopErrorScopeCallbackCmd cmd;
+ cmd.device = userdata->device;
+ cmd.requestSerial = userdata->requestSerial;
+ cmd.type = type;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+ bool Server::DoDeviceCreateComputePipelineAsync(
+ ObjectId deviceId,
+ uint64_t requestSerial,
+ ObjectHandle pipelineObjectHandle,
+ const WGPUComputePipelineDescriptor* descriptor) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
+ }
+
+ auto* resultData =
+ ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
+
+ resultData->generation = pipelineObjectHandle.generation;
+ resultData->deviceInfo = device->info.get();
+
+ auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+ userdata->device = ObjectHandle{deviceId, device->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+ mProcs.deviceCreateComputePipelineAsync(
+ device->handle, descriptor,
+ ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>, userdata.release());
+ return true;
+ }
+
+ void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPUComputePipeline pipeline,
+ const char* message) {
+ HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
+ &ComputePipelineObjects(), status, pipeline, data);
+
+ ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
+ cmd.device = data->device;
+ cmd.status = status;
+ cmd.requestSerial = data->requestSerial;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+ bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
+ uint64_t requestSerial,
+ ObjectHandle pipelineObjectHandle,
+ const WGPURenderPipelineDescriptor* descriptor) {
+ auto* device = DeviceObjects().Get(deviceId);
+ if (device == nullptr) {
+ return false;
+ }
+
+ auto* resultData =
+ RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
+
+ resultData->generation = pipelineObjectHandle.generation;
+ resultData->deviceInfo = device->info.get();
+
+ auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+ userdata->device = ObjectHandle{deviceId, device->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->pipelineObjectID = pipelineObjectHandle.id;
+
+ mProcs.deviceCreateRenderPipelineAsync(
+ device->handle, descriptor,
+ ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>, userdata.release());
+ return true;
+ }
+
+ void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+ WGPUCreatePipelineAsyncStatus status,
+ WGPURenderPipeline pipeline,
+ const char* message) {
+ HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
+ &RenderPipelineObjects(), status, pipeline, data);
+
+ ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
+ cmd.device = data->device;
+ cmd.status = status;
+ cmd.requestSerial = data->requestSerial;
+ cmd.message = message;
+
+ SerializeCommand(cmd);
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
new file mode 100644
index 00000000000..0e6b30aba67
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
@@ -0,0 +1,94 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+#include <cstring>
+
+namespace dawn::wire::server {
+
+ class InlineMemoryTransferService : public MemoryTransferService {
+ public:
+ class ReadHandleImpl : public ReadHandle {
+ public:
+ ReadHandleImpl() {
+ }
+ ~ReadHandleImpl() override = default;
+
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+ return size;
+ }
+
+ void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) override {
+ if (size > 0) {
+ ASSERT(data != nullptr);
+ ASSERT(serializePointer != nullptr);
+ memcpy(serializePointer, data, size);
+ }
+ }
+ };
+
+ class WriteHandleImpl : public WriteHandle {
+ public:
+ WriteHandleImpl() {
+ }
+ ~WriteHandleImpl() override = default;
+
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override {
+ if (deserializeSize != size || mTargetData == nullptr ||
+ deserializePointer == nullptr) {
+ return false;
+ }
+ if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
+ return false;
+ }
+ memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
+ return true;
+ }
+ };
+
+ InlineMemoryTransferService() {
+ }
+ ~InlineMemoryTransferService() override = default;
+
+ bool DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) override {
+ ASSERT(readHandle != nullptr);
+ *readHandle = new ReadHandleImpl();
+ return true;
+ }
+
+ bool DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) override {
+ ASSERT(writeHandle != nullptr);
+ *writeHandle = new WriteHandleImpl();
+ return true;
+ }
+ };
+
+ std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+ return std::make_unique<InlineMemoryTransferService>();
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp
new file mode 100644
index 00000000000..d39dadec93d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerInstance.cpp
@@ -0,0 +1,100 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include "dawn/wire/SupportedFeatures.h"
+
+#include <algorithm>
+
+namespace dawn::wire::server {
+
+ bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
+ uint64_t requestSerial,
+ ObjectHandle adapterHandle,
+ const WGPURequestAdapterOptions* options) {
+ auto* instance = InstanceObjects().Get(instanceId);
+ if (instance == nullptr) {
+ return false;
+ }
+
+ auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
+ if (resultData == nullptr) {
+ return false;
+ }
+
+ resultData->generation = adapterHandle.generation;
+
+ auto userdata = MakeUserdata<RequestAdapterUserdata>();
+ userdata->instance = ObjectHandle{instanceId, instance->generation};
+ userdata->requestSerial = requestSerial;
+ userdata->adapterObjectId = adapterHandle.id;
+
+ mProcs.instanceRequestAdapter(instance->handle, options,
+ ForwardToServer<&Server::OnRequestAdapterCallback>,
+ userdata.release());
+ return true;
+ }
+
+ void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
+ WGPURequestAdapterStatus status,
+ WGPUAdapter adapter,
+ const char* message) {
+ auto* adapterObject =
+ AdapterObjects().Get(data->adapterObjectId, AllocationState::Reserved);
+ // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+ // they move from Reserved to Allocated, or if they are destroyed here.
+ ASSERT(adapterObject != nullptr);
+
+ ReturnInstanceRequestAdapterCallbackCmd cmd = {};
+ cmd.instance = data->instance;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.message = message;
+
+ if (status != WGPURequestAdapterStatus_Success) {
+ // Free the ObjectId which will make it unusable.
+ AdapterObjects().Free(data->adapterObjectId);
+ ASSERT(adapter == nullptr);
+ SerializeCommand(cmd);
+ return;
+ }
+
+ WGPUAdapterProperties properties = {};
+ WGPUSupportedLimits limits = {};
+ std::vector<WGPUFeatureName> features;
+
+ // Assign the handle and allocated status if the adapter is created successfully.
+ adapterObject->state = AllocationState::Allocated;
+ adapterObject->handle = adapter;
+
+ size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
+ features.resize(featuresCount);
+ mProcs.adapterEnumerateFeatures(adapter, features.data());
+
+ // Hide features the wire cannot support.
+ auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
+
+ cmd.featuresCount = std::distance(features.begin(), it);
+ cmd.features = features.data();
+
+ mProcs.adapterGetProperties(adapter, &properties);
+ mProcs.adapterGetLimits(adapter, &limits);
+ cmd.properties = &properties;
+ cmd.limits = &limits;
+
+ SerializeCommand(cmd);
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
new file mode 100644
index 00000000000..758c344e1ba
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
@@ -0,0 +1,91 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/ServerMemoryTransferService_mock.h"
+
+#include "dawn/common/Assert.h"
+
+namespace dawn::wire::server {
+
+ MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+ : ReadHandle(), mService(service) {
+ }
+
+ MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+ mService->OnReadHandleDestroy(this);
+ }
+
+ size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
+ size_t size) {
+ return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
+ }
+
+ void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) {
+ mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
+ }
+
+ MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+ : WriteHandle(), mService(service) {
+ }
+
+ MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+ mService->OnWriteHandleDestroy(this);
+ }
+
+ const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
+ return reinterpret_cast<const uint32_t*>(mTargetData);
+ }
+
+ bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
+ const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return mService->OnWriteHandleDeserializeDataUpdate(
+ this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
+ size);
+ }
+
+ MockMemoryTransferService::MockMemoryTransferService() = default;
+ MockMemoryTransferService::~MockMemoryTransferService() = default;
+
+ bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+ deserializeSize, readHandle);
+ }
+
+ bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) {
+ ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+ return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+ deserializeSize, writeHandle);
+ }
+
+ MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+ return new MockReadHandle(this);
+ }
+
+ MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+ return new MockWriteHandle(this);
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h
new file mode 100644
index 00000000000..faea0edc114
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerMemoryTransferService_mock.h
@@ -0,0 +1,108 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
+#define DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
+
+#include <gmock/gmock.h>
+
+#include "dawn/wire/WireServer.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+ class MockMemoryTransferService : public MemoryTransferService {
+ public:
+ class MockReadHandle : public ReadHandle {
+ public:
+ MockReadHandle(MockMemoryTransferService* service);
+ ~MockReadHandle() override;
+
+ size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+ void SerializeDataUpdate(const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer) override;
+
+ private:
+ MockMemoryTransferService* mService;
+ };
+
+ class MockWriteHandle : public WriteHandle {
+ public:
+ MockWriteHandle(MockMemoryTransferService* service);
+ ~MockWriteHandle() override;
+
+ bool DeserializeDataUpdate(const void* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size) override;
+
+ const uint32_t* GetData() const;
+
+ private:
+ MockMemoryTransferService* mService;
+ };
+
+ MockMemoryTransferService();
+ ~MockMemoryTransferService() override;
+
+ bool DeserializeReadHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle) override;
+
+ bool DeserializeWriteHandle(const void* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle) override;
+
+ MockReadHandle* NewReadHandle();
+ MockWriteHandle* NewWriteHandle();
+
+ MOCK_METHOD(bool,
+ OnDeserializeReadHandle,
+ (const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ ReadHandle** readHandle));
+
+ MOCK_METHOD(bool,
+ OnDeserializeWriteHandle,
+ (const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ WriteHandle** writeHandle));
+
+ MOCK_METHOD(size_t,
+ OnReadHandleSizeOfSerializeDataUpdate,
+ (const ReadHandle* readHandle, size_t offset, size_t size));
+ MOCK_METHOD(void,
+ OnReadHandleSerializeDataUpdate,
+ (const ReadHandle* readHandle,
+ const void* data,
+ size_t offset,
+ size_t size,
+ void* serializePointer));
+ MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
+
+ MOCK_METHOD(bool,
+ OnWriteHandleDeserializeDataUpdate,
+ (const WriteHandle* writeHandle,
+ const uint32_t* deserializePointer,
+ size_t deserializeSize,
+ size_t offset,
+ size_t size));
+ MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+ };
+
+} // namespace dawn::wire::server
+
+#endif // DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp
new file mode 100644
index 00000000000..68e1ea82ee4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerQueue.cpp
@@ -0,0 +1,103 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/common/Assert.h"
+#include "dawn/wire/server/Server.h"
+
+namespace dawn::wire::server {
+
+ void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
+ ReturnQueueWorkDoneCallbackCmd cmd;
+ cmd.queue = data->queue;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+
+ SerializeCommand(cmd);
+ }
+
+ bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
+ uint64_t signalValue,
+ uint64_t requestSerial) {
+ auto* queue = QueueObjects().Get(queueId);
+ if (queue == nullptr) {
+ return false;
+ }
+
+ auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
+ userdata->queue = ObjectHandle{queueId, queue->generation};
+ userdata->requestSerial = requestSerial;
+
+ mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
+ ForwardToServer<&Server::OnQueueWorkDone>,
+ userdata.release());
+ return true;
+ }
+
+ bool Server::DoQueueWriteBuffer(ObjectId queueId,
+ ObjectId bufferId,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size) {
+ // The null object isn't valid as `self` or `buffer` so we can combine the check with the
+ // check that the ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ auto* buffer = BufferObjects().Get(bufferId);
+ if (queue == nullptr || buffer == nullptr) {
+ return false;
+ }
+
+ if (size > std::numeric_limits<size_t>::max()) {
+ auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+ if (device == nullptr) {
+ return false;
+ }
+ return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
+ WGPUErrorType_OutOfMemory,
+ "Data size too large for write texture.");
+ }
+
+ mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
+ static_cast<size_t>(size));
+ return true;
+ }
+
+ bool Server::DoQueueWriteTexture(ObjectId queueId,
+ const WGPUImageCopyTexture* destination,
+ const uint8_t* data,
+ uint64_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ // The null object isn't valid as `self` so we can combine the check with the
+ // check that the ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ if (queue == nullptr) {
+ return false;
+ }
+
+ if (dataSize > std::numeric_limits<size_t>::max()) {
+ auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+ if (device == nullptr) {
+ return false;
+ }
+ return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
+ WGPUErrorType_OutOfMemory,
+ "Data size too large for write texture.");
+ }
+
+ mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
+ dataLayout, writeSize);
+ return true;
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp b/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp
new file mode 100644
index 00000000000..8785e0d648e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/wire/server/ServerShaderModule.cpp
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/wire/server/Server.h"
+
+#include <memory>
+
+namespace dawn::wire::server {
+
+ bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
+ auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
+ if (shaderModule == nullptr) {
+ return false;
+ }
+
+ auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
+ userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
+ userdata->requestSerial = requestSerial;
+
+ mProcs.shaderModuleGetCompilationInfo(
+ shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
+ userdata.release());
+ return true;
+ }
+
+ void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
+ cmd.shaderModule = data->shaderModule;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.info = info;
+
+ SerializeCommand(cmd);
+ }
+
+} // namespace dawn::wire::server
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
deleted file mode 100644
index 77082d70715..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Adapter.h"
-
-#include "common/Constants.h"
-#include "dawn_native/Instance.h"
-
-namespace dawn_native {
-
- AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
- : mInstance(instance), mBackend(backend) {
- mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
- }
-
- MaybeError AdapterBase::Initialize() {
- DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
- DAWN_TRY_CONTEXT(
- InitializeSupportedFeaturesImpl(),
- "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
- "backend=%s type=%s)",
- mPCIInfo.name, mDriverDescription, mPCIInfo.vendorId, mPCIInfo.deviceId, mBackend,
- mAdapterType);
- DAWN_TRY_CONTEXT(
- InitializeSupportedLimitsImpl(&mLimits),
- "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
- "backend=%s type=%s)",
- mPCIInfo.name, mDriverDescription, mPCIInfo.vendorId, mPCIInfo.deviceId, mBackend,
- mAdapterType);
-
- // Enforce internal Dawn constants.
- mLimits.v1.maxVertexBufferArrayStride =
- std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
- mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
- mLimits.v1.maxVertexAttributes =
- std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
- mLimits.v1.maxVertexBuffers =
- std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
- mLimits.v1.maxInterStageShaderComponents =
- std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
- mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
- mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
- mLimits.v1.maxSamplersPerShaderStage =
- std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
- mLimits.v1.maxStorageBuffersPerShaderStage =
- std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
- mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
- mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
- mLimits.v1.maxUniformBuffersPerShaderStage =
- std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
- mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
- std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
- kMaxDynamicUniformBuffersPerPipelineLayout);
- mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
- std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
- kMaxDynamicStorageBuffersPerPipelineLayout);
-
- return {};
- }
-
- wgpu::BackendType AdapterBase::GetBackendType() const {
- return mBackend;
- }
-
- wgpu::AdapterType AdapterBase::GetAdapterType() const {
- return mAdapterType;
- }
-
- const std::string& AdapterBase::GetDriverDescription() const {
- return mDriverDescription;
- }
-
- const PCIInfo& AdapterBase::GetPCIInfo() const {
- return mPCIInfo;
- }
-
- InstanceBase* AdapterBase::GetInstance() const {
- return mInstance;
- }
-
- FeaturesSet AdapterBase::GetSupportedFeatures() const {
- return mSupportedFeatures;
- }
-
- bool AdapterBase::SupportsAllRequestedFeatures(
- const std::vector<const char*>& requestedFeatures) const {
- for (const char* featureStr : requestedFeatures) {
- Feature featureEnum = mInstance->FeatureNameToEnum(featureStr);
- if (featureEnum == Feature::InvalidEnum) {
- return false;
- }
- if (!mSupportedFeatures.IsEnabled(featureEnum)) {
- return false;
- }
- }
- return true;
- }
-
- WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
- WGPUDeviceProperties adapterProperties = {};
- adapterProperties.deviceID = mPCIInfo.deviceId;
- adapterProperties.vendorID = mPCIInfo.vendorId;
- adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
-
- mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
- // This is OK for now because there are no limit feature structs.
- // If we add additional structs, the caller will need to provide memory
- // to store them (ex. by calling GetLimits directly instead). Currently,
- // we keep this function as it's only used internally in Chromium to
- // send the adapter properties across the wire.
- GetLimits(FromAPI(&adapterProperties.limits));
- return adapterProperties;
- }
-
- bool AdapterBase::GetLimits(SupportedLimits* limits) const {
- ASSERT(limits != nullptr);
- if (limits->nextInChain != nullptr) {
- return false;
- }
- if (mUseTieredLimits) {
- limits->limits = ApplyLimitTiers(mLimits.v1);
- } else {
- limits->limits = mLimits.v1;
- }
- return true;
- }
-
- DeviceBase* AdapterBase::CreateDevice(const DawnDeviceDescriptor* descriptor) {
- DeviceBase* result = nullptr;
-
- if (mInstance->ConsumedError(CreateDeviceInternal(&result, descriptor))) {
- return nullptr;
- }
-
- return result;
- }
-
- void AdapterBase::RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- DeviceBase* device = nullptr;
- MaybeError err = CreateDeviceInternal(&device, descriptor);
-
- if (err.IsError()) {
- std::unique_ptr<ErrorData> errorData = err.AcquireError();
- callback(WGPURequestDeviceStatus_Error, ToAPI(device),
- errorData->GetFormattedMessage().c_str(), userdata);
- return;
- }
- WGPURequestDeviceStatus status =
- device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
- callback(status, ToAPI(device), nullptr, userdata);
- }
-
- MaybeError AdapterBase::CreateDeviceInternal(DeviceBase** result,
- const DawnDeviceDescriptor* descriptor) {
- if (descriptor != nullptr) {
- for (const char* featureStr : descriptor->requiredFeatures) {
- Feature featureEnum = mInstance->FeatureNameToEnum(featureStr);
- DAWN_INVALID_IF(featureEnum == Feature::InvalidEnum,
- "Requested feature %s is unknown.", featureStr);
- DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(featureEnum),
- "Requested feature %s is disabled.", featureStr);
- }
- }
-
- if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
- DAWN_TRY_CONTEXT(
- ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
- FromAPI(descriptor->requiredLimits)->limits),
- "validating required limits");
-
- DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
- "nextInChain is not nullptr.");
- }
-
- DAWN_TRY_ASSIGN(*result, CreateDeviceImpl(descriptor));
- return {};
- }
-
- void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
- mUseTieredLimits = useTieredLimits;
- }
-
- void AdapterBase::ResetInternalDeviceForTesting() {
- mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
- }
-
- MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
- return DAWN_INTERNAL_ERROR(
- "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.h b/chromium/third_party/dawn/src/dawn_native/Adapter.h
deleted file mode 100644
index 1b9286e8733..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ADAPTER_H_
-#define DAWNNATIVE_ADAPTER_H_
-
-#include "dawn_native/DawnNative.h"
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Features.h"
-#include "dawn_native/Limits.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <string>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- class AdapterBase {
- public:
- AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
- virtual ~AdapterBase() = default;
-
- MaybeError Initialize();
-
- wgpu::BackendType GetBackendType() const;
- wgpu::AdapterType GetAdapterType() const;
- const std::string& GetDriverDescription() const;
- const PCIInfo& GetPCIInfo() const;
- InstanceBase* GetInstance() const;
-
- DeviceBase* CreateDevice(const DawnDeviceDescriptor* descriptor = nullptr);
-
- void RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
-
- void ResetInternalDeviceForTesting();
-
- FeaturesSet GetSupportedFeatures() const;
- bool SupportsAllRequestedFeatures(const std::vector<const char*>& requestedFeatures) const;
- WGPUDeviceProperties GetAdapterProperties() const;
-
- bool GetLimits(SupportedLimits* limits) const;
-
- void SetUseTieredLimits(bool useTieredLimits);
-
- virtual bool SupportsExternalImages() const = 0;
-
- protected:
- PCIInfo mPCIInfo = {};
- wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
- std::string mDriverDescription;
- FeaturesSet mSupportedFeatures;
-
- private:
- virtual ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) = 0;
-
- virtual MaybeError InitializeImpl() = 0;
-
- // Check base WebGPU features and discover supported featurees.
- virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
-
- // Check base WebGPU limits and populate supported limits.
- virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
-
- MaybeError CreateDeviceInternal(DeviceBase** result,
- const DawnDeviceDescriptor* descriptor);
-
- virtual MaybeError ResetInternalDeviceForTestingImpl();
- InstanceBase* mInstance = nullptr;
- wgpu::BackendType mBackend;
- CombinedLimits mLimits;
- bool mUseTieredLimits = false;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/AsyncTask.cpp b/chromium/third_party/dawn/src/dawn_native/AsyncTask.cpp
deleted file mode 100644
index 84f8764b234..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/AsyncTask.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-#include "dawn_native/AsyncTask.h"
-
-#include "dawn_platform/DawnPlatform.h"
-
-namespace dawn_native {
-
- AsyncTaskManager::AsyncTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool)
- : mWorkerTaskPool(workerTaskPool) {
- }
-
- void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
- // If these allocations becomes expensive, we can slab-allocate tasks.
- Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
- waitableTask->taskManager = this;
- waitableTask->asyncTask = std::move(asyncTask);
-
- {
- // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
- // and we may remove waitableTask objects from mPendingTasks in either main thread
- // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
- // protected by a mutex.
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- mPendingTasks.emplace(waitableTask.Get(), waitableTask);
- }
-
- // Ref the task since it is accessed inside the worker function.
- // The worker function will acquire and release the task upon completion.
- waitableTask->Reference();
- waitableTask->waitableEvent =
- mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
- }
-
- void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- auto iter = mPendingTasks.find(task);
- if (iter != mPendingTasks.end()) {
- mPendingTasks.erase(iter);
- }
- }
-
- void AsyncTaskManager::WaitAllPendingTasks() {
- std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
-
- {
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- allPendingTasks.swap(mPendingTasks);
- }
-
- for (auto& keyValue : allPendingTasks) {
- keyValue.second->waitableEvent->Wait();
- }
- }
-
- bool AsyncTaskManager::HasPendingTasks() {
- std::lock_guard<std::mutex> lock(mPendingTasksMutex);
- return !mPendingTasks.empty();
- }
-
- void AsyncTaskManager::DoWaitableTask(void* task) {
- Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
- waitableTask->asyncTask();
- waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/AsyncTask.h b/chromium/third_party/dawn/src/dawn_native/AsyncTask.h
deleted file mode 100644
index 1ae9344e2b3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/AsyncTask.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ASYC_TASK_H_
-#define DAWNNATIVE_ASYC_TASK_H_
-
-#include <functional>
-#include <memory>
-#include <mutex>
-#include <unordered_map>
-
-#include "common/RefCounted.h"
-
-namespace dawn_platform {
- class WaitableEvent;
- class WorkerTaskPool;
-} // namespace dawn_platform
-
-namespace dawn_native {
-
- // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
- // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
- // shutting down the device. RunNow() could be used for more advanced scenarios, for example
- // always doing ShaderModule initial compilation asynchronously, but being able to steal the
- // task if we need it for synchronous pipeline compilation.
- using AsyncTask = std::function<void()>;
-
- class AsyncTaskManager {
- public:
- explicit AsyncTaskManager(dawn_platform::WorkerTaskPool* workerTaskPool);
-
- void PostTask(AsyncTask asyncTask);
- void WaitAllPendingTasks();
- bool HasPendingTasks();
-
- private:
- class WaitableTask : public RefCounted {
- public:
- AsyncTask asyncTask;
- AsyncTaskManager* taskManager;
- std::unique_ptr<dawn_platform::WaitableEvent> waitableEvent;
- };
-
- static void DoWaitableTask(void* task);
- void HandleTaskCompletion(WaitableTask* task);
-
- std::mutex mPendingTasksMutex;
- std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
- dawn_platform::WorkerTaskPool* mWorkerTaskPool;
- };
-
-} // namespace dawn_native
-
-#endif
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
deleted file mode 100644
index 427db42bb92..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/AttachmentState.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/Texture.h"
-
-namespace dawn_native {
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(
- const RenderBundleEncoderDescriptor* descriptor)
- : mSampleCount(descriptor->sampleCount) {
- ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = descriptor->colorFormats[static_cast<uint8_t>(i)];
- }
- mDepthStencilFormat = descriptor->depthStencilFormat;
- }
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
- : mSampleCount(descriptor->multisample.count) {
- if (descriptor->fragment != nullptr) {
- ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
- ++i) {
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
- }
- }
- if (descriptor->depthStencil != nullptr) {
- mDepthStencilFormat = descriptor->depthStencil->format;
- }
- }
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
- ++i) {
- TextureViewBase* attachment =
- descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = attachment->GetFormat().format;
- if (mSampleCount == 0) {
- mSampleCount = attachment->GetTexture()->GetSampleCount();
- } else {
- ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
- }
- }
- if (descriptor->depthStencilAttachment != nullptr) {
- TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
- mDepthStencilFormat = attachment->GetFormat().format;
- if (mSampleCount == 0) {
- mSampleCount = attachment->GetTexture()->GetSampleCount();
- } else {
- ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
- }
- }
- ASSERT(mSampleCount > 0);
- }
-
- AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
- default;
-
- size_t AttachmentStateBlueprint::HashFunc::operator()(
- const AttachmentStateBlueprint* attachmentState) const {
- size_t hash = 0;
-
- // Hash color formats
- HashCombine(&hash, attachmentState->mColorAttachmentsSet);
- for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
- HashCombine(&hash, attachmentState->mColorFormats[i]);
- }
-
- // Hash depth stencil attachment
- HashCombine(&hash, attachmentState->mDepthStencilFormat);
-
- // Hash sample count
- HashCombine(&hash, attachmentState->mSampleCount);
-
- return hash;
- }
-
- bool AttachmentStateBlueprint::EqualityFunc::operator()(
- const AttachmentStateBlueprint* a,
- const AttachmentStateBlueprint* b) const {
- // Check set attachments
- if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
- return false;
- }
-
- // Check color formats
- for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
- if (a->mColorFormats[i] != b->mColorFormats[i]) {
- return false;
- }
- }
-
- // Check depth stencil format
- if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
- return false;
- }
-
- // Check sample count
- if (a->mSampleCount != b->mSampleCount) {
- return false;
- }
-
- return true;
- }
-
- AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
- : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
- }
-
- AttachmentState::~AttachmentState() {
- GetDevice()->UncacheAttachmentState(this);
- }
-
- size_t AttachmentState::ComputeContentHash() {
- // TODO(dawn:549): skip this traversal and reuse the blueprint.
- return AttachmentStateBlueprint::HashFunc()(this);
- }
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
- AttachmentState::GetColorAttachmentsMask() const {
- return mColorAttachmentsSet;
- }
-
- wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
- ColorAttachmentIndex index) const {
- ASSERT(mColorAttachmentsSet[index]);
- return mColorFormats[index];
- }
-
- bool AttachmentState::HasDepthStencilAttachment() const {
- return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
- }
-
- wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
- ASSERT(HasDepthStencilAttachment());
- return mDepthStencilFormat;
- }
-
- uint32_t AttachmentState::GetSampleCount() const {
- return mSampleCount;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
deleted file mode 100644
index 6c1e434971c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ATTACHMENTSTATE_H_
-#define DAWNNATIVE_ATTACHMENTSTATE_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
- // can be constructed by copying the blueprint state instead of traversing descriptors.
- // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
- class AttachmentStateBlueprint {
- public:
- // Note: Descriptors must be validated before the AttachmentState is constructed.
- explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
- explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
- explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
-
- AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
-
- // Functors necessary for the unordered_set<AttachmentState*>-based cache.
- struct HashFunc {
- size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
- };
- struct EqualityFunc {
- bool operator()(const AttachmentStateBlueprint* a,
- const AttachmentStateBlueprint* b) const;
- };
-
- protected:
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
- ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
- // Default (texture format Undefined) indicates there is no depth stencil attachment.
- wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
- uint32_t mSampleCount = 0;
- };
-
- class AttachmentState final : public AttachmentStateBlueprint,
- public ObjectBase,
- public CachedObject {
- public:
- AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
- wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
- bool HasDepthStencilAttachment() const;
- wgpu::TextureFormat GetDepthStencilFormat() const;
- uint32_t GetSampleCount() const;
-
- size_t ComputeContentHash() override;
-
- private:
- ~AttachmentState() override;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ATTACHMENTSTATE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index 82913a858dc..e811642bccc 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Dawn Authors
+# Copyright 2022 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,766 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("//build_overrides/build.gni")
-import("${dawn_root}/generator/dawn_generator.gni")
-import("${dawn_root}/scripts/dawn_component.gni")
-import("${dawn_root}/scripts/dawn_features.gni")
-
-# Import mac_deployment_target
-if (is_mac) {
- if (dawn_has_build) {
- import("//build/config/mac/mac_sdk.gni")
- } else {
- mac_deployment_target = "10.11.0"
- }
-}
-
-# The VVLs are an optional dependency, only use it if the path has been set.
-enable_vulkan_validation_layers = dawn_enable_vulkan_validation_layers &&
- dawn_vulkan_validation_layers_dir != ""
-if (enable_vulkan_validation_layers) {
- import("//build_overrides/vulkan_validation_layers.gni")
-}
-
-# ANGLE is an optional dependency; only use it if the path has been set.
-use_angle = dawn_use_angle && defined(dawn_angle_dir)
-
-# Swiftshader is an optional dependency, only use it if the path has been set.
-use_swiftshader = dawn_use_swiftshader && dawn_swiftshader_dir != ""
-if (use_swiftshader) {
- assert(dawn_enable_vulkan,
- "dawn_use_swiftshader requires dawn_enable_vulkan=true")
- import("${dawn_swiftshader_dir}/src/Vulkan/vulkan.gni")
-}
-
-# The Vulkan loader is an optional dependency, only use it if the path has been
-# set.
-if (dawn_enable_vulkan) {
- enable_vulkan_loader =
- dawn_enable_vulkan_loader && dawn_vulkan_loader_dir != ""
-}
-
-group("dawn_abseil") {
- # When build_with_chromium=true we need to include "//third_party/abseil-cpp:absl" while
- # it's beneficial to be more specific with standalone Dawn, especially when it comes to
- # including it as a dependency in other projects (such as Skia).
- if (build_with_chromium) {
- public_deps = [ "$dawn_abseil_dir:absl" ]
- } else {
- public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:str_format" ]
- }
-}
-
-config("dawn_native_internal") {
- configs = [ "${dawn_root}/src/common:dawn_internal" ]
-
- # Suppress warnings that Metal isn't in the deployment target of Chrome:
- # initialization of the Metal backend is behind a IsMetalSupported check so
- # Dawn won't call Metal functions on macOS 10.10.
- # At the time this is written Chromium supports 10.10.0 and above, so if we
- # aren't on 10.11 it means we are on 10.11 and above, and Metal is available.
- # Skipping this check on 10.11 and above is important as it allows getting
- # proper compilation warning when using 10.12 and above feature for example.
- # TODO(crbug.com/1004024): Consider using API_AVAILABLE annotations on all
- # metal code in dawn once crbug.com/1004024 is sorted out if Chromium still
- # supports 10.10 then.
- if (is_mac && mac_deployment_target == "10.10.0") {
- cflags_objcc = [ "-Wno-unguarded-availability" ]
- }
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_native") {
+ public_deps = [ "../dawn/native" ]
}
-
-config("dawn_native_weak_framework") {
- if (is_mac && dawn_enable_metal) {
- weak_frameworks = [ "Metal.framework" ]
- }
-}
-
-# Config that adds the @executable_path rpath if needed so that Swiftshader or the Vulkan loader are found.
-config("dawn_native_vulkan_rpath") {
- if (is_mac && dawn_enable_vulkan &&
- (use_swiftshader || enable_vulkan_loader)) {
- ldflags = [
- "-rpath",
- "@executable_path/",
- ]
- }
-}
-
-dawn_json_generator("dawn_native_utils_gen") {
- target = "dawn_native_utils"
- outputs = [
- "src/dawn_native/ChainUtils_autogen.h",
- "src/dawn_native/ChainUtils_autogen.cpp",
- "src/dawn_native/ProcTable.cpp",
- "src/dawn_native/dawn_platform_autogen.h",
- "src/dawn_native/wgpu_structs_autogen.h",
- "src/dawn_native/wgpu_structs_autogen.cpp",
- "src/dawn_native/ValidationUtils_autogen.h",
- "src/dawn_native/ValidationUtils_autogen.cpp",
- "src/dawn_native/webgpu_absl_format_autogen.h",
- "src/dawn_native/webgpu_absl_format_autogen.cpp",
- "src/dawn_native/ObjectType_autogen.h",
- "src/dawn_native/ObjectType_autogen.cpp",
- ]
-}
-
-if (dawn_enable_opengl) {
- dawn_generator("dawn_native_opengl_loader_gen") {
- script = "${dawn_root}/generator/opengl_loader_generator.py"
- args = [
- "--gl-xml",
- rebase_path("${dawn_root}/third_party/khronos/gl.xml", root_build_dir),
- "--supported-extensions",
- rebase_path("opengl/supported_extensions.json", root_build_dir),
- ]
- outputs = [
- "src/dawn_native/opengl/OpenGLFunctionsBase_autogen.cpp",
- "src/dawn_native/opengl/OpenGLFunctionsBase_autogen.h",
- "src/dawn_native/opengl/opengl_platform_autogen.h",
- ]
- }
-}
-
-# Public dawn_native headers so they can be publicly visible for
-# dependencies of dawn_native
-source_set("dawn_native_headers") {
- public_deps = [ "${dawn_root}/src/dawn:dawncpp_headers" ]
- all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
- sources = [
- "${dawn_root}/src/include/dawn_native/DawnNative.h",
- "${dawn_root}/src/include/dawn_native/dawn_native_export.h",
-
- # Include all backend's public headers so that dependencies can include
- # them even when the backends are disabled.
- "${dawn_root}/src/include/dawn_native/D3D12Backend.h",
- "${dawn_root}/src/include/dawn_native/MetalBackend.h",
- "${dawn_root}/src/include/dawn_native/NullBackend.h",
- "${dawn_root}/src/include/dawn_native/OpenGLBackend.h",
- "${dawn_root}/src/include/dawn_native/VulkanBackend.h",
- ]
-}
-
-# The meat of the compilation for dawn_native so that we can cheaply have
-# shared_library / static_library versions of it. It compiles all the files
-# except those that define exported symbols.
-source_set("dawn_native_sources") {
- deps = [
- ":dawn_native_headers",
- ":dawn_native_utils_gen",
- "${dawn_root}/src/common",
- "${dawn_spirv_tools_dir}:spvtools_opt",
- "${dawn_spirv_tools_dir}:spvtools_val",
- "${dawn_tint_dir}/src:libtint",
- ]
- if (dawn_use_spirv_cross) {
- deps += [ "${dawn_root}/third_party/gn/spirv_cross:spirv_cross" ]
- }
- defines = []
- libs = []
- data_deps = []
-
- configs += [ ":dawn_native_internal" ]
-
- # Dependencies that are needed to compile dawn_native entry points in
- # FooBackend.cpp need to be public deps so they are propagated to the
- # dawn_native target
- public_deps = [
- ":dawn_abseil",
- "${dawn_root}/src/dawn_platform",
- ]
-
- sources = get_target_outputs(":dawn_native_utils_gen")
- sources += [
- "Adapter.cpp",
- "Adapter.h",
- "AsyncTask.cpp",
- "AsyncTask.h",
- "AttachmentState.cpp",
- "AttachmentState.h",
- "BackendConnection.cpp",
- "BackendConnection.h",
- "BindGroup.cpp",
- "BindGroup.h",
- "BindGroupLayout.cpp",
- "BindGroupLayout.h",
- "BindGroupTracker.h",
- "BindingInfo.cpp",
- "BindingInfo.h",
- "BuddyAllocator.cpp",
- "BuddyAllocator.h",
- "BuddyMemoryAllocator.cpp",
- "BuddyMemoryAllocator.h",
- "Buffer.cpp",
- "Buffer.h",
- "CachedObject.cpp",
- "CachedObject.h",
- "CallbackTaskManager.cpp",
- "CallbackTaskManager.h",
- "CommandAllocator.cpp",
- "CommandAllocator.h",
- "CommandBuffer.cpp",
- "CommandBuffer.h",
- "CommandBufferStateTracker.cpp",
- "CommandBufferStateTracker.h",
- "CommandEncoder.cpp",
- "CommandEncoder.h",
- "CommandValidation.cpp",
- "CommandValidation.h",
- "Commands.cpp",
- "Commands.h",
- "CompilationMessages.cpp",
- "CompilationMessages.h",
- "ComputePassEncoder.cpp",
- "ComputePassEncoder.h",
- "ComputePipeline.cpp",
- "ComputePipeline.h",
- "CopyTextureForBrowserHelper.cpp",
- "CopyTextureForBrowserHelper.h",
- "CreatePipelineAsyncTask.cpp",
- "CreatePipelineAsyncTask.h",
- "Device.cpp",
- "Device.h",
- "DynamicUploader.cpp",
- "DynamicUploader.h",
- "EncodingContext.cpp",
- "EncodingContext.h",
- "EnumClassBitmasks.h",
- "EnumMaskIterator.h",
- "Error.cpp",
- "Error.h",
- "ErrorData.cpp",
- "ErrorData.h",
- "ErrorInjector.cpp",
- "ErrorInjector.h",
- "ErrorScope.cpp",
- "ErrorScope.h",
- "ExternalTexture.cpp",
- "ExternalTexture.h",
- "Features.cpp",
- "Features.h",
- "Format.cpp",
- "Format.h",
- "Forward.h",
- "IndirectDrawMetadata.cpp",
- "IndirectDrawMetadata.h",
- "IndirectDrawValidationEncoder.cpp",
- "IndirectDrawValidationEncoder.h",
- "Instance.cpp",
- "Instance.h",
- "IntegerTypes.h",
- "InternalPipelineStore.cpp",
- "InternalPipelineStore.h",
- "Limits.cpp",
- "Limits.h",
- "ObjectBase.cpp",
- "ObjectBase.h",
- "ObjectContentHasher.cpp",
- "ObjectContentHasher.h",
- "PassResourceUsage.h",
- "PassResourceUsageTracker.cpp",
- "PassResourceUsageTracker.h",
- "PerStage.cpp",
- "PerStage.h",
- "PersistentCache.cpp",
- "PersistentCache.h",
- "Pipeline.cpp",
- "Pipeline.h",
- "PipelineLayout.cpp",
- "PipelineLayout.h",
- "PooledResourceMemoryAllocator.cpp",
- "PooledResourceMemoryAllocator.h",
- "ProgrammableEncoder.cpp",
- "ProgrammableEncoder.h",
- "QueryHelper.cpp",
- "QueryHelper.h",
- "QuerySet.cpp",
- "QuerySet.h",
- "Queue.cpp",
- "Queue.h",
- "RenderBundle.cpp",
- "RenderBundle.h",
- "RenderBundleEncoder.cpp",
- "RenderBundleEncoder.h",
- "RenderEncoderBase.cpp",
- "RenderEncoderBase.h",
- "RenderPassEncoder.cpp",
- "RenderPassEncoder.h",
- "RenderPipeline.cpp",
- "RenderPipeline.h",
- "ResourceHeap.h",
- "ResourceHeapAllocator.h",
- "ResourceMemoryAllocation.cpp",
- "ResourceMemoryAllocation.h",
- "RingBufferAllocator.cpp",
- "RingBufferAllocator.h",
- "Sampler.cpp",
- "Sampler.h",
- "ScratchBuffer.cpp",
- "ScratchBuffer.h",
- "ShaderModule.cpp",
- "ShaderModule.h",
- "StagingBuffer.cpp",
- "StagingBuffer.h",
- "Subresource.cpp",
- "Subresource.h",
- "SubresourceStorage.h",
- "Surface.cpp",
- "Surface.h",
- "SwapChain.cpp",
- "SwapChain.h",
- "Texture.cpp",
- "Texture.h",
- "TintUtils.cpp",
- "TintUtils.h",
- "ToBackend.h",
- "Toggles.cpp",
- "Toggles.h",
- "VertexFormat.cpp",
- "VertexFormat.h",
- "dawn_platform.h",
- "utils/WGPUHelpers.cpp",
- "utils/WGPUHelpers.h",
- ]
-
- if (dawn_use_x11) {
- libs += [ "X11" ]
- sources += [
- "XlibXcbFunctions.cpp",
- "XlibXcbFunctions.h",
- ]
- }
-
- # Only win32 app needs to link with user32.lib
- # In UWP, all availiable APIs are defined in WindowsApp.lib
- if (is_win && !dawn_is_winuwp) {
- libs += [ "user32.lib" ]
- }
-
- if (dawn_is_winuwp && is_debug) {
- # DXGIGetDebugInterface1 is defined in dxgi.lib
- # But this API is tagged as a development-only capability
- # which implies that linking to this function will cause
- # the application to fail Windows store certification
- # So we only link to it in debug build when compiling for UWP.
- # In win32 we load dxgi.dll using LoadLibrary
- # so no need for static linking.
- libs += [ "dxgi.lib" ]
- }
-
- # TODO(dawn:766):
- # Should link dxcompiler.lib and WinPixEventRuntime_UAP.lib in UWP
- # Somehow use dxcompiler.lib makes CoreApp unable to activate
- # WinPIX should be added as third party tools and linked statically
-
- if (dawn_enable_d3d12) {
- libs += [ "dxguid.lib" ]
- sources += [
- "d3d12/AdapterD3D12.cpp",
- "d3d12/AdapterD3D12.h",
- "d3d12/BackendD3D12.cpp",
- "d3d12/BackendD3D12.h",
- "d3d12/BindGroupD3D12.cpp",
- "d3d12/BindGroupD3D12.h",
- "d3d12/BindGroupLayoutD3D12.cpp",
- "d3d12/BindGroupLayoutD3D12.h",
- "d3d12/BufferD3D12.cpp",
- "d3d12/BufferD3D12.h",
- "d3d12/CPUDescriptorHeapAllocationD3D12.cpp",
- "d3d12/CPUDescriptorHeapAllocationD3D12.h",
- "d3d12/CommandAllocatorManager.cpp",
- "d3d12/CommandAllocatorManager.h",
- "d3d12/CommandBufferD3D12.cpp",
- "d3d12/CommandBufferD3D12.h",
- "d3d12/CommandRecordingContext.cpp",
- "d3d12/CommandRecordingContext.h",
- "d3d12/ComputePipelineD3D12.cpp",
- "d3d12/ComputePipelineD3D12.h",
- "d3d12/D3D11on12Util.cpp",
- "d3d12/D3D11on12Util.h",
- "d3d12/D3D12Error.cpp",
- "d3d12/D3D12Error.h",
- "d3d12/D3D12Info.cpp",
- "d3d12/D3D12Info.h",
- "d3d12/DeviceD3D12.cpp",
- "d3d12/DeviceD3D12.h",
- "d3d12/Forward.h",
- "d3d12/GPUDescriptorHeapAllocationD3D12.cpp",
- "d3d12/GPUDescriptorHeapAllocationD3D12.h",
- "d3d12/HeapAllocatorD3D12.cpp",
- "d3d12/HeapAllocatorD3D12.h",
- "d3d12/HeapD3D12.cpp",
- "d3d12/HeapD3D12.h",
- "d3d12/IntegerTypes.h",
- "d3d12/NativeSwapChainImplD3D12.cpp",
- "d3d12/NativeSwapChainImplD3D12.h",
- "d3d12/PageableD3D12.cpp",
- "d3d12/PageableD3D12.h",
- "d3d12/PipelineLayoutD3D12.cpp",
- "d3d12/PipelineLayoutD3D12.h",
- "d3d12/PlatformFunctions.cpp",
- "d3d12/PlatformFunctions.h",
- "d3d12/QuerySetD3D12.cpp",
- "d3d12/QuerySetD3D12.h",
- "d3d12/QueueD3D12.cpp",
- "d3d12/QueueD3D12.h",
- "d3d12/RenderPassBuilderD3D12.cpp",
- "d3d12/RenderPassBuilderD3D12.h",
- "d3d12/RenderPipelineD3D12.cpp",
- "d3d12/RenderPipelineD3D12.h",
- "d3d12/ResidencyManagerD3D12.cpp",
- "d3d12/ResidencyManagerD3D12.h",
- "d3d12/ResourceAllocatorManagerD3D12.cpp",
- "d3d12/ResourceAllocatorManagerD3D12.h",
- "d3d12/ResourceHeapAllocationD3D12.cpp",
- "d3d12/ResourceHeapAllocationD3D12.h",
- "d3d12/SamplerD3D12.cpp",
- "d3d12/SamplerD3D12.h",
- "d3d12/SamplerHeapCacheD3D12.cpp",
- "d3d12/SamplerHeapCacheD3D12.h",
- "d3d12/ShaderModuleD3D12.cpp",
- "d3d12/ShaderModuleD3D12.h",
- "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
- "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
- "d3d12/StagingBufferD3D12.cpp",
- "d3d12/StagingBufferD3D12.h",
- "d3d12/StagingDescriptorAllocatorD3D12.cpp",
- "d3d12/StagingDescriptorAllocatorD3D12.h",
- "d3d12/SwapChainD3D12.cpp",
- "d3d12/SwapChainD3D12.h",
- "d3d12/TextureCopySplitter.cpp",
- "d3d12/TextureCopySplitter.h",
- "d3d12/TextureD3D12.cpp",
- "d3d12/TextureD3D12.h",
- "d3d12/UtilsD3D12.cpp",
- "d3d12/UtilsD3D12.h",
- "d3d12/d3d12_platform.h",
- ]
- }
-
- if (dawn_enable_metal) {
- frameworks = [
- "Cocoa.framework",
- "IOKit.framework",
- "IOSurface.framework",
- "QuartzCore.framework",
- ]
- sources += [
- "Surface_metal.mm",
- "metal/BackendMTL.h",
- "metal/BackendMTL.mm",
- "metal/BindGroupLayoutMTL.h",
- "metal/BindGroupLayoutMTL.mm",
- "metal/BindGroupMTL.h",
- "metal/BindGroupMTL.mm",
- "metal/BufferMTL.h",
- "metal/BufferMTL.mm",
- "metal/CommandBufferMTL.h",
- "metal/CommandBufferMTL.mm",
- "metal/CommandRecordingContext.h",
- "metal/CommandRecordingContext.mm",
- "metal/ComputePipelineMTL.h",
- "metal/ComputePipelineMTL.mm",
- "metal/DeviceMTL.h",
- "metal/DeviceMTL.mm",
- "metal/Forward.h",
- "metal/PipelineLayoutMTL.h",
- "metal/PipelineLayoutMTL.mm",
- "metal/QuerySetMTL.h",
- "metal/QuerySetMTL.mm",
- "metal/QueueMTL.h",
- "metal/QueueMTL.mm",
- "metal/RenderPipelineMTL.h",
- "metal/RenderPipelineMTL.mm",
- "metal/SamplerMTL.h",
- "metal/SamplerMTL.mm",
- "metal/ShaderModuleMTL.h",
- "metal/ShaderModuleMTL.mm",
- "metal/StagingBufferMTL.h",
- "metal/StagingBufferMTL.mm",
- "metal/SwapChainMTL.h",
- "metal/SwapChainMTL.mm",
- "metal/TextureMTL.h",
- "metal/TextureMTL.mm",
- "metal/UtilsMetal.h",
- "metal/UtilsMetal.mm",
- ]
- }
-
- if (dawn_enable_null) {
- sources += [
- "null/DeviceNull.cpp",
- "null/DeviceNull.h",
- ]
- }
-
- if (dawn_enable_opengl || dawn_enable_vulkan) {
- sources += [
- "SpirvValidation.cpp",
- "SpirvValidation.h",
- ]
- }
-
- if (dawn_enable_opengl) {
- public_deps += [
- ":dawn_native_opengl_loader_gen",
- "${dawn_root}/third_party/khronos:khronos_platform",
- ]
- sources += get_target_outputs(":dawn_native_opengl_loader_gen")
- sources += [
- "opengl/BackendGL.cpp",
- "opengl/BackendGL.h",
- "opengl/BindGroupGL.cpp",
- "opengl/BindGroupGL.h",
- "opengl/BindGroupLayoutGL.cpp",
- "opengl/BindGroupLayoutGL.h",
- "opengl/BufferGL.cpp",
- "opengl/BufferGL.h",
- "opengl/CommandBufferGL.cpp",
- "opengl/CommandBufferGL.h",
- "opengl/ComputePipelineGL.cpp",
- "opengl/ComputePipelineGL.h",
- "opengl/DeviceGL.cpp",
- "opengl/DeviceGL.h",
- "opengl/Forward.h",
- "opengl/GLFormat.cpp",
- "opengl/GLFormat.h",
- "opengl/NativeSwapChainImplGL.cpp",
- "opengl/NativeSwapChainImplGL.h",
- "opengl/OpenGLFunctions.cpp",
- "opengl/OpenGLFunctions.h",
- "opengl/OpenGLVersion.cpp",
- "opengl/OpenGLVersion.h",
- "opengl/PersistentPipelineStateGL.cpp",
- "opengl/PersistentPipelineStateGL.h",
- "opengl/PipelineGL.cpp",
- "opengl/PipelineGL.h",
- "opengl/PipelineLayoutGL.cpp",
- "opengl/PipelineLayoutGL.h",
- "opengl/QuerySetGL.cpp",
- "opengl/QuerySetGL.h",
- "opengl/QueueGL.cpp",
- "opengl/QueueGL.h",
- "opengl/RenderPipelineGL.cpp",
- "opengl/RenderPipelineGL.h",
- "opengl/SamplerGL.cpp",
- "opengl/SamplerGL.h",
- "opengl/ShaderModuleGL.cpp",
- "opengl/ShaderModuleGL.h",
- "opengl/SpirvUtils.cpp",
- "opengl/SpirvUtils.h",
- "opengl/SwapChainGL.cpp",
- "opengl/SwapChainGL.h",
- "opengl/TextureGL.cpp",
- "opengl/TextureGL.h",
- "opengl/UtilsGL.cpp",
- "opengl/UtilsGL.h",
- "opengl/opengl_platform.h",
- ]
- }
-
- if (dawn_enable_vulkan) {
- public_deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
- sources += [
- "vulkan/AdapterVk.cpp",
- "vulkan/AdapterVk.h",
- "vulkan/BackendVk.cpp",
- "vulkan/BackendVk.h",
- "vulkan/BindGroupLayoutVk.cpp",
- "vulkan/BindGroupLayoutVk.h",
- "vulkan/BindGroupVk.cpp",
- "vulkan/BindGroupVk.h",
- "vulkan/BufferVk.cpp",
- "vulkan/BufferVk.h",
- "vulkan/CommandBufferVk.cpp",
- "vulkan/CommandBufferVk.h",
- "vulkan/CommandRecordingContext.h",
- "vulkan/ComputePipelineVk.cpp",
- "vulkan/ComputePipelineVk.h",
- "vulkan/DescriptorSetAllocation.h",
- "vulkan/DescriptorSetAllocator.cpp",
- "vulkan/DescriptorSetAllocator.h",
- "vulkan/DeviceVk.cpp",
- "vulkan/DeviceVk.h",
- "vulkan/ExternalHandle.h",
- "vulkan/FencedDeleter.cpp",
- "vulkan/FencedDeleter.h",
- "vulkan/Forward.h",
- "vulkan/NativeSwapChainImplVk.cpp",
- "vulkan/NativeSwapChainImplVk.h",
- "vulkan/PipelineLayoutVk.cpp",
- "vulkan/PipelineLayoutVk.h",
- "vulkan/QuerySetVk.cpp",
- "vulkan/QuerySetVk.h",
- "vulkan/QueueVk.cpp",
- "vulkan/QueueVk.h",
- "vulkan/RenderPassCache.cpp",
- "vulkan/RenderPassCache.h",
- "vulkan/RenderPipelineVk.cpp",
- "vulkan/RenderPipelineVk.h",
- "vulkan/ResourceHeapVk.cpp",
- "vulkan/ResourceHeapVk.h",
- "vulkan/ResourceMemoryAllocatorVk.cpp",
- "vulkan/ResourceMemoryAllocatorVk.h",
- "vulkan/SamplerVk.cpp",
- "vulkan/SamplerVk.h",
- "vulkan/ShaderModuleVk.cpp",
- "vulkan/ShaderModuleVk.h",
- "vulkan/StagingBufferVk.cpp",
- "vulkan/StagingBufferVk.h",
- "vulkan/SwapChainVk.cpp",
- "vulkan/SwapChainVk.h",
- "vulkan/TextureVk.cpp",
- "vulkan/TextureVk.h",
- "vulkan/UtilsVulkan.cpp",
- "vulkan/UtilsVulkan.h",
- "vulkan/VulkanError.cpp",
- "vulkan/VulkanError.h",
- "vulkan/VulkanExtensions.cpp",
- "vulkan/VulkanExtensions.h",
- "vulkan/VulkanFunctions.cpp",
- "vulkan/VulkanFunctions.h",
- "vulkan/VulkanInfo.cpp",
- "vulkan/VulkanInfo.h",
- "vulkan/external_memory/MemoryService.h",
- "vulkan/external_semaphore/SemaphoreService.h",
- ]
-
- if (is_chromeos) {
- sources += [
- "vulkan/external_memory/MemoryServiceDmaBuf.cpp",
- "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
- ]
- defines += [ "DAWN_USE_SYNC_FDS" ]
- } else if (is_linux) {
- sources += [
- "vulkan/external_memory/MemoryServiceOpaqueFD.cpp",
- "vulkan/external_semaphore/SemaphoreServiceFD.cpp",
- ]
- } else if (is_fuchsia) {
- sources += [
- "vulkan/external_memory/MemoryServiceZirconHandle.cpp",
- "vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp",
- ]
- } else {
- sources += [
- "vulkan/external_memory/MemoryServiceNull.cpp",
- "vulkan/external_semaphore/SemaphoreServiceNull.cpp",
- ]
- }
- if (build_with_chromium && is_fuchsia) {
- # Necessary to ensure that the Vulkan libraries will be in the
- # final Fuchsia package.
- data_deps = [
- "//third_party/fuchsia-sdk:vulkan_base",
- "//third_party/fuchsia-sdk:vulkan_validation",
-
- # NOTE: The line below is a work around for http://crbug.com/1001081
- "//third_party/fuchsia-sdk/sdk:trace_engine",
- ]
- }
- if (enable_vulkan_validation_layers) {
- defines += [
- "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS",
- "DAWN_VK_DATA_DIR=\"$vulkan_data_subdir\"",
- ]
- }
- if (enable_vulkan_loader) {
- data_deps += [ "${dawn_vulkan_loader_dir}:libvulkan" ]
- defines += [ "DAWN_ENABLE_VULKAN_LOADER" ]
- }
- if (use_swiftshader) {
- data_deps += [
- "${dawn_swiftshader_dir}/src/Vulkan:icd_file",
- "${dawn_swiftshader_dir}/src/Vulkan:swiftshader_libvulkan",
- ]
- defines += [
- "DAWN_ENABLE_SWIFTSHADER",
- "DAWN_SWIFTSHADER_VK_ICD_JSON=\"${swiftshader_icd_file_name}\"",
- ]
- }
- }
-
- if (use_angle) {
- data_deps += [
- "${dawn_angle_dir}:libEGL",
- "${dawn_angle_dir}:libGLESv2",
- ]
- }
-}
-
-# The static and shared libraries for dawn_native. Most of the files are
-# already compiled in dawn_native_sources, but we still need to compile
-# files defining exported symbols.
-dawn_component("dawn_native") {
- DEFINE_PREFIX = "DAWN_NATIVE"
-
- #Make headers publically visible
- public_deps = [ ":dawn_native_headers" ]
-
- deps = [
- ":dawn_native_sources",
- "${dawn_root}/src/common",
- ]
- sources = [ "DawnNative.cpp" ]
- configs = [ ":dawn_native_internal" ]
- public_configs = [
- ":dawn_native_weak_framework",
- ":dawn_native_vulkan_rpath",
- ]
-
- if (dawn_enable_d3d12) {
- sources += [ "d3d12/D3D12Backend.cpp" ]
- }
- if (dawn_enable_metal) {
- sources += [ "metal/MetalBackend.mm" ]
- }
- if (dawn_enable_null) {
- sources += [ "null/NullBackend.cpp" ]
- }
- if (dawn_enable_opengl) {
- sources += [ "opengl/OpenGLBackend.cpp" ]
- }
- if (dawn_enable_vulkan) {
- sources += [ "vulkan/VulkanBackend.cpp" ]
-
- if (enable_vulkan_validation_layers) {
- data_deps =
- [ "${dawn_vulkan_validation_layers_dir}:vulkan_validation_layers" ]
- if (!is_android) {
- data_deps +=
- [ "${dawn_vulkan_validation_layers_dir}:vulkan_gen_json_files" ]
- }
- }
- }
-}
-
-dawn_json_generator("webgpu_dawn_native_proc_gen") {
- target = "webgpu_dawn_native_proc"
- outputs = [ "src/dawn_native/webgpu_dawn_native_proc.cpp" ]
-}
-
-dawn_component("webgpu_dawn") {
- # For a single library - build `webgpu_dawn_shared` with GN args:
- # dawn_complete_static_libs = true - to package a single lib
- #
- # is_debug = false
- # - setting this to true makes library over 50Mb
- #
- # use_custom_libcxx = false
- # - Otherwise, libc++ symbols may conflict if the
- # library is used outside of Chromium.
- #
- # dawn_use_swiftshader = false
- # angle_enable_swiftshader = false
- # - SwiftShader can't be built without use_custom_libcxx.
- # It should be built separately.
- DEFINE_PREFIX = "WGPU"
-
- sources = get_target_outputs(":webgpu_dawn_native_proc_gen")
- deps = [
- ":dawn_native_static",
- ":webgpu_dawn_native_proc_gen",
- ]
+group("webgpu_dawn") {
+ public_deps = [ "../dawn/native:webgpu_dawn" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp b/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp
deleted file mode 100644
index 89379cb43d4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BackendConnection.h"
-
-namespace dawn_native {
-
- BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
- : mInstance(instance), mType(type) {
- }
-
- wgpu::BackendType BackendConnection::GetType() const {
- return mType;
- }
-
- InstanceBase* BackendConnection::GetInstance() const {
- return mInstance;
- }
-
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> BackendConnection::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options) {
- return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BackendConnection.h b/chromium/third_party/dawn/src/dawn_native/BackendConnection.h
deleted file mode 100644
index f17108ec585..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BackendConnection.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BACKENDCONNECTION_H_
-#define DAWNNATIVE_BACKENDCONNECTION_H_
-
-#include "dawn_native/Adapter.h"
-#include "dawn_native/DawnNative.h"
-
-#include <memory>
-
-namespace dawn_native {
-
- // An common interface for all backends. Mostly used to create adapters for a particular
- // backend.
- class BackendConnection {
- public:
- BackendConnection(InstanceBase* instance, wgpu::BackendType type);
- virtual ~BackendConnection() = default;
-
- wgpu::BackendType GetType() const;
- InstanceBase* GetInstance() const;
-
- // Returns all the adapters for the system that can be created by the backend, without extra
- // options (such as debug adapters, custom driver libraries, etc.)
- virtual std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() = 0;
-
- // Returns new adapters created with the backend-specific options.
- virtual ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options);
-
- private:
- InstanceBase* mInstance = nullptr;
- wgpu::BackendType mType;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BACKENDCONNECTION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
deleted file mode 100644
index f4471422584..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BindGroup.h"
-
-#include "common/Assert.h"
-#include "common/Math.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/Texture.h"
-
-namespace dawn_native {
-
- namespace {
-
- // Helper functions to perform binding-type specific validation
-
- MaybeError ValidateBufferBinding(const DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
-
- DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
- "Expected only buffer to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.buffer));
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
-
- wgpu::BufferUsage requiredUsage;
- uint64_t maxBindingSize;
- uint64_t requiredBindingAlignment;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- requiredUsage = wgpu::BufferUsage::Uniform;
- maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minUniformBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- requiredUsage = wgpu::BufferUsage::Storage;
- maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case kInternalStorageBufferBinding:
- requiredUsage = kInternalStorageBuffer;
- maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
- requiredBindingAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- uint64_t bufferSize = entry.buffer->GetSize();
-
- // Handle wgpu::WholeSize, avoiding overflows.
- DAWN_INVALID_IF(entry.offset > bufferSize,
- "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
- bufferSize, entry.buffer);
-
- uint64_t bindingSize =
- (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
-
- DAWN_INVALID_IF(bindingSize > bufferSize,
- "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
- bufferSize, entry.buffer);
-
- DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
-
- // Note that no overflow can happen because we already checked that
- // bufferSize >= bindingSize
- DAWN_INVALID_IF(
- entry.offset > bufferSize - bindingSize,
- "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
- entry.offset, bufferSize, bindingSize, entry.buffer);
-
- DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
- "Offset (%u) does not satisfy the minimum %s alignment (%u).",
- entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
-
- DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
- "Binding usage (%s) of %s doesn't match expected usage (%s).",
- entry.buffer->GetUsage(), entry.buffer, requiredUsage);
-
- DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
- "Binding size (%u) is smaller than the minimum binding size (%u).",
- bindingSize, bindingInfo.buffer.minBindingSize);
-
- DAWN_INVALID_IF(bindingSize > maxBindingSize,
- "Binding size (%u) is larger than the maximum binding size (%u).",
- bindingSize, maxBindingSize);
-
- return {};
- }
-
- MaybeError ValidateTextureBinding(DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
-
- DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
- "Expected only textureView to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.textureView));
-
- TextureViewBase* view = entry.textureView;
-
- Aspect aspect = view->GetAspects();
- // TODO(dawn:563): Format Aspects
- DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects selected in %s.", view);
-
- TextureBase* texture = view->GetTexture();
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Texture: {
- SampleTypeBit supportedTypes =
- texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
- SampleTypeBit requiredType =
- SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
-
- DAWN_INVALID_IF(
- !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
- "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
- texture->GetUsage(), texture);
-
- DAWN_INVALID_IF(
- texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
- "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
- texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
-
- // TODO(dawn:563): Improve error message.
- DAWN_INVALID_IF((supportedTypes & requiredType) == 0,
- "Texture component type usage mismatch.");
-
- DAWN_INVALID_IF(
- entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
- "Dimension (%s) of %s doesn't match the expected dimension (%s).",
- entry.textureView->GetDimension(), entry.textureView,
- bindingInfo.texture.viewDimension);
- break;
- }
- case BindingInfoType::StorageTexture: {
- DAWN_INVALID_IF(
- !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
- "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
- texture->GetUsage(), texture);
-
- ASSERT(!texture->IsMultisampledTexture());
-
- DAWN_INVALID_IF(
- texture->GetFormat().format != bindingInfo.storageTexture.format,
- "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
- texture, bindingInfo.storageTexture.format);
-
- DAWN_INVALID_IF(
- entry.textureView->GetDimension() !=
- bindingInfo.storageTexture.viewDimension,
- "Dimension (%s) of %s doesn't match the expected dimension (%s).",
- entry.textureView->GetDimension(), entry.textureView,
- bindingInfo.storageTexture.viewDimension);
-
- DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
- "mipLevelCount (%u) of %s expected to be 1.",
- entry.textureView->GetLevelCount(), entry.textureView);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
-
- MaybeError ValidateSamplerBinding(const DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
-
- DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
- "Expected only sampler to be set for binding entry.");
-
- DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(entry.sampler));
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
-
- switch (bindingInfo.sampler.type) {
- case wgpu::SamplerBindingType::NonFiltering:
- DAWN_INVALID_IF(
- entry.sampler->IsFiltering(),
- "Filtering sampler %s is incompatible with non-filtering sampler "
- "binding.",
- entry.sampler);
- DAWN_FALLTHROUGH;
- case wgpu::SamplerBindingType::Filtering:
- DAWN_INVALID_IF(
- entry.sampler->IsComparison(),
- "Comparison sampler %s is incompatible with non-comparison sampler "
- "binding.",
- entry.sampler);
- break;
- case wgpu::SamplerBindingType::Comparison:
- DAWN_INVALID_IF(
- !entry.sampler->IsComparison(),
- "Non-comparison sampler %s is imcompatible with comparison sampler "
- "binding.",
- entry.sampler);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
-
- MaybeError ValidateExternalTextureBinding(const DeviceBase* device,
- const BindGroupEntry& entry,
- const BindingInfo& bindingInfo) {
- const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingEntry);
-
- DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
- "Binding entry external texture not set.");
-
- DAWN_INVALID_IF(
- entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
- "Expected only external texture to be set for binding entry.");
-
- DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
- wgpu::SType::ExternalTextureBindingEntry));
-
- DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
-
- return {};
- }
-
- } // anonymous namespace
-
- MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
- const BindGroupDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY(device->ValidateObject(descriptor->layout));
-
- DAWN_INVALID_IF(
- BindingIndex(descriptor->entryCount) != descriptor->layout->GetBindingCount(),
- "Number of entries (%u) did not match the number of entries (%u) specified in %s",
- descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
- descriptor->layout);
-
- const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
- ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
-
- ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
-
- const auto& it = bindingMap.find(BindingNumber(entry.binding));
- DAWN_INVALID_IF(it == bindingMap.end(),
- "In entries[%u], binding index %u not present in the bind group layout",
- i, entry.binding);
-
- BindingIndex bindingIndex = it->second;
- ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
-
- DAWN_INVALID_IF(bindingsSet[bindingIndex],
- "In entries[%u], binding index %u already used by a previous entry", i,
- entry.binding);
-
- bindingsSet.set(bindingIndex);
-
- const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
-
- // Perform binding-type specific validation.
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Buffer", i);
- break;
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture:
- DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Texture", i);
- break;
- case BindingInfoType::Sampler:
- DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
- "validating entries[%u] as a Sampler", i);
- break;
- case BindingInfoType::ExternalTexture:
- DAWN_TRY_CONTEXT(ValidateExternalTextureBinding(device, entry, bindingInfo),
- "validating entries[%u] as an ExternalTexture", i);
- break;
- }
- }
-
- // This should always be true because
- // - numBindings has to match between the bind group and its layout.
- // - Each binding must be set at most once
- //
- // We don't validate the equality because it wouldn't be possible to cover it with a test.
- ASSERT(bindingsSet.count() == bindingMap.size());
-
- return {};
- } // anonymous namespace
-
- // BindGroup
-
- BindGroupBase::BindGroupBase(DeviceBase* device,
- const BindGroupDescriptor* descriptor,
- void* bindingDataStart)
- : ApiObjectBase(device, descriptor->label),
- mLayout(descriptor->layout),
- mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
- for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
- // TODO(enga): Shouldn't be needed when bindings are tightly packed.
- // This is to fill Ref<ObjectBase> holes with nullptrs.
- new (&mBindingData.bindings[i]) Ref<ObjectBase>();
- }
-
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
-
- BindingIndex bindingIndex =
- descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
- ASSERT(bindingIndex < mLayout->GetBindingCount());
-
- // Only a single binding type should be set, so once we found it we can skip to the
- // next loop iteration.
-
- if (entry.buffer != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.buffer;
- mBindingData.bufferData[bindingIndex].offset = entry.offset;
- uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
- ? entry.buffer->GetSize() - entry.offset
- : entry.size;
- mBindingData.bufferData[bindingIndex].size = bufferSize;
- continue;
- }
-
- if (entry.textureView != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.textureView;
- continue;
- }
-
- if (entry.sampler != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = entry.sampler;
- continue;
- }
-
- const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingEntry);
- if (externalTextureBindingEntry != nullptr) {
- ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
- mBindingData.bindings[bindingIndex] = externalTextureBindingEntry->externalTexture;
- continue;
- }
- }
-
- uint32_t packedIdx = 0;
- for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
- ++bindingIndex) {
- if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
- mBindingData.unverifiedBufferSizes[packedIdx] =
- mBindingData.bufferData[bindingIndex].size;
- ++packedIdx;
- }
- }
-
- TrackInDevice();
- }
-
- BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- BindGroupBase::~BindGroupBase() = default;
-
- void BindGroupBase::DestroyImpl() {
- if (mLayout != nullptr) {
- ASSERT(!IsError());
- for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
- mBindingData.bindings[i].~Ref<ObjectBase>();
- }
- }
- }
-
- void BindGroupBase::DeleteThis() {
- // Add another ref to the layout so that if this is the last ref, the layout
- // is destroyed after the bind group. The bind group is slab-allocated inside
- // memory owned by the layout (except for the null backend).
- Ref<BindGroupLayoutBase> layout = mLayout;
- ApiObjectBase::DeleteThis();
- }
-
- BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mBindingData() {
- }
-
- // static
- BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
- return new BindGroupBase(device, ObjectBase::kError);
- }
-
- ObjectType BindGroupBase::GetType() const {
- return ObjectType::BindGroup;
- }
-
- BindGroupLayoutBase* BindGroupBase::GetLayout() {
- ASSERT(!IsError());
- return mLayout.Get();
- }
-
- const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
- ASSERT(!IsError());
- return mLayout.Get();
- }
-
- const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
- ASSERT(!IsError());
- return mBindingData.unverifiedBufferSizes;
- }
-
- BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
- BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
- return {buffer, mBindingData.bufferData[bindingIndex].offset,
- mBindingData.bufferData[bindingIndex].size};
- }
-
- SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
- return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
- }
-
- TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
- mLayout->GetBindingInfo(bindingIndex).bindingType ==
- BindingInfoType::StorageTexture);
- return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
- }
-
- ExternalTextureBase* BindGroupBase::GetBindingAsExternalTexture(BindingIndex bindingIndex) {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mLayout->GetBindingCount());
- ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType ==
- BindingInfoType::ExternalTexture);
- return static_cast<ExternalTextureBase*>(mBindingData.bindings[bindingIndex].Get());
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.h b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
deleted file mode 100644
index 6b687ce14db..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BINDGROUP_H_
-#define DAWNNATIVE_BINDGROUP_H_
-
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
- const BindGroupDescriptor* descriptor);
-
- struct BufferBinding {
- BufferBase* buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- class BindGroupBase : public ApiObjectBase {
- public:
- static BindGroupBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- BindGroupLayoutBase* GetLayout();
- const BindGroupLayoutBase* GetLayout() const;
- BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
- SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
- TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
- const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
- ExternalTextureBase* GetBindingAsExternalTexture(BindingIndex bindingIndex);
-
- protected:
- // To save memory, the size of a bind group is dynamically determined and the bind group is
- // placement-allocated into memory big enough to hold the bind group with its
- // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
- // binding data should be passed as |bindingDataStart|.
- BindGroupBase(DeviceBase* device,
- const BindGroupDescriptor* descriptor,
- void* bindingDataStart);
-
- // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
- // be first in the allocation. The binding data is stored after the Derived class.
- template <typename Derived>
- BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(device,
- descriptor,
- AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
- descriptor->layout->GetBindingDataAlignment())) {
- static_assert(std::is_base_of<BindGroupBase, Derived>::value, "");
- }
-
- // Constructor used only for mocking and testing.
- BindGroupBase(DeviceBase* device);
- void DestroyImpl() override;
-
- ~BindGroupBase() override;
-
- private:
- BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DeleteThis() override;
-
- Ref<BindGroupLayoutBase> mLayout;
- BindGroupLayoutBase::BindingDataPointers mBindingData;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BINDGROUP_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
deleted file mode 100644
index f6687cbd68d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ /dev/null
@@ -1,568 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BindGroupLayout.h"
-
-#include "common/BitSetIterator.h"
-
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/PerStage.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <algorithm>
-#include <functional>
-#include <set>
-
-namespace dawn_native {
-
- namespace {
- MaybeError ValidateStorageTextureFormat(DeviceBase* device,
- wgpu::TextureFormat storageTextureFormat) {
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
-
- ASSERT(format != nullptr);
- DAWN_INVALID_IF(!format->supportsStorageUsage,
- "Texture format (%s) does not support storage textures.",
- storageTextureFormat);
-
- return {};
- }
-
- MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "%s texture views cannot be used as storage textures.", dimension);
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::e3D:
- return {};
-
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
- const BindGroupLayoutEntry& entry,
- bool allowInternalBinding) {
- DAWN_TRY(ValidateShaderStage(entry.visibility));
-
- int bindingMemberCount = 0;
- BindingInfoType bindingType;
- wgpu::ShaderStage allowedStages = kAllStages;
-
- if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Buffer;
- const BufferBindingLayout& buffer = entry.buffer;
-
- // The kInternalStorageBufferBinding is used internally and not a value
- // in wgpu::BufferBindingType.
- if (buffer.type == kInternalStorageBufferBinding) {
- DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
- } else {
- DAWN_TRY(ValidateBufferBindingType(buffer.type));
- }
-
- if (buffer.type == wgpu::BufferBindingType::Storage ||
- buffer.type == kInternalStorageBufferBinding) {
- allowedStages &= ~wgpu::ShaderStage::Vertex;
- }
-
- // Dynamic storage buffers aren't bounds checked properly in D3D12. Disallow them as
- // unsafe until the bounds checks are implemented.
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) &&
- buffer.hasDynamicOffset &&
- (buffer.type == wgpu::BufferBindingType::Storage ||
- buffer.type == kInternalStorageBufferBinding ||
- buffer.type == wgpu::BufferBindingType::ReadOnlyStorage),
- "Dynamic storage buffers are disallowed because they aren't secure yet. "
- "See https://crbug.com/dawn/429");
- }
- if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Sampler;
- DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
- }
- if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::Texture;
- const TextureBindingLayout& texture = entry.texture;
- DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
-
- // viewDimension defaults to 2D if left undefined, needs validation otherwise.
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
- if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
- DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
- viewDimension = texture.viewDimension;
- }
-
- DAWN_INVALID_IF(
- texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
- "View dimension (%s) for a multisampled texture bindings was not %s.",
- viewDimension, wgpu::TextureViewDimension::e2D);
- }
- if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- bindingMemberCount++;
- bindingType = BindingInfoType::StorageTexture;
- const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
- DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
- DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
-
- // viewDimension defaults to 2D if left undefined, needs validation otherwise.
- if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
- DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
- DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
- }
-
- if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
- allowedStages &= ~wgpu::ShaderStage::Vertex;
- }
- }
-
- const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
- FindInChain(entry.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- bindingMemberCount++;
- bindingType = BindingInfoType::ExternalTexture;
- }
-
- DAWN_INVALID_IF(bindingMemberCount != 1,
- "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
- "storageTexture, or externalTexture set");
-
- DAWN_INVALID_IF(
- !IsSubset(entry.visibility, allowedStages),
- "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
- bindingType, entry.visibility, allowedStages);
-
- return {};
- }
-
- } // anonymous namespace
-
- MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- std::set<BindingNumber> bindingsSet;
- BindingCounts bindingCounts = {};
-
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupLayoutEntry& entry = descriptor->entries[i];
- BindingNumber bindingNumber = BindingNumber(entry.binding);
-
- DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
- "On entries[%u]: binding index (%u) was specified by a previous entry.",
- i, entry.binding);
-
- DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
- "validating entries[%u]", i);
-
- IncrementBindingCounts(&bindingCounts, entry);
-
- bindingsSet.insert(bindingNumber);
- }
-
- DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
-
- return {};
- }
-
- namespace {
-
- bool operator!=(const BindingInfo& a, const BindingInfo& b) {
- if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
- return true;
- }
-
- switch (a.bindingType) {
- case BindingInfoType::Buffer:
- return a.buffer.type != b.buffer.type ||
- a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
- a.buffer.minBindingSize != b.buffer.minBindingSize;
- case BindingInfoType::Sampler:
- return a.sampler.type != b.sampler.type;
- case BindingInfoType::Texture:
- return a.texture.sampleType != b.texture.sampleType ||
- a.texture.viewDimension != b.texture.viewDimension ||
- a.texture.multisampled != b.texture.multisampled;
- case BindingInfoType::StorageTexture:
- return a.storageTexture.access != b.storageTexture.access ||
- a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
- a.storageTexture.format != b.storageTexture.format;
- case BindingInfoType::ExternalTexture:
- return false;
- }
- UNREACHABLE();
- }
-
- bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
- return binding.buffer.type != wgpu::BufferBindingType::Undefined;
- }
-
- bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
- if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
- return binding.buffer.hasDynamicOffset;
- }
- return false;
- }
-
- BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
- BindingInfo bindingInfo;
- bindingInfo.binding = BindingNumber(binding.binding);
- bindingInfo.visibility = binding.visibility;
-
- if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Buffer;
- bindingInfo.buffer = binding.buffer;
- } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Sampler;
- bindingInfo.sampler = binding.sampler;
- } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- bindingInfo.bindingType = BindingInfoType::Texture;
- bindingInfo.texture = binding.texture;
-
- if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
- bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
- }
- } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- bindingInfo.bindingType = BindingInfoType::StorageTexture;
- bindingInfo.storageTexture = binding.storageTexture;
-
- if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
- bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
- }
- } else {
- const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
- FindInChain(binding.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- bindingInfo.bindingType = BindingInfoType::ExternalTexture;
- }
- }
-
- return bindingInfo;
- }
-
- bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
- const bool aIsBuffer = IsBufferBinding(a);
- const bool bIsBuffer = IsBufferBinding(b);
- if (aIsBuffer != bIsBuffer) {
- // Always place buffers first.
- return aIsBuffer;
- }
-
- if (aIsBuffer) {
- bool aHasDynamicOffset = BindingHasDynamicOffset(a);
- bool bHasDynamicOffset = BindingHasDynamicOffset(b);
- ASSERT(bIsBuffer);
- if (aHasDynamicOffset != bHasDynamicOffset) {
- // Buffers with dynamic offsets should come before those without.
- // This makes it easy to iterate over the dynamic buffer bindings
- // [0, dynamicBufferCount) during validation.
- return aHasDynamicOffset;
- }
- if (aHasDynamicOffset) {
- ASSERT(bHasDynamicOffset);
- ASSERT(a.binding != b.binding);
- // Above, we ensured that dynamic buffers are first. Now, ensure that
- // dynamic buffer bindings are in increasing order. This is because dynamic
- // buffer offsets are applied in increasing order of binding number.
- return a.binding < b.binding;
- }
- }
-
- // This applies some defaults and gives us a single value to check for the binding type.
- BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
- BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
-
- // Sort by type.
- if (aInfo.bindingType != bInfo.bindingType) {
- return aInfo.bindingType < bInfo.bindingType;
- }
-
- if (a.visibility != b.visibility) {
- return a.visibility < b.visibility;
- }
-
- switch (aInfo.bindingType) {
- case BindingInfoType::Buffer:
- if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
- return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
- }
- break;
- case BindingInfoType::Sampler:
- if (aInfo.sampler.type != bInfo.sampler.type) {
- return aInfo.sampler.type < bInfo.sampler.type;
- }
- break;
- case BindingInfoType::Texture:
- if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
- return aInfo.texture.multisampled < bInfo.texture.multisampled;
- }
- if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
- return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
- }
- if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
- return aInfo.texture.sampleType < bInfo.texture.sampleType;
- }
- break;
- case BindingInfoType::StorageTexture:
- if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
- return aInfo.storageTexture.access < bInfo.storageTexture.access;
- }
- if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
- return aInfo.storageTexture.viewDimension <
- bInfo.storageTexture.viewDimension;
- }
- if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
- return aInfo.storageTexture.format < bInfo.storageTexture.format;
- }
- break;
- case BindingInfoType::ExternalTexture:
- break;
- }
- return a.binding < b.binding;
- }
-
- // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
- // first.
- bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
- BindingIndex lastBufferIndex{0};
- BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
- for (BindingIndex i{0}; i < bindings.size(); ++i) {
- if (bindings[i].bindingType == BindingInfoType::Buffer) {
- lastBufferIndex = std::max(i, lastBufferIndex);
- } else {
- firstNonBufferIndex = std::min(i, firstNonBufferIndex);
- }
- }
-
- // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
- // |firstNonBufferIndex| gets set to 0.
- return firstNonBufferIndex >= lastBufferIndex;
- }
-
- } // namespace
-
- // BindGroupLayoutBase
-
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label),
- mBindingInfo(BindingIndex(descriptor->entryCount)),
- mPipelineCompatibilityToken(pipelineCompatibilityToken) {
- std::vector<BindGroupLayoutEntry> sortedBindings(
- descriptor->entries, descriptor->entries + descriptor->entryCount);
-
- std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
-
- for (BindingIndex i{0}; i < mBindingInfo.size(); ++i) {
- const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
-
- mBindingInfo[i] = CreateBindGroupLayoutInfo(binding);
-
- if (IsBufferBinding(binding)) {
- // Buffers must be contiguously packed at the start of the binding info.
- ASSERT(GetBufferCount() == i);
- }
- IncrementBindingCounts(&mBindingCounts, binding);
-
- const auto& it = mBindingMap.emplace(BindingNumber(binding.binding), i);
- ASSERT(it.second);
- }
- ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
- ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
- }
-
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
- TrackInDevice();
- }
-
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- BindGroupLayoutBase::~BindGroupLayoutBase() = default;
-
- void BindGroupLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheBindGroupLayout(this);
- }
- }
-
- // static
- BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
- return new BindGroupLayoutBase(device, ObjectBase::kError);
- }
-
- ObjectType BindGroupLayoutBase::GetType() const {
- return ObjectType::BindGroupLayout;
- }
-
- const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
- ASSERT(!IsError());
- return mBindingMap;
- }
-
- bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
- return mBindingMap.count(bindingNumber) != 0;
- }
-
- BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
- ASSERT(!IsError());
- const auto& it = mBindingMap.find(bindingNumber);
- ASSERT(it != mBindingMap.end());
- return it->second;
- }
-
- size_t BindGroupLayoutBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mPipelineCompatibilityToken);
-
- // std::map is sorted by key, so two BGLs constructed in different orders
- // will still record the same.
- for (const auto& it : mBindingMap) {
- recorder.Record(it.first, it.second);
-
- const BindingInfo& info = mBindingInfo[it.second];
-
- recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
- info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
- info.texture.sampleType, info.texture.viewDimension,
- info.texture.multisampled, info.storageTexture.access,
- info.storageTexture.format, info.storageTexture.viewDimension);
- }
-
- return recorder.GetContentHash();
- }
-
- bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
- const BindGroupLayoutBase* b) const {
- return a->IsLayoutEqual(b);
- }
-
- BindingIndex BindGroupLayoutBase::GetBindingCount() const {
- return mBindingInfo.size();
- }
-
- BindingIndex BindGroupLayoutBase::GetBufferCount() const {
- return BindingIndex(mBindingCounts.bufferCount);
- }
-
- BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
- // This is a binding index because dynamic buffers are packed at the front of the binding
- // info.
- return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
- mBindingCounts.dynamicUniformBufferCount);
- }
-
- uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
- return mBindingCounts.unverifiedBufferCount;
- }
-
- const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
- return mBindingCounts;
- }
-
- bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
- bool excludePipelineCompatibiltyToken) const {
- if (!excludePipelineCompatibiltyToken &&
- GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
- return false;
- }
- if (GetBindingCount() != other->GetBindingCount()) {
- return false;
- }
- for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
- if (mBindingInfo[i] != other->mBindingInfo[i]) {
- return false;
- }
- }
- return mBindingMap == other->mBindingMap;
- }
-
- PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
- return mPipelineCompatibilityToken;
- }
-
- size_t BindGroupLayoutBase::GetBindingDataSize() const {
- // | ------ buffer-specific ----------| ------------ object pointers -------------|
- // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
- // Followed by:
- // |---------buffer size array--------|
- // |-uint64_t[mUnverifiedBufferCount]-|
- size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
- ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
- size_t bufferSizeArrayStart =
- Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
- sizeof(uint64_t));
- ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
- return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
- }
-
- BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
- void* dataStart) const {
- BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
- auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
- uint64_t* unverifiedBufferSizes = AlignPtr(
- reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
-
- ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
- ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
- ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
-
- return {{bufferData, GetBufferCount()},
- {bindings, GetBindingCount()},
- {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
- }
-
- bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
- ASSERT(bindingIndex < GetBufferCount());
- switch (GetBindingInfo(bindingIndex).buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- return false;
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return true;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
deleted file mode 100644
index 6749adbb185..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BINDGROUPLAYOUT_H_
-#define DAWNNATIVE_BINDGROUPLAYOUT_H_
-
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "common/SlabAllocator.h"
-#include "common/ityp_span.h"
-#include "common/ityp_vector.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <bitset>
-#include <map>
-
-namespace dawn_native {
-
- MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding = false);
-
- // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
- // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
- // into a packed range of |BindingIndex| integers.
- class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
- public:
- BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken,
- ApiObjectBase::UntrackedByDeviceTag tag);
- BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayoutBase() override;
-
- static BindGroupLayoutBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // A map from the BindingNumber to its packed BindingIndex.
- using BindingMap = std::map<BindingNumber, BindingIndex>;
-
- const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
- ASSERT(!IsError());
- ASSERT(bindingIndex < mBindingInfo.size());
- return mBindingInfo[bindingIndex];
- }
- const BindingMap& GetBindingMap() const;
- bool HasBinding(BindingNumber bindingNumber) const;
- BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
-
- // Functions necessary for the unordered_set<BGLBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
- };
-
- BindingIndex GetBindingCount() const;
- // Returns |BindingIndex| because buffers are packed at the front.
- BindingIndex GetBufferCount() const;
- // Returns |BindingIndex| because dynamic buffers are packed at the front.
- BindingIndex GetDynamicBufferCount() const;
- uint32_t GetUnverifiedBufferCount() const;
-
- // Used to get counts and validate them in pipeline layout creation. Other getters
- // should be used to get typed integer counts.
- const BindingCounts& GetBindingCountInfo() const;
-
- // Tests that the BindingInfo of two bind groups are equal,
- // ignoring their compatibility groups.
- bool IsLayoutEqual(const BindGroupLayoutBase* other,
- bool excludePipelineCompatibiltyToken = false) const;
- PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
-
- struct BufferBindingData {
- uint64_t offset;
- uint64_t size;
- };
-
- struct BindingDataPointers {
- ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
- ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
- ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
- };
-
- // Compute the amount of space / alignment required to store bindings for a bind group of
- // this layout.
- size_t GetBindingDataSize() const;
- static constexpr size_t GetBindingDataAlignment() {
- static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData), "");
- return alignof(BufferBindingData);
- }
-
- BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
-
- bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
-
- protected:
- // Constructor used only for mocking and testing.
- BindGroupLayoutBase(DeviceBase* device);
- void DestroyImpl() override;
-
- template <typename BindGroup>
- SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
- return SlabAllocator<BindGroup>(
- size, // bytes
- Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(), // size
- std::max(alignof(BindGroup), GetBindingDataAlignment()) // alignment
- );
- }
-
- private:
- BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- BindingCounts mBindingCounts = {};
- ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
-
- // Map from BindGroupLayoutEntry.binding to packed indices.
- BindingMap mBindingMap;
-
- // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
- const PipelineCompatibilityToken mPipelineCompatibilityToken =
- PipelineCompatibilityToken(0);
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BINDGROUPLAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
deleted file mode 100644
index f7a9142083e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BINDGROUPTRACKER_H_
-#define DAWNNATIVE_BINDGROUPTRACKER_H_
-
-#include "common/Constants.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/PipelineLayout.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- // Keeps track of the dirty bind groups so they can be lazily applied when we know the
- // pipeline state or it changes.
- // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
- // in other backends.
- template <bool CanInheritBindGroups, typename DynamicOffset>
- class BindGroupTrackerBase {
- public:
- void OnSetBindGroup(BindGroupIndex index,
- BindGroupBase* bindGroup,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
- ASSERT(index < kMaxBindGroupsTyped);
-
- if (mBindGroupLayoutsMask[index]) {
- // It is okay to only dirty bind groups that are used by the current pipeline
- // layout. If the pipeline layout changes, then the bind groups it uses will
- // become dirty.
-
- if (mBindGroups[index] != bindGroup) {
- mDirtyBindGroups.set(index);
- mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
- }
-
- if (dynamicOffsetCount > 0) {
- mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
- }
- }
-
- mBindGroups[index] = bindGroup;
- mDynamicOffsetCounts[index] = dynamicOffsetCount;
- SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
- }
-
- void OnSetPipeline(PipelineBase* pipeline) {
- mPipelineLayout = pipeline->GetLayout();
- }
-
- protected:
- // The Derived class should call this before it applies bind groups.
- void BeforeApply() {
- if (mLastAppliedPipelineLayout == mPipelineLayout) {
- return;
- }
-
- // Use the bind group layout mask to avoid marking unused bind groups as dirty.
- mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
-
- // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
- // the first |k| matching bind groups may be inherited.
- if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
- // Dirty bind groups that cannot be inherited.
- BindGroupLayoutMask dirtiedGroups =
- ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
-
- mDirtyBindGroups |= dirtiedGroups;
- mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
-
- // Clear any bind groups not in the mask.
- mDirtyBindGroups &= mBindGroupLayoutsMask;
- mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
- } else {
- mDirtyBindGroups = mBindGroupLayoutsMask;
- mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
- }
- }
-
- // The Derived class should call this after it applies bind groups.
- void AfterApply() {
- // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
- // will be dirtied again by the next pipeline change.
- mDirtyBindGroups.reset();
- mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
- // Keep track of the last applied pipeline layout. This allows us to avoid computing
- // the intersection of the dirty bind groups and bind group layout mask in next Draw
- // or Dispatch (which is very hot code) until the layout is changed again.
- mLastAppliedPipelineLayout = mPipelineLayout;
- }
-
- BindGroupLayoutMask mDirtyBindGroups = 0;
- BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
- BindGroupLayoutMask mBindGroupLayoutsMask = 0;
- ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
- ityp::array<BindGroupIndex,
- std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
- kMaxBindGroups>
- mDynamicOffsets = {};
-
- // |mPipelineLayout| is the current pipeline layout set on the command buffer.
- // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
- // to the bind group bindings.
- PipelineLayoutBase* mPipelineLayout = nullptr;
- PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
-
- private:
- // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
- // in other backends.
- static void SetDynamicOffsets(uint64_t* data,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
- for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
- data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
- }
- }
-
- static void SetDynamicOffsets(uint32_t* data,
- uint32_t dynamicOffsetCount,
- uint32_t* dynamicOffsets) {
- memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
- }
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BINDGROUPTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp b/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp
deleted file mode 100644
index aba5d0dde19..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BindingInfo.h"
-
-#include "dawn_native/ChainUtils_autogen.h"
-
-namespace dawn_native {
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- BindingInfoType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case BindingInfoType::Buffer:
- s->Append("Buffer");
- break;
- case BindingInfoType::Sampler:
- s->Append("Sampler");
- break;
- case BindingInfoType::Texture:
- s->Append("Texture");
- break;
- case BindingInfoType::StorageTexture:
- s->Append("StorageTexture");
- break;
- case BindingInfoType::ExternalTexture:
- s->Append("ExternalTexture");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
- bindingCounts->totalCount += 1;
-
- uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
-
- if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
- ++bindingCounts->bufferCount;
- const BufferBindingLayout& buffer = entry.buffer;
-
- if (buffer.minBindingSize == 0) {
- ++bindingCounts->unverifiedBufferCount;
- }
-
- switch (buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (buffer.hasDynamicOffset) {
- ++bindingCounts->dynamicUniformBufferCount;
- }
- perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
- break;
-
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (buffer.hasDynamicOffset) {
- ++bindingCounts->dynamicStorageBufferCount;
- }
- perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
- break;
-
- case wgpu::BufferBindingType::Undefined:
- // Can't get here due to the enclosing if statement.
- UNREACHABLE();
- break;
- }
- } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
- } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
- } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
- } else {
- const ExternalTextureBindingLayout* externalTextureBindingLayout;
- FindInChain(entry.nextInChain, &externalTextureBindingLayout);
- if (externalTextureBindingLayout != nullptr) {
- perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
- }
- }
-
- ASSERT(perStageBindingCountMember != nullptr);
- for (SingleShaderStage stage : IterateStages(entry.visibility)) {
- ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
- }
- }
-
- void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
- bindingCounts->totalCount += rhs.totalCount;
- bindingCounts->bufferCount += rhs.bufferCount;
- bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
- bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
- bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
-
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- bindingCounts->perStage[stage].sampledTextureCount +=
- rhs.perStage[stage].sampledTextureCount;
- bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
- bindingCounts->perStage[stage].storageBufferCount +=
- rhs.perStage[stage].storageBufferCount;
- bindingCounts->perStage[stage].storageTextureCount +=
- rhs.perStage[stage].storageTextureCount;
- bindingCounts->perStage[stage].uniformBufferCount +=
- rhs.perStage[stage].uniformBufferCount;
- bindingCounts->perStage[stage].externalTextureCount +=
- rhs.perStage[stage].externalTextureCount;
- }
- }
-
- MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
- DAWN_INVALID_IF(
- bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
- "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
- "limit (%u).",
- bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
-
- DAWN_INVALID_IF(
- bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
- "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
- "limit (%u).",
- bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
-
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].sampledTextureCount >
- kMaxSampledTexturesPerShaderStage,
- "The number of sampled textures (%u) in the %s stage exceeds the maximum "
- "per-stage limit (%u).",
- bindingCounts.perStage[stage].sampledTextureCount, stage,
- kMaxSampledTexturesPerShaderStage);
-
- // The per-stage number of external textures is bound by the maximum sampled textures
- // per stage.
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].externalTextureCount >
- kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
- "The number of external textures (%u) in the %s stage exceeds the maximum "
- "per-stage limit (%u).",
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].sampledTextureCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kSampledTexturesPerExternalTexture) >
- kMaxSampledTexturesPerShaderStage,
- "The combination of sampled textures (%u) and external textures (%u) in the %s "
- "stage exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].sampledTextureCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSampledTexturesPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
- "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
- "(%u).",
- bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].samplerCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kSamplersPerExternalTexture) >
- kMaxSamplersPerShaderStage,
- "The combination of samplers (%u) and external textures (%u) in the %s stage "
- "exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].samplerCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxSamplersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
- "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].storageBufferCount, stage,
- kMaxStorageBuffersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].storageTextureCount >
- kMaxStorageTexturesPerShaderStage,
- "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].storageTextureCount, stage,
- kMaxStorageTexturesPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
- "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
- "limit (%u).",
- bindingCounts.perStage[stage].uniformBufferCount, stage,
- kMaxUniformBuffersPerShaderStage);
-
- DAWN_INVALID_IF(
- bindingCounts.perStage[stage].uniformBufferCount +
- (bindingCounts.perStage[stage].externalTextureCount *
- kUniformsPerExternalTexture) >
- kMaxUniformBuffersPerShaderStage,
- "The combination of uniform buffers (%u) and external textures (%u) in the %s "
- "stage exceeds the maximum per-stage limit (%u).",
- bindingCounts.perStage[stage].uniformBufferCount,
- bindingCounts.perStage[stage].externalTextureCount, stage,
- kMaxUniformBuffersPerShaderStage);
- }
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
deleted file mode 100644
index 66625540901..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BINDINGINFO_H_
-#define DAWNNATIVE_BINDINGINFO_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/PerStage.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <cstdint>
-
-namespace dawn_native {
-
- // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
- static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
- kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
-
- static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
- BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
-
- // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
- // API. There should never be more bindings than the max per stage, for each stage.
- static constexpr uint32_t kMaxBindingsPerPipelineLayout =
- 3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
- kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
- kMaxUniformBuffersPerShaderStage);
-
- static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
- BindingIndex(kMaxBindingsPerPipelineLayout);
-
- // TODO(enga): Figure out a good number for this.
- static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
-
- enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- BindingInfoType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- struct BindingInfo {
- BindingNumber binding;
- wgpu::ShaderStage visibility;
-
- BindingInfoType bindingType;
-
- // TODO(dawn:527): These four values could be made into a union.
- BufferBindingLayout buffer;
- SamplerBindingLayout sampler;
- TextureBindingLayout texture;
- StorageTextureBindingLayout storageTexture;
- };
-
- struct BindingSlot {
- BindGroupIndex group;
- BindingNumber binding;
- };
-
- struct PerStageBindingCounts {
- uint32_t sampledTextureCount;
- uint32_t samplerCount;
- uint32_t storageBufferCount;
- uint32_t storageTextureCount;
- uint32_t uniformBufferCount;
- uint32_t externalTextureCount;
- };
-
- struct BindingCounts {
- uint32_t totalCount;
- uint32_t bufferCount;
- uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
- uint32_t dynamicUniformBufferCount;
- uint32_t dynamicStorageBufferCount;
- PerStage<PerStageBindingCounts> perStage;
- };
-
- void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
- void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
- MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
-
- // For buffer size validation
- using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BINDINGINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp
deleted file mode 100644
index b19cc19e55e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BuddyAllocator.h"
-
-#include "common/Assert.h"
-#include "common/Math.h"
-
-namespace dawn_native {
-
- BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
- ASSERT(IsPowerOfTwo(maxSize));
-
- mFreeLists.resize(Log2(mMaxBlockSize) + 1);
-
- // Insert the level0 free block.
- mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
- mFreeLists[0] = {mRoot};
- }
-
- BuddyAllocator::~BuddyAllocator() {
- if (mRoot) {
- DeleteBlock(mRoot);
- }
- }
-
- uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
- return ComputeNumOfFreeBlocks(mRoot);
- }
-
- uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
- if (block->mState == BlockState::Free) {
- return 1;
- } else if (block->mState == BlockState::Split) {
- return ComputeNumOfFreeBlocks(block->split.pLeft) +
- ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
- }
- return 0;
- }
-
- uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
- // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
- // However, mFreeList zero-indexed by level.
- // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
- return Log2(mMaxBlockSize) - Log2(blockSize);
- }
-
- uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
- uint64_t alignment) const {
- ASSERT(IsPowerOfTwo(alignment));
- // The current level is the level that corresponds to the allocation size. The free list may
- // not contain a block at that level until a larger one gets allocated (and splits).
- // Continue to go up the tree until such a larger block exists.
- //
- // Even if the block exists at the level, it cannot be used if it's offset is unaligned.
- // When the alignment is also a power-of-two, we simply use the next free block whose size
- // is greater than or equal to the alignment value.
- //
- // After one 8-byte allocation:
- //
- // Level --------------------------------
- // 0 32 | S |
- // --------------------------------
- // 1 16 | S | F2 | S - split
- // -------------------------------- F - free
- // 2 8 | Aa | F1 | | A - allocated
- // --------------------------------
- //
- // Allocate(size=8, alignment=8) will be satisfied by using F1.
- // Allocate(size=8, alignment=4) will be satified by using F1.
- // Allocate(size=8, alignment=16) will be satisified by using F2.
- //
- for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
- size_t currLevel = allocationBlockLevel - ii;
- BuddyBlock* freeBlock = mFreeLists[currLevel].head;
- if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
- return currLevel;
- }
- }
- return kInvalidOffset; // No free block exists at any level.
- }
-
- // Inserts existing free block into the free-list.
- // Called by allocate upon splitting to insert a child block into a free-list.
- // Note: Always insert into the head of the free-list. As when a larger free block at a lower
- // level was split, there were no smaller free blocks at a higher level to allocate.
- void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
- ASSERT(block->mState == BlockState::Free);
-
- // Inserted block is now the front (no prev).
- block->free.pPrev = nullptr;
-
- // Old head is now the inserted block's next.
- block->free.pNext = mFreeLists[level].head;
-
- // Block already in HEAD position (ex. right child was inserted first).
- if (mFreeLists[level].head != nullptr) {
- // Old head's previous is the inserted block.
- mFreeLists[level].head->free.pPrev = block;
- }
-
- mFreeLists[level].head = block;
- }
-
- void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
- ASSERT(block->mState == BlockState::Free);
-
- if (mFreeLists[level].head == block) {
- // Block is in HEAD position.
- mFreeLists[level].head = mFreeLists[level].head->free.pNext;
- } else {
- // Block is after HEAD position.
- BuddyBlock* pPrev = block->free.pPrev;
- BuddyBlock* pNext = block->free.pNext;
-
- ASSERT(pPrev != nullptr);
- ASSERT(pPrev->mState == BlockState::Free);
-
- pPrev->free.pNext = pNext;
-
- if (pNext != nullptr) {
- ASSERT(pNext->mState == BlockState::Free);
- pNext->free.pPrev = pPrev;
- }
- }
- }
-
- uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
- if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
- return kInvalidOffset;
- }
-
- // Compute the level
- const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
-
- ASSERT(allocationSizeToLevel < mFreeLists.size());
-
- uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
-
- // Error when no free blocks exist (allocator is full)
- if (currBlockLevel == kInvalidOffset) {
- return kInvalidOffset;
- }
-
- // Split free blocks level-by-level.
- // Terminate when the current block level is equal to the computed level of the requested
- // allocation.
- BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
-
- for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
- ASSERT(currBlock->mState == BlockState::Free);
-
- // Remove curr block (about to be split).
- RemoveFreeBlock(currBlock, currBlockLevel);
-
- // Create two free child blocks (the buddies).
- const uint64_t nextLevelSize = currBlock->mSize / 2;
- BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
- BuddyBlock* rightChildBlock =
- new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
-
- // Remember the parent to merge these back upon de-allocation.
- rightChildBlock->pParent = currBlock;
- leftChildBlock->pParent = currBlock;
-
- // Make them buddies.
- leftChildBlock->pBuddy = rightChildBlock;
- rightChildBlock->pBuddy = leftChildBlock;
-
- // Insert the children back into the free list into the next level.
- // The free list does not require a specific order. However, an order is specified as
- // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
- InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
- InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
-
- // Curr block is now split.
- currBlock->mState = BlockState::Split;
- currBlock->split.pLeft = leftChildBlock;
-
- // Decend down into the next level.
- currBlock = leftChildBlock;
- }
-
- // Remove curr block from free-list (now allocated).
- RemoveFreeBlock(currBlock, currBlockLevel);
- currBlock->mState = BlockState::Allocated;
-
- return currBlock->mOffset;
- }
-
- void BuddyAllocator::Deallocate(uint64_t offset) {
- BuddyBlock* curr = mRoot;
-
- // TODO(crbug.com/dawn/827): Optimize de-allocation.
- // Passing allocationSize directly will avoid the following level-by-level search;
- // however, it requires the size information to be stored outside the allocator.
-
- // Search for the free block node that corresponds to the block offset.
- size_t currBlockLevel = 0;
- while (curr->mState == BlockState::Split) {
- if (offset < curr->split.pLeft->pBuddy->mOffset) {
- curr = curr->split.pLeft;
- } else {
- curr = curr->split.pLeft->pBuddy;
- }
-
- currBlockLevel++;
- }
-
- ASSERT(curr->mState == BlockState::Allocated);
-
- // Ensure the block is at the correct level
- ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
-
- // Mark curr free so we can merge.
- curr->mState = BlockState::Free;
-
- // Merge the buddies (LevelN-to-Level0).
- while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
- // Remove the buddy.
- RemoveFreeBlock(curr->pBuddy, currBlockLevel);
-
- BuddyBlock* parent = curr->pParent;
-
- // The buddies were inserted in a specific order but
- // could be deleted in any order.
- DeleteBlock(curr->pBuddy);
- DeleteBlock(curr);
-
- // Parent is now free.
- parent->mState = BlockState::Free;
-
- // Ascend up to the next level (parent block).
- curr = parent;
- currBlockLevel--;
- }
-
- InsertFreeBlock(curr, currBlockLevel);
- }
-
- // Helper which deletes a block in the tree recursively (post-order).
- void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
- ASSERT(block != nullptr);
-
- if (block->mState == BlockState::Split) {
- // Delete the pair in same order we inserted.
- DeleteBlock(block->split.pLeft->pBuddy);
- DeleteBlock(block->split.pLeft);
- }
- delete block;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h b/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h
deleted file mode 100644
index af959fc4a72..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BuddyAllocator.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BUDDYALLOCATOR_H_
-#define DAWNNATIVE_BUDDYALLOCATOR_H_
-
-#include <cstddef>
-#include <cstdint>
-#include <limits>
-#include <vector>
-
-namespace dawn_native {
-
- // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
- // Memory is split into halves until just large enough to fit to the request. This
- // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
- // returning the starting offset whose size is guaranteed to be greater than or equal to the
- // allocation size. To deallocate, the same offset is used to find the corresponding block.
- //
- // Internally, it manages a free list to track free blocks in a full binary tree.
- // Every index in the free list corresponds to a level in the tree. That level also determines
- // the size of the block to be used to satisfy the request. The first level (index=0) represents
- // the root whose size is also called the max block size.
- //
- class BuddyAllocator {
- public:
- BuddyAllocator(uint64_t maxSize);
- ~BuddyAllocator();
-
- // Required methods.
- uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
- void Deallocate(uint64_t offset);
-
- // For testing purposes only.
- uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
-
- static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
-
- private:
- uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
- uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
-
- enum class BlockState { Free, Split, Allocated };
-
- struct BuddyBlock {
- BuddyBlock(uint64_t size, uint64_t offset)
- : mOffset(offset), mSize(size), mState(BlockState::Free) {
- free.pPrev = nullptr;
- free.pNext = nullptr;
- }
-
- uint64_t mOffset;
- uint64_t mSize;
-
- // Pointer to this block's buddy, iff parent is split.
- // Used to quickly merge buddy blocks upon de-allocate.
- BuddyBlock* pBuddy = nullptr;
- BuddyBlock* pParent = nullptr;
-
- // Track whether this block has been split or not.
- BlockState mState;
-
- struct FreeLinks {
- BuddyBlock* pPrev;
- BuddyBlock* pNext;
- };
-
- struct SplitLink {
- BuddyBlock* pLeft;
- };
-
- union {
- // Used upon allocation.
- // Avoids searching for the next free block.
- FreeLinks free;
-
- // Used upon de-allocation.
- // Had this block split upon allocation, it and it's buddy is to be deleted.
- SplitLink split;
- };
- };
-
- void InsertFreeBlock(BuddyBlock* block, size_t level);
- void RemoveFreeBlock(BuddyBlock* block, size_t level);
- void DeleteBlock(BuddyBlock* block);
-
- uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
-
- // Keep track the head and tail (for faster insertion/removal).
- struct BlockList {
- BuddyBlock* head = nullptr; // First free block in level.
- // TODO(crbug.com/dawn/827): Track the tail.
- };
-
- BuddyBlock* mRoot = nullptr; // Used to deallocate non-free blocks.
-
- uint64_t mMaxBlockSize = 0;
-
- // List of linked-lists of free blocks where the index is a level that
- // corresponds to a power-of-two sized block.
- std::vector<BlockList> mFreeLists;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BUDDYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
deleted file mode 100644
index eb7320c56a2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/BuddyMemoryAllocator.h"
-
-#include "common/Math.h"
-#include "dawn_native/ResourceHeapAllocator.h"
-
-namespace dawn_native {
-
- BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
- uint64_t memoryBlockSize,
- ResourceHeapAllocator* heapAllocator)
- : mMemoryBlockSize(memoryBlockSize),
- mBuddyBlockAllocator(maxSystemSize),
- mHeapAllocator(heapAllocator) {
- ASSERT(memoryBlockSize <= maxSystemSize);
- ASSERT(IsPowerOfTwo(mMemoryBlockSize));
- ASSERT(maxSystemSize % mMemoryBlockSize == 0);
-
- mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
- }
-
- uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
- ASSERT(offset != BuddyAllocator::kInvalidOffset);
- return offset / mMemoryBlockSize;
- }
-
- ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
- uint64_t alignment) {
- ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
-
- if (allocationSize == 0) {
- return std::move(invalidAllocation);
- }
-
- // Check the unaligned size to avoid overflowing NextPowerOfTwo.
- if (allocationSize > mMemoryBlockSize) {
- return std::move(invalidAllocation);
- }
-
- // Round allocation size to nearest power-of-two.
- allocationSize = NextPowerOfTwo(allocationSize);
-
- // Allocation cannot exceed the memory size.
- if (allocationSize > mMemoryBlockSize) {
- return std::move(invalidAllocation);
- }
-
- // Attempt to sub-allocate a block of the requested size.
- const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
- if (blockOffset == BuddyAllocator::kInvalidOffset) {
- return std::move(invalidAllocation);
- }
-
- const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
- if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
- // Transfer ownership to this allocator
- std::unique_ptr<ResourceHeapBase> memory;
- DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
- mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
- }
-
- mTrackedSubAllocations[memoryIndex].refcount++;
-
- AllocationInfo info;
- info.mBlockOffset = blockOffset;
- info.mMethod = AllocationMethod::kSubAllocated;
-
- // Allocation offset is always local to the memory.
- const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
-
- return ResourceMemoryAllocation{
- info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
- }
-
- void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
- const AllocationInfo info = allocation.GetInfo();
-
- ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
-
- const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
-
- ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
- mTrackedSubAllocations[memoryIndex].refcount--;
-
- if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
- mHeapAllocator->DeallocateResourceHeap(
- std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
- }
-
- mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
- }
-
- uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
- return mMemoryBlockSize;
- }
-
- uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
- uint64_t count = 0;
- for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
- if (allocation.refcount > 0) {
- count++;
- }
- }
- return count;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
deleted file mode 100644
index c057f748223..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
-#define DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
-
-#include "dawn_native/BuddyAllocator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-
-#include <memory>
-#include <vector>
-
-namespace dawn_native {
-
- class ResourceHeapAllocator;
-
- // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
- // memory created by MemoryAllocator clients. It creates a very large buddy system
- // where backing device memory blocks equal a specified level in the system.
- //
- // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
- // memory index and should the memory not exist, it is created. If two sub-allocations share the
- // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
- // release the other prematurely.
- //
- // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
- // It should also outlive all the resources that are in the buddy allocator.
- class BuddyMemoryAllocator {
- public:
- BuddyMemoryAllocator(uint64_t maxSystemSize,
- uint64_t memoryBlockSize,
- ResourceHeapAllocator* heapAllocator);
- ~BuddyMemoryAllocator() = default;
-
- ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
- uint64_t alignment);
- void Deallocate(const ResourceMemoryAllocation& allocation);
-
- uint64_t GetMemoryBlockSize() const;
-
- // For testing purposes.
- uint64_t ComputeTotalNumOfHeapsForTesting() const;
-
- private:
- uint64_t GetMemoryIndex(uint64_t offset) const;
-
- uint64_t mMemoryBlockSize = 0;
-
- BuddyAllocator mBuddyBlockAllocator;
- ResourceHeapAllocator* mHeapAllocator;
-
- struct TrackedSubAllocations {
- size_t refcount = 0;
- std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
- };
-
- std::vector<TrackedSubAllocations> mTrackedSubAllocations;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
deleted file mode 100644
index 6b04cbb4931..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Buffer.h"
-
-#include "common/Alloc.h"
-#include "common/Assert.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <cstdio>
-#include <cstring>
-#include <utility>
-
-namespace dawn_native {
-
- namespace {
- struct MapRequestTask : QueueBase::TaskInFlight {
- MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
- : buffer(std::move(buffer)), id(id) {
- }
- void Finish() override {
- buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
- }
- void HandleDeviceLoss() override {
- buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
- }
- ~MapRequestTask() override = default;
-
- private:
- Ref<BufferBase> buffer;
- MapRequestID id;
- };
-
- class ErrorBuffer final : public BufferBase {
- public:
- ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor, ObjectBase::kError) {
- if (descriptor->mappedAtCreation) {
- // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
- // is invalid, and on 32bit systems we should avoid a narrowing conversion that
- // would make size = 1 << 32 + 1 allocate one byte.
- bool isValidSize =
- descriptor->size != 0 &&
- descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
-
- if (isValidSize) {
- mFakeMappedData =
- std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
- }
- // Since error buffers in this case may allocate memory, we need to track them
- // for destruction on the device.
- TrackInDevice();
- }
- }
-
- private:
- bool IsCPUWritableAtCreation() const override {
- UNREACHABLE();
- }
-
- MaybeError MapAtCreationImpl() override {
- UNREACHABLE();
- }
-
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
- UNREACHABLE();
- }
-
- void* GetMappedPointerImpl() override {
- return mFakeMappedData.get();
- }
-
- void UnmapImpl() override {
- mFakeMappedData.reset();
- }
-
- std::unique_ptr<uint8_t[]> mFakeMappedData;
- };
-
- } // anonymous namespace
-
- MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
- DAWN_TRY(ValidateBufferUsage(descriptor->usage));
-
- wgpu::BufferUsage usage = descriptor->usage;
-
- const wgpu::BufferUsage kMapWriteAllowedUsages =
- wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
- DAWN_INVALID_IF(
- usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
- "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
- "usage is %s.",
- usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
-
- const wgpu::BufferUsage kMapReadAllowedUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
- DAWN_INVALID_IF(
- usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
- "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
- "usage is %s.",
- usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
-
- DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
- "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
- descriptor->size);
-
- return {};
- }
-
- // Buffer
-
- BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label),
- mSize(descriptor->size),
- mUsage(descriptor->usage),
- mState(BufferState::Unmapped) {
- // Add readonly storage usage if the buffer has a storage usage. The validation rules in
- // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
- if (mUsage & wgpu::BufferUsage::Storage) {
- mUsage |= kReadOnlyStorageBuffer;
- }
-
- // The query resolve buffer need to be used as a storage buffer in the internal compute
- // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
- // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
- // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
- // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
- // as storage buffer if it's created without Storage usage.
- if (mUsage & wgpu::BufferUsage::QueryResolve) {
- mUsage |= kInternalStorageBuffer;
- }
-
- // We also add internal storage usage for Indirect buffers for some transformations before
- // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
- // D3D12), since these transformations involve binding them as storage buffers for use in a
- // compute pass.
- if (mUsage & wgpu::BufferUsage::Indirect) {
- mUsage |= kInternalStorageBuffer;
- }
-
- TrackInDevice();
- }
-
- BufferBase::BufferBase(DeviceBase* device,
- const BufferDescriptor* descriptor,
- ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
- if (descriptor->mappedAtCreation) {
- mState = BufferState::MappedAtCreation;
- mMapOffset = 0;
- mMapSize = mSize;
- }
- }
-
- BufferBase::BufferBase(DeviceBase* device, BufferState state)
- : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
- TrackInDevice();
- }
-
- BufferBase::~BufferBase() {
- ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
- }
-
- void BufferBase::DestroyImpl() {
- if (mState == BufferState::Mapped) {
- UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- } else if (mState == BufferState::MappedAtCreation) {
- if (mStagingBuffer != nullptr) {
- mStagingBuffer.reset();
- } else if (mSize != 0) {
- UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- }
- }
- mState = BufferState::Destroyed;
- }
-
- // static
- BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
- return new ErrorBuffer(device, descriptor);
- }
-
- ObjectType BufferBase::GetType() const {
- return ObjectType::Buffer;
- }
-
- uint64_t BufferBase::GetSize() const {
- ASSERT(!IsError());
- return mSize;
- }
-
- uint64_t BufferBase::GetAllocatedSize() const {
- ASSERT(!IsError());
- // The backend must initialize this value.
- ASSERT(mAllocatedSize != 0);
- return mAllocatedSize;
- }
-
- wgpu::BufferUsage BufferBase::GetUsage() const {
- ASSERT(!IsError());
- return mUsage;
- }
-
- MaybeError BufferBase::MapAtCreation() {
- DAWN_TRY(MapAtCreationInternal());
-
- void* ptr;
- size_t size;
- if (mSize == 0) {
- return {};
- } else if (mStagingBuffer) {
- // If there is a staging buffer for initialization, clear its contents directly.
- // It should be exactly as large as the buffer allocation.
- ptr = mStagingBuffer->GetMappedPointer();
- size = mStagingBuffer->GetSize();
- ASSERT(size == GetAllocatedSize());
- } else {
- // Otherwise, the buffer is directly mappable on the CPU.
- ptr = GetMappedPointerImpl();
- size = GetAllocatedSize();
- }
-
- DeviceBase* device = GetDevice();
- if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- memset(ptr, uint8_t(0u), size);
- SetIsDataInitialized();
- device->IncrementLazyClearCountForTesting();
- } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- memset(ptr, uint8_t(1u), size);
- }
-
- return {};
- }
-
- MaybeError BufferBase::MapAtCreationInternal() {
- ASSERT(!IsError());
- mMapOffset = 0;
- mMapSize = mSize;
-
- // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
- // Skip handling 0-sized buffers so we don't try to map them in the backend.
- if (mSize != 0) {
- // Mappable buffers don't use a staging buffer and are just as if mapped through
- // MapAsync.
- if (IsCPUWritableAtCreation()) {
- DAWN_TRY(MapAtCreationImpl());
- } else {
- // If any of these fail, the buffer will be deleted and replaced with an error
- // buffer. The staging buffer is used to return mappable data to inititalize the
- // buffer contents. Allocate one as large as the real buffer size so that every byte
- // is initialized.
- // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
- // buffer so we don't create many small buffers.
- DAWN_TRY_ASSIGN(mStagingBuffer,
- GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
- }
- }
-
- // Only set the state to mapped at creation if we did no fail any point in this helper.
- // Otherwise, if we override the default unmapped state before succeeding to create a
- // staging buffer, we will have issues when we try to destroy the buffer.
- mState = BufferState::MappedAtCreation;
- return {};
- }
-
- MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
- ASSERT(!IsError());
-
- switch (mState) {
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
- case BufferState::Unmapped:
- return {};
- }
- UNREACHABLE();
- }
-
- void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
- ASSERT(!IsError());
- if (mMapCallback != nullptr && mapID == mLastMapID) {
- // Tag the callback as fired before firing it, otherwise it could fire a second time if
- // for example buffer.Unmap() is called inside the application-provided callback.
- WGPUBufferMapCallback callback = mMapCallback;
- mMapCallback = nullptr;
-
- if (GetDevice()->IsLost()) {
- callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
- } else {
- callback(status, mMapUserdata);
- }
- }
- }
-
- void BufferBase::APIMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata) {
- // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
- // possible to default the function argument (because there is the callback later in the
- // argument list)
- if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
- size = mSize - offset;
- }
-
- WGPUBufferMapAsyncStatus status;
- if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
- "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
- size)) {
- if (callback) {
- callback(status, userdata);
- }
- return;
- }
- ASSERT(!IsError());
-
- mLastMapID++;
- mMapMode = mode;
- mMapOffset = offset;
- mMapSize = size;
- mMapCallback = callback;
- mMapUserdata = userdata;
- mState = BufferState::Mapped;
-
- if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
- CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
- return;
- }
- std::unique_ptr<MapRequestTask> request =
- std::make_unique<MapRequestTask>(this, mLastMapID);
- GetDevice()->GetQueue()->TrackTask(std::move(request),
- GetDevice()->GetPendingCommandSerial());
- }
-
- void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
- return GetMappedRange(offset, size, true);
- }
-
- const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
- return GetMappedRange(offset, size, false);
- }
-
- void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
- if (!CanGetMappedRange(writable, offset, size)) {
- return nullptr;
- }
-
- if (mStagingBuffer != nullptr) {
- return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
- }
- if (mSize == 0) {
- return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
- }
- uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
- return start == nullptr ? nullptr : start + offset;
- }
-
- void BufferBase::APIDestroy() {
- Destroy();
- }
-
- MaybeError BufferBase::CopyFromStagingBuffer() {
- ASSERT(mStagingBuffer);
- if (mSize == 0) {
- // Staging buffer is not created if zero size.
- ASSERT(mStagingBuffer == nullptr);
- return {};
- }
-
- DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
- GetAllocatedSize()));
-
- DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
- uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
-
- return {};
- }
-
- void BufferBase::APIUnmap() {
- if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
- return;
- }
- Unmap();
- }
-
- void BufferBase::Unmap() {
- UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
- }
-
- void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
- if (mState == BufferState::Mapped) {
- // A map request can only be called once, so this will fire only if the request wasn't
- // completed before the Unmap.
- // Callbacks are not fired if there is no callback registered, so this is correct for
- // mappedAtCreation = true.
- CallMapCallback(mLastMapID, callbackStatus);
- UnmapImpl();
-
- mMapCallback = nullptr;
- mMapUserdata = 0;
- } else if (mState == BufferState::MappedAtCreation) {
- if (mStagingBuffer != nullptr) {
- GetDevice()->ConsumedError(CopyFromStagingBuffer());
- } else if (mSize != 0) {
- UnmapImpl();
- }
- }
-
- mState = BufferState::Unmapped;
- }
-
- MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapAsyncStatus* status) const {
- *status = WGPUBufferMapAsyncStatus_DeviceLost;
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- *status = WGPUBufferMapAsyncStatus_Error;
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(uint64_t(offset) > mSize,
- "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
- this);
-
- DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
- DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
-
- DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
- "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
- offset, size, mSize, this);
-
- switch (mState) {
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
- case BufferState::Unmapped:
- break;
- }
-
- bool isReadMode = mode & wgpu::MapMode::Read;
- bool isWriteMode = mode & wgpu::MapMode::Write;
- DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
- wgpu::MapMode::Write, wgpu::MapMode::Read);
-
- if (mode & wgpu::MapMode::Read) {
- DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
- "The buffer usages (%s) do not contain %s.", mUsage,
- wgpu::BufferUsage::MapRead);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
- "The buffer usages (%s) do not contain %s.", mUsage,
- wgpu::BufferUsage::MapWrite);
- }
-
- *status = WGPUBufferMapAsyncStatus_Success;
- return {};
- }
-
- bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
- if (offset % 8 != 0 || size % 4 != 0) {
- return false;
- }
-
- if (size > mMapSize || offset < mMapOffset) {
- return false;
- }
-
- size_t offsetInMappedRange = offset - mMapOffset;
- if (offsetInMappedRange > mMapSize - size) {
- return false;
- }
-
- // Note that:
- //
- // - We don't check that the device is alive because the application can ask for the
- // mapped pointer before it knows, and even Dawn knows, that the device was lost, and
- // still needs to work properly.
- // - We don't check that the object is alive because we need to return mapped pointers
- // for error buffers too.
-
- switch (mState) {
- // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
- case BufferState::MappedAtCreation:
- return true;
-
- case BufferState::Mapped:
- ASSERT(bool(mMapMode & wgpu::MapMode::Read) ^
- bool(mMapMode & wgpu::MapMode::Write));
- return !writable || (mMapMode & wgpu::MapMode::Write);
-
- case BufferState::Unmapped:
- case BufferState::Destroyed:
- return false;
- }
- UNREACHABLE();
- }
-
- MaybeError BufferBase::ValidateUnmap() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- switch (mState) {
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- // A buffer may be in the Mapped state if it was created with mappedAtCreation
- // even if it did not have a mappable usage.
- return {};
- case BufferState::Unmapped:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
- case BufferState::Destroyed:
- return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
- }
- UNREACHABLE();
- }
-
- void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
- CallMapCallback(mapID, status);
- }
-
- bool BufferBase::NeedsInitialization() const {
- return !mIsDataInitialized &&
- GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
- }
-
- bool BufferBase::IsDataInitialized() const {
- return mIsDataInitialized;
- }
-
- void BufferBase::SetIsDataInitialized() {
- mIsDataInitialized = true;
- }
-
- bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
- return offset == 0 && size == GetSize();
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
deleted file mode 100644
index acdbc717d5d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_BUFFER_H_
-#define DAWNNATIVE_BUFFER_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <memory>
-
-namespace dawn_native {
-
- struct CopyTextureToBufferCmd;
-
- enum class MapType : uint32_t;
-
- MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
-
- static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
- wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
- wgpu::BufferUsage::Indirect;
-
- static constexpr wgpu::BufferUsage kMappableBufferUsages =
- wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
-
- class BufferBase : public ApiObjectBase {
- public:
- enum class BufferState {
- Unmapped,
- Mapped,
- MappedAtCreation,
- Destroyed,
- };
- BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
-
- static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
-
- ObjectType GetType() const override;
-
- uint64_t GetSize() const;
- uint64_t GetAllocatedSize() const;
- wgpu::BufferUsage GetUsage() const;
-
- MaybeError MapAtCreation();
- void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
- MaybeError ValidateCanUseOnQueueNow() const;
-
- bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
- bool NeedsInitialization() const;
- bool IsDataInitialized() const;
- void SetIsDataInitialized();
-
- void* GetMappedRange(size_t offset, size_t size, bool writable = true);
- void Unmap();
-
- // Dawn API
- void APIMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata);
- void* APIGetMappedRange(size_t offset, size_t size);
- const void* APIGetConstMappedRange(size_t offset, size_t size);
- void APIUnmap();
- void APIDestroy();
-
- protected:
- BufferBase(DeviceBase* device,
- const BufferDescriptor* descriptor,
- ObjectBase::ErrorTag tag);
-
- // Constructor used only for mocking and testing.
- BufferBase(DeviceBase* device, BufferState state);
- void DestroyImpl() override;
-
- ~BufferBase() override;
-
- MaybeError MapAtCreationInternal();
-
- uint64_t mAllocatedSize = 0;
-
- private:
- virtual MaybeError MapAtCreationImpl() = 0;
- virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
- virtual void UnmapImpl() = 0;
- virtual void* GetMappedPointerImpl() = 0;
-
- virtual bool IsCPUWritableAtCreation() const = 0;
- MaybeError CopyFromStagingBuffer();
- void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
- MaybeError ValidateMapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapAsyncStatus* status) const;
- MaybeError ValidateUnmap() const;
- bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
- void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
-
- uint64_t mSize = 0;
- wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
- BufferState mState;
- bool mIsDataInitialized = false;
-
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
-
- WGPUBufferMapCallback mMapCallback = nullptr;
- void* mMapUserdata = 0;
- MapRequestID mLastMapID = MapRequestID(0);
- wgpu::MapMode mMapMode = wgpu::MapMode::None;
- size_t mMapOffset = 0;
- size_t mMapSize = 0;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
deleted file mode 100644
index b6b4f478504..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ /dev/null
@@ -1,561 +0,0 @@
-# Copyright 2020 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DawnJSONGenerator(
- TARGET "dawn_native_utils"
- PRINT_NAME "Dawn native utilities"
- RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
-)
-
-add_library(dawn_native ${DAWN_DUMMY_FILE})
-
-target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
-if(BUILD_SHARED_LIBS)
- target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_SHARED_LIBRARY")
-endif()
-
-target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/DawnNative.h"
- "${DAWN_INCLUDE_DIR}/dawn_native/dawn_native_export.h"
- ${DAWN_NATIVE_UTILS_GEN_SOURCES}
- "Adapter.cpp"
- "Adapter.h"
- "AsyncTask.cpp"
- "AsyncTask.h"
- "AttachmentState.cpp"
- "AttachmentState.h"
- "BackendConnection.cpp"
- "BackendConnection.h"
- "BindGroup.cpp"
- "BindGroup.h"
- "BindGroupLayout.cpp"
- "BindGroupLayout.h"
- "BindGroupTracker.h"
- "BindingInfo.cpp"
- "BindingInfo.h"
- "BuddyAllocator.cpp"
- "BuddyAllocator.h"
- "BuddyMemoryAllocator.cpp"
- "BuddyMemoryAllocator.h"
- "Buffer.cpp"
- "Buffer.h"
- "CachedObject.cpp"
- "CachedObject.h"
- "CallbackTaskManager.cpp"
- "CallbackTaskManager.h"
- "CommandAllocator.cpp"
- "CommandAllocator.h"
- "CommandBuffer.cpp"
- "CommandBuffer.h"
- "CommandBufferStateTracker.cpp"
- "CommandBufferStateTracker.h"
- "CommandEncoder.cpp"
- "CommandEncoder.h"
- "CommandValidation.cpp"
- "CommandValidation.h"
- "Commands.cpp"
- "Commands.h"
- "CompilationMessages.cpp"
- "CompilationMessages.h"
- "ComputePassEncoder.cpp"
- "ComputePassEncoder.h"
- "ComputePipeline.cpp"
- "ComputePipeline.h"
- "CopyTextureForBrowserHelper.cpp"
- "CopyTextureForBrowserHelper.h"
- "CreatePipelineAsyncTask.cpp"
- "CreatePipelineAsyncTask.h"
- "Device.cpp"
- "Device.h"
- "DynamicUploader.cpp"
- "DynamicUploader.h"
- "EncodingContext.cpp"
- "EncodingContext.h"
- "EnumClassBitmasks.h"
- "EnumMaskIterator.h"
- "Error.cpp"
- "Error.h"
- "ErrorData.cpp"
- "ErrorData.h"
- "ErrorInjector.cpp"
- "ErrorInjector.h"
- "ErrorScope.cpp"
- "ErrorScope.h"
- "Features.cpp"
- "Features.h"
- "ExternalTexture.cpp"
- "ExternalTexture.h"
- "IndirectDrawMetadata.cpp"
- "IndirectDrawMetadata.h"
- "IndirectDrawValidationEncoder.cpp"
- "IndirectDrawValidationEncoder.h"
- "ObjectContentHasher.cpp"
- "ObjectContentHasher.h"
- "Format.cpp"
- "Format.h"
- "Forward.h"
- "Instance.cpp"
- "Instance.h"
- "InternalPipelineStore.cpp"
- "InternalPipelineStore.h"
- "IntegerTypes.h"
- "Limits.cpp"
- "Limits.h"
- "ObjectBase.cpp"
- "ObjectBase.h"
- "PassResourceUsage.h"
- "PassResourceUsageTracker.cpp"
- "PassResourceUsageTracker.h"
- "PersistentCache.cpp"
- "PersistentCache.h"
- "PerStage.cpp"
- "PerStage.h"
- "Pipeline.cpp"
- "Pipeline.h"
- "PipelineLayout.cpp"
- "PipelineLayout.h"
- "PooledResourceMemoryAllocator.cpp"
- "PooledResourceMemoryAllocator.h"
- "ProgrammableEncoder.cpp"
- "ProgrammableEncoder.h"
- "QueryHelper.cpp"
- "QueryHelper.h"
- "QuerySet.cpp"
- "QuerySet.h"
- "Queue.cpp"
- "Queue.h"
- "RenderBundle.cpp"
- "RenderBundle.h"
- "RenderBundleEncoder.cpp"
- "RenderBundleEncoder.h"
- "RenderEncoderBase.cpp"
- "RenderEncoderBase.h"
- "RenderPassEncoder.cpp"
- "RenderPassEncoder.h"
- "RenderPipeline.cpp"
- "RenderPipeline.h"
- "ResourceHeap.h"
- "ResourceHeapAllocator.h"
- "ResourceMemoryAllocation.cpp"
- "ResourceMemoryAllocation.h"
- "RingBufferAllocator.cpp"
- "RingBufferAllocator.h"
- "Sampler.cpp"
- "Sampler.h"
- "ScratchBuffer.cpp"
- "ScratchBuffer.h"
- "ShaderModule.cpp"
- "ShaderModule.h"
- "StagingBuffer.cpp"
- "StagingBuffer.h"
- "Subresource.cpp"
- "Subresource.h"
- "SubresourceStorage.h"
- "Surface.cpp"
- "Surface.h"
- "SwapChain.cpp"
- "SwapChain.h"
- "Texture.cpp"
- "Texture.h"
- "TintUtils.cpp"
- "TintUtils.h"
- "ToBackend.h"
- "Toggles.cpp"
- "Toggles.h"
- "VertexFormat.cpp"
- "VertexFormat.h"
- "dawn_platform.h"
- "utils/WGPUHelpers.cpp"
- "utils/WGPUHelpers.h"
-)
-target_link_libraries(dawn_native
- PUBLIC dawncpp_headers
- PRIVATE dawn_common
- dawn_platform
- dawn_internal_config
- libtint
- SPIRV-Tools-opt
- absl_strings
- absl_str_format_internal
-)
-
-if (DAWN_REQUIRES_SPIRV_CROSS)
- target_link_libraries(dawn_native PRIVATE spirv-cross-core)
- if (DAWN_ENABLE_OPENGL)
- target_link_libraries(dawn_native PRIVATE spirv-cross-glsl)
- endif()
-endif()
-
-target_include_directories(dawn_native PRIVATE ${DAWN_ABSEIL_DIR})
-
-if (DAWN_USE_X11)
- find_package(X11 REQUIRED)
- target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
- target_include_directories(dawn_native PRIVATE ${X11_INCLUDE_DIR})
- target_sources(dawn_native PRIVATE
- "XlibXcbFunctions.cpp"
- "XlibXcbFunctions.h"
- )
-endif()
-
-# Only win32 app needs to link with user32.lib
-# In UWP, all availiable APIs are defined in WindowsApp.lib
-# and is automatically linked when WINDOWS_STORE set
-if (WIN32 AND NOT WINDOWS_STORE)
- target_link_libraries(dawn_native PRIVATE user32.lib)
-endif()
-
-# DXGIGetDebugInterface1 is defined in dxgi.lib
-# But this API is tagged as a development-only capability
-# which implies that linking to this function will cause
-# the application to fail Windows store certification
-# So we only link to it in debug build when compiling for UWP.
-# In win32 we load dxgi.dll using LoadLibrary
-# so no need for static linking.
-if (WINDOWS_STORE)
- target_link_libraries(dawn_native PRIVATE debug dxgi.lib)
-endif()
-
-if (DAWN_ENABLE_D3D12)
- target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/D3D12Backend.h"
- "d3d12/AdapterD3D12.cpp"
- "d3d12/AdapterD3D12.h"
- "d3d12/BackendD3D12.cpp"
- "d3d12/BackendD3D12.h"
- "d3d12/BindGroupD3D12.cpp"
- "d3d12/BindGroupD3D12.h"
- "d3d12/BindGroupLayoutD3D12.cpp"
- "d3d12/BindGroupLayoutD3D12.h"
- "d3d12/BufferD3D12.cpp"
- "d3d12/BufferD3D12.h"
- "d3d12/CPUDescriptorHeapAllocationD3D12.cpp"
- "d3d12/CPUDescriptorHeapAllocationD3D12.h"
- "d3d12/CommandAllocatorManager.cpp"
- "d3d12/CommandAllocatorManager.h"
- "d3d12/CommandBufferD3D12.cpp"
- "d3d12/CommandBufferD3D12.h"
- "d3d12/CommandRecordingContext.cpp"
- "d3d12/CommandRecordingContext.h"
- "d3d12/ComputePipelineD3D12.cpp"
- "d3d12/ComputePipelineD3D12.h"
- "d3d12/D3D11on12Util.cpp"
- "d3d12/D3D11on12Util.h"
- "d3d12/D3D12Error.cpp"
- "d3d12/D3D12Error.h"
- "d3d12/D3D12Info.cpp"
- "d3d12/D3D12Info.h"
- "d3d12/DeviceD3D12.cpp"
- "d3d12/DeviceD3D12.h"
- "d3d12/Forward.h"
- "d3d12/GPUDescriptorHeapAllocationD3D12.cpp"
- "d3d12/GPUDescriptorHeapAllocationD3D12.h"
- "d3d12/HeapAllocatorD3D12.cpp"
- "d3d12/HeapAllocatorD3D12.h"
- "d3d12/HeapD3D12.cpp"
- "d3d12/HeapD3D12.h"
- "d3d12/IntegerTypes.h"
- "d3d12/NativeSwapChainImplD3D12.cpp"
- "d3d12/NativeSwapChainImplD3D12.h"
- "d3d12/PageableD3D12.cpp"
- "d3d12/PageableD3D12.h"
- "d3d12/PipelineLayoutD3D12.cpp"
- "d3d12/PipelineLayoutD3D12.h"
- "d3d12/PlatformFunctions.cpp"
- "d3d12/PlatformFunctions.h"
- "d3d12/QuerySetD3D12.cpp"
- "d3d12/QuerySetD3D12.h"
- "d3d12/QueueD3D12.cpp"
- "d3d12/QueueD3D12.h"
- "d3d12/RenderPassBuilderD3D12.cpp"
- "d3d12/RenderPassBuilderD3D12.h"
- "d3d12/RenderPipelineD3D12.cpp"
- "d3d12/RenderPipelineD3D12.h"
- "d3d12/ResidencyManagerD3D12.cpp"
- "d3d12/ResidencyManagerD3D12.h"
- "d3d12/ResourceAllocatorManagerD3D12.cpp"
- "d3d12/ResourceAllocatorManagerD3D12.h"
- "d3d12/ResourceHeapAllocationD3D12.cpp"
- "d3d12/ResourceHeapAllocationD3D12.h"
- "d3d12/SamplerD3D12.cpp"
- "d3d12/SamplerD3D12.h"
- "d3d12/SamplerHeapCacheD3D12.cpp"
- "d3d12/SamplerHeapCacheD3D12.h"
- "d3d12/ShaderModuleD3D12.cpp"
- "d3d12/ShaderModuleD3D12.h"
- "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp"
- "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
- "d3d12/StagingBufferD3D12.cpp"
- "d3d12/StagingBufferD3D12.h"
- "d3d12/StagingDescriptorAllocatorD3D12.cpp"
- "d3d12/StagingDescriptorAllocatorD3D12.h"
- "d3d12/SwapChainD3D12.cpp"
- "d3d12/SwapChainD3D12.h"
- "d3d12/TextureCopySplitter.cpp"
- "d3d12/TextureCopySplitter.h"
- "d3d12/TextureD3D12.cpp"
- "d3d12/TextureD3D12.h"
- "d3d12/UtilsD3D12.cpp"
- "d3d12/UtilsD3D12.h"
- "d3d12/d3d12_platform.h"
- )
- target_link_libraries(dawn_native PRIVATE dxguid.lib)
-endif()
-
-if (DAWN_ENABLE_METAL)
- target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/MetalBackend.h"
- "Surface_metal.mm"
- "metal/BackendMTL.h"
- "metal/BackendMTL.mm"
- "metal/BindGroupLayoutMTL.h"
- "metal/BindGroupLayoutMTL.mm"
- "metal/BindGroupMTL.h"
- "metal/BindGroupMTL.mm"
- "metal/BufferMTL.h"
- "metal/BufferMTL.mm"
- "metal/CommandBufferMTL.h"
- "metal/CommandBufferMTL.mm"
- "metal/CommandRecordingContext.h"
- "metal/CommandRecordingContext.mm"
- "metal/ComputePipelineMTL.h"
- "metal/ComputePipelineMTL.mm"
- "metal/DeviceMTL.h"
- "metal/DeviceMTL.mm"
- "metal/Forward.h"
- "metal/PipelineLayoutMTL.h"
- "metal/PipelineLayoutMTL.mm"
- "metal/QueueMTL.h"
- "metal/QueueMTL.mm"
- "metal/QuerySetMTL.h"
- "metal/QuerySetMTL.mm"
- "metal/RenderPipelineMTL.h"
- "metal/RenderPipelineMTL.mm"
- "metal/SamplerMTL.h"
- "metal/SamplerMTL.mm"
- "metal/ShaderModuleMTL.h"
- "metal/ShaderModuleMTL.mm"
- "metal/StagingBufferMTL.h"
- "metal/StagingBufferMTL.mm"
- "metal/SwapChainMTL.h"
- "metal/SwapChainMTL.mm"
- "metal/TextureMTL.h"
- "metal/TextureMTL.mm"
- "metal/UtilsMetal.h"
- "metal/UtilsMetal.mm"
- )
- target_link_libraries(dawn_native PRIVATE
- "-framework Cocoa"
- "-framework IOKit"
- "-framework IOSurface"
- "-framework QuartzCore"
- "-framework Metal"
- )
-endif()
-
-if (DAWN_ENABLE_NULL)
- target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/NullBackend.h"
- "null/DeviceNull.cpp"
- "null/DeviceNull.h"
- )
-endif()
-
-if (DAWN_ENABLE_OPENGL OR DAWN_ENABLE_VULKAN)
- target_sources(dawn_native PRIVATE
- "SpirvValidation.cpp"
- "SpirvValidation.h"
- )
-endif()
-
-if (DAWN_ENABLE_OPENGL)
- DawnGenerator(
- SCRIPT "${Dawn_SOURCE_DIR}/generator/opengl_loader_generator.py"
- PRINT_NAME "OpenGL function loader"
- ARGS "--gl-xml"
- "${Dawn_SOURCE_DIR}/third_party/khronos/gl.xml"
- "--supported-extensions"
- "${Dawn_SOURCE_DIR}/src/dawn_native/opengl/supported_extensions.json"
- RESULT_VARIABLE "DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES"
- )
-
- target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/OpenGLBackend.h"
- ${DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES}
- "opengl/BackendGL.cpp"
- "opengl/BackendGL.h"
- "opengl/BindGroupGL.cpp"
- "opengl/BindGroupGL.h"
- "opengl/BindGroupLayoutGL.cpp"
- "opengl/BindGroupLayoutGL.h"
- "opengl/BufferGL.cpp"
- "opengl/BufferGL.h"
- "opengl/CommandBufferGL.cpp"
- "opengl/CommandBufferGL.h"
- "opengl/ComputePipelineGL.cpp"
- "opengl/ComputePipelineGL.h"
- "opengl/DeviceGL.cpp"
- "opengl/DeviceGL.h"
- "opengl/Forward.h"
- "opengl/GLFormat.cpp"
- "opengl/GLFormat.h"
- "opengl/NativeSwapChainImplGL.cpp"
- "opengl/NativeSwapChainImplGL.h"
- "opengl/OpenGLFunctions.cpp"
- "opengl/OpenGLFunctions.h"
- "opengl/OpenGLVersion.cpp"
- "opengl/OpenGLVersion.h"
- "opengl/PersistentPipelineStateGL.cpp"
- "opengl/PersistentPipelineStateGL.h"
- "opengl/PipelineGL.cpp"
- "opengl/PipelineGL.h"
- "opengl/PipelineLayoutGL.cpp"
- "opengl/PipelineLayoutGL.h"
- "opengl/QuerySetGL.cpp"
- "opengl/QuerySetGL.h"
- "opengl/QueueGL.cpp"
- "opengl/QueueGL.h"
- "opengl/RenderPipelineGL.cpp"
- "opengl/RenderPipelineGL.h"
- "opengl/SamplerGL.cpp"
- "opengl/SamplerGL.h"
- "opengl/ShaderModuleGL.cpp"
- "opengl/ShaderModuleGL.h"
- "opengl/SpirvUtils.cpp"
- "opengl/SpirvUtils.h"
- "opengl/SwapChainGL.cpp"
- "opengl/SwapChainGL.h"
- "opengl/TextureGL.cpp"
- "opengl/TextureGL.h"
- "opengl/UtilsGL.cpp"
- "opengl/UtilsGL.h"
- "opengl/opengl_platform.h"
- )
-
- target_link_libraries(dawn_native PRIVATE dawn_khronos_platform)
-endif()
-
-if (DAWN_ENABLE_VULKAN)
- target_sources(dawn_native PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_native/VulkanBackend.h"
- "vulkan/AdapterVk.cpp"
- "vulkan/AdapterVk.h"
- "vulkan/BackendVk.cpp"
- "vulkan/BackendVk.h"
- "vulkan/BindGroupLayoutVk.cpp"
- "vulkan/BindGroupLayoutVk.h"
- "vulkan/BindGroupVk.cpp"
- "vulkan/BindGroupVk.h"
- "vulkan/BufferVk.cpp"
- "vulkan/BufferVk.h"
- "vulkan/CommandBufferVk.cpp"
- "vulkan/CommandBufferVk.h"
- "vulkan/CommandRecordingContext.h"
- "vulkan/ComputePipelineVk.cpp"
- "vulkan/ComputePipelineVk.h"
- "vulkan/DescriptorSetAllocation.h"
- "vulkan/DescriptorSetAllocator.cpp"
- "vulkan/DescriptorSetAllocator.h"
- "vulkan/DeviceVk.cpp"
- "vulkan/DeviceVk.h"
- "vulkan/ExternalHandle.h"
- "vulkan/FencedDeleter.cpp"
- "vulkan/FencedDeleter.h"
- "vulkan/Forward.h"
- "vulkan/NativeSwapChainImplVk.cpp"
- "vulkan/NativeSwapChainImplVk.h"
- "vulkan/PipelineLayoutVk.cpp"
- "vulkan/PipelineLayoutVk.h"
- "vulkan/QuerySetVk.cpp"
- "vulkan/QuerySetVk.h"
- "vulkan/QueueVk.cpp"
- "vulkan/QueueVk.h"
- "vulkan/RenderPassCache.cpp"
- "vulkan/RenderPassCache.h"
- "vulkan/RenderPipelineVk.cpp"
- "vulkan/RenderPipelineVk.h"
- "vulkan/ResourceHeapVk.cpp"
- "vulkan/ResourceHeapVk.h"
- "vulkan/ResourceMemoryAllocatorVk.cpp"
- "vulkan/ResourceMemoryAllocatorVk.h"
- "vulkan/SamplerVk.cpp"
- "vulkan/SamplerVk.h"
- "vulkan/ShaderModuleVk.cpp"
- "vulkan/ShaderModuleVk.h"
- "vulkan/StagingBufferVk.cpp"
- "vulkan/StagingBufferVk.h"
- "vulkan/SwapChainVk.cpp"
- "vulkan/SwapChainVk.h"
- "vulkan/TextureVk.cpp"
- "vulkan/TextureVk.h"
- "vulkan/UtilsVulkan.cpp"
- "vulkan/UtilsVulkan.h"
- "vulkan/VulkanError.cpp"
- "vulkan/VulkanError.h"
- "vulkan/VulkanExtensions.cpp"
- "vulkan/VulkanExtensions.h"
- "vulkan/VulkanFunctions.cpp"
- "vulkan/VulkanFunctions.h"
- "vulkan/VulkanInfo.cpp"
- "vulkan/VulkanInfo.h"
- "vulkan/external_memory/MemoryService.h"
- "vulkan/external_semaphore/SemaphoreService.h"
- )
-
- target_link_libraries(dawn_native PUBLIC dawn_vulkan_headers)
-
- if (UNIX AND NOT APPLE)
- target_sources(dawn_native PRIVATE
- "vulkan/external_memory/MemoryServiceOpaqueFD.cpp"
- "vulkan/external_semaphore/SemaphoreServiceFD.cpp"
- )
- else()
- target_sources(dawn_native PRIVATE
- "vulkan/external_memory/MemoryServiceNull.cpp"
- "vulkan/external_semaphore/SemaphoreServiceNull.cpp"
- )
- endif()
-endif()
-
-# TODO how to do the component build in CMake?
-target_sources(dawn_native PRIVATE "DawnNative.cpp")
-if (DAWN_ENABLE_D3D12)
- target_sources(dawn_native PRIVATE "d3d12/D3D12Backend.cpp")
-endif()
-if (DAWN_ENABLE_METAL)
- target_sources(dawn_native PRIVATE "metal/MetalBackend.mm")
-endif()
-if (DAWN_ENABLE_NULL)
- target_sources(dawn_native PRIVATE "null/NullBackend.cpp")
-endif()
-if (DAWN_ENABLE_OPENGL)
- target_sources(dawn_native PRIVATE "opengl/OpenGLBackend.cpp")
-endif()
-if (DAWN_ENABLE_VULKAN)
- target_sources(dawn_native PRIVATE "vulkan/VulkanBackend.cpp")
-endif()
-
-DawnJSONGenerator(
- TARGET "webgpu_dawn_native_proc"
- PRINT_NAME "Dawn native WebGPU procs"
- RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
-)
-
-add_library(webgpu_dawn ${DAWN_DUMMY_FILE})
-target_link_libraries(webgpu_dawn PRIVATE dawn_native)
-target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")
-if(BUILD_SHARED_LIBS)
- target_compile_definitions(webgpu_dawn PRIVATE "WGPU_SHARED_LIBRARY")
-endif()
-target_sources(webgpu_dawn PRIVATE ${WEBGPU_DAWN_NATIVE_PROC_GEN})
diff --git a/chromium/third_party/dawn/src/dawn_native/CachedObject.cpp b/chromium/third_party/dawn/src/dawn_native/CachedObject.cpp
deleted file mode 100644
index 523924d99f0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CachedObject.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CachedObject.h"
-
-#include "common/Assert.h"
-
-namespace dawn_native {
-
- bool CachedObject::IsCachedReference() const {
- return mIsCachedReference;
- }
-
- void CachedObject::SetIsCachedReference() {
- mIsCachedReference = true;
- }
-
- size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
- return obj->GetContentHash();
- }
-
- size_t CachedObject::GetContentHash() const {
- ASSERT(mIsContentHashInitialized);
- return mContentHash;
- }
-
- void CachedObject::SetContentHash(size_t contentHash) {
- ASSERT(!mIsContentHashInitialized);
- mContentHash = contentHash;
- mIsContentHashInitialized = true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CachedObject.h b/chromium/third_party/dawn/src/dawn_native/CachedObject.h
deleted file mode 100644
index ff84e1e0d03..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CachedObject.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_CACHED_OBJECT_H_
-#define DAWNNATIVE_CACHED_OBJECT_H_
-
-#include <cstddef>
-
-namespace dawn_native {
-
- // Some objects are cached so that instead of creating new duplicate objects,
- // we increase the refcount of an existing object.
- // When an object is successfully created, the device should call
- // SetIsCachedReference() and insert the object into the cache.
- class CachedObject {
- public:
- bool IsCachedReference() const;
-
- // Functor necessary for the unordered_set<CachedObject*>-based cache.
- struct HashFunc {
- size_t operator()(const CachedObject* obj) const;
- };
-
- size_t GetContentHash() const;
- void SetContentHash(size_t contentHash);
-
- private:
- friend class DeviceBase;
- void SetIsCachedReference();
-
- bool mIsCachedReference = false;
-
- // Called by ObjectContentHasher upon creation to record the object.
- virtual size_t ComputeContentHash() = 0;
-
- size_t mContentHash = 0;
- bool mIsContentHashInitialized = false;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_CACHED_OBJECT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp
deleted file mode 100644
index 1c9106c2610..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CallbackTaskManager.h"
-
-namespace dawn_native {
-
- bool CallbackTaskManager::IsEmpty() {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
- return mCallbackTaskQueue.empty();
- }
-
- std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
-
- std::vector<std::unique_ptr<CallbackTask>> allTasks;
- allTasks.swap(mCallbackTaskQueue);
- return allTasks;
- }
-
- void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
- std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
- mCallbackTaskQueue.push_back(std::move(callbackTask));
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h b/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h
deleted file mode 100644
index 49108ec0d04..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CallbackTaskManager.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
-#define DAWNNATIVE_CALLBACK_TASK_MANAGER_H_
-
-#include <memory>
-#include <mutex>
-#include <vector>
-
-namespace dawn_native {
-
- struct CallbackTask {
- public:
- virtual ~CallbackTask() = default;
- virtual void Finish() = 0;
- virtual void HandleShutDown() = 0;
- virtual void HandleDeviceLoss() = 0;
- };
-
- class CallbackTaskManager {
- public:
- void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
- bool IsEmpty();
- std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
-
- private:
- std::mutex mCallbackTaskQueueMutex;
- std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
- };
-
-} // namespace dawn_native
-
-#endif
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
deleted file mode 100644
index 9516b59c558..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CommandAllocator.h"
-
-#include "common/Assert.h"
-#include "common/Math.h"
-
-#include <algorithm>
-#include <climits>
-#include <cstdlib>
-#include <utility>
-
-namespace dawn_native {
-
- // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
-
- CommandIterator::CommandIterator() {
- Reset();
- }
-
- CommandIterator::~CommandIterator() {
- ASSERT(IsEmpty());
- }
-
- CommandIterator::CommandIterator(CommandIterator&& other) {
- if (!other.IsEmpty()) {
- mBlocks = std::move(other.mBlocks);
- other.Reset();
- }
- Reset();
- }
-
- CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
- ASSERT(IsEmpty());
- if (!other.IsEmpty()) {
- mBlocks = std::move(other.mBlocks);
- other.Reset();
- }
- Reset();
- return *this;
- }
-
- CommandIterator::CommandIterator(CommandAllocator allocator)
- : mBlocks(allocator.AcquireBlocks()) {
- Reset();
- }
-
- void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
- ASSERT(IsEmpty());
- mBlocks.clear();
- for (CommandAllocator& allocator : allocators) {
- CommandBlocks blocks = allocator.AcquireBlocks();
- if (!blocks.empty()) {
- mBlocks.reserve(mBlocks.size() + blocks.size());
- for (BlockDef& block : blocks) {
- mBlocks.push_back(std::move(block));
- }
- }
- }
- Reset();
- }
-
- bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
- mCurrentBlock++;
- if (mCurrentBlock >= mBlocks.size()) {
- Reset();
- *commandId = detail::kEndOfBlock;
- return false;
- }
- mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
- return NextCommandId(commandId);
- }
-
- void CommandIterator::Reset() {
- mCurrentBlock = 0;
-
- if (mBlocks.empty()) {
- // This will case the first NextCommandId call to try to move to the next block and stop
- // the iteration immediately, without special casing the initialization.
- mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
- mBlocks.emplace_back();
- mBlocks[0].size = sizeof(mEndOfBlock);
- mBlocks[0].block = mCurrentPtr;
- } else {
- mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
- }
- }
-
- void CommandIterator::MakeEmptyAsDataWasDestroyed() {
- if (IsEmpty()) {
- return;
- }
-
- for (BlockDef& block : mBlocks) {
- free(block.block);
- }
- mBlocks.clear();
- Reset();
- ASSERT(IsEmpty());
- }
-
- bool CommandIterator::IsEmpty() const {
- return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
- }
-
- // Potential TODO(crbug.com/dawn/835):
- // - Host the size and pointer to next block in the block itself to avoid having an allocation
- // in the vector
- // - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
- // in Allocate
- // - Be able to optimize allocation to one block, for command buffers expected to live long to
- // avoid cache misses
- // - Better block allocation, maybe have Dawn API to say command buffer is going to have size
- // close to another
-
- CommandAllocator::CommandAllocator() {
- ResetPointers();
- }
-
- CommandAllocator::~CommandAllocator() {
- Reset();
- }
-
- CommandAllocator::CommandAllocator(CommandAllocator&& other)
- : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
- other.mBlocks.clear();
- if (!other.IsEmpty()) {
- mCurrentPtr = other.mCurrentPtr;
- mEndPtr = other.mEndPtr;
- } else {
- ResetPointers();
- }
- other.Reset();
- }
-
- CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
- Reset();
- if (!other.IsEmpty()) {
- std::swap(mBlocks, other.mBlocks);
- mLastAllocationSize = other.mLastAllocationSize;
- mCurrentPtr = other.mCurrentPtr;
- mEndPtr = other.mEndPtr;
- }
- other.Reset();
- return *this;
- }
-
- void CommandAllocator::Reset() {
- for (BlockDef& block : mBlocks) {
- free(block.block);
- }
- mBlocks.clear();
- mLastAllocationSize = kDefaultBaseAllocationSize;
- ResetPointers();
- }
-
- bool CommandAllocator::IsEmpty() const {
- return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
- }
-
- CommandBlocks&& CommandAllocator::AcquireBlocks() {
- ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
- *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
-
- mCurrentPtr = nullptr;
- mEndPtr = nullptr;
- return std::move(mBlocks);
- }
-
- uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
- // to move to the next one. kEndOfBlock on the last block means the end of the commands.
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = detail::kEndOfBlock;
-
- // We'll request a block that can contain at least the command ID, the command and an
- // additional ID to contain the kEndOfBlock tag.
- size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
-
- // The computation of the request could overflow.
- if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
- return nullptr;
- }
-
- if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
- return nullptr;
- }
- return Allocate(commandId, commandSize, commandAlignment);
- }
-
- bool CommandAllocator::GetNewBlock(size_t minimumSize) {
- // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
- mLastAllocationSize =
- std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
-
- uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
- if (DAWN_UNLIKELY(block == nullptr)) {
- return false;
- }
-
- mBlocks.push_back({mLastAllocationSize, block});
- mCurrentPtr = AlignPtr(block, alignof(uint32_t));
- mEndPtr = block + mLastAllocationSize;
- return true;
- }
-
- void CommandAllocator::ResetPointers() {
- mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
- mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
deleted file mode 100644
index 7a706aad9e7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMAND_ALLOCATOR_H_
-#define DAWNNATIVE_COMMAND_ALLOCATOR_H_
-
-#include "common/Assert.h"
-#include "common/Math.h"
-#include "common/NonCopyable.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <vector>
-
-namespace dawn_native {
-
- // Allocation for command buffers should be fast. To avoid doing an allocation per command
- // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
- // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
- // so that iteration over the commands is easy.
-
- // Usage of the allocator and iterator:
- // CommandAllocator allocator;
- // DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
- // // Fill command
- // // Repeat allocation and filling commands
- //
- // CommandIterator commands(allocator);
- // CommandType type;
- // while(commands.NextCommandId(&type)) {
- // switch(type) {
- // case CommandType::Draw:
- // DrawCommand* draw = commands.NextCommand<DrawCommand>();
- // // Do the draw
- // break;
- // // other cases
- // }
- // }
-
- // Note that you need to extract the commands from the CommandAllocator before destroying it
- // and must tell the CommandIterator when the allocated commands have been processed for
- // deletion.
-
- // These are the lists of blocks, should not be used directly, only through CommandAllocator
- // and CommandIterator
- struct BlockDef {
- size_t size;
- uint8_t* block;
- };
- using CommandBlocks = std::vector<BlockDef>;
-
- namespace detail {
- constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
- constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
- } // namespace detail
-
- class CommandAllocator;
-
- class CommandIterator : public NonCopyable {
- public:
- CommandIterator();
- ~CommandIterator();
-
- CommandIterator(CommandIterator&& other);
- CommandIterator& operator=(CommandIterator&& other);
-
- // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
- explicit CommandIterator(CommandAllocator allocator);
-
- void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
-
- template <typename E>
- bool NextCommandId(E* commandId) {
- return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
- }
- template <typename T>
- T* NextCommand() {
- return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
- }
- template <typename T>
- T* NextData(size_t count) {
- return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
- }
-
- // Sets iterator to the beginning of the commands without emptying the list. This method can
- // be used if iteration was stopped early and the iterator needs to be restarted.
- void Reset();
-
- // This method must to be called after commands have been deleted. This indicates that the
- // commands have been submitted and they are no longer valid.
- void MakeEmptyAsDataWasDestroyed();
-
- private:
- bool IsEmpty() const;
-
- DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
- uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
- ASSERT(idPtr + sizeof(uint32_t) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
- if (id != detail::kEndOfBlock) {
- mCurrentPtr = idPtr + sizeof(uint32_t);
- *commandId = id;
- return true;
- }
- return NextCommandIdInNewBlock(commandId);
- }
-
- bool NextCommandIdInNewBlock(uint32_t* commandId);
-
- DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
- uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
- ASSERT(commandPtr + sizeof(commandSize) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- mCurrentPtr = commandPtr + commandSize;
- return commandPtr;
- }
-
- DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
- uint32_t id;
- bool hasId = NextCommandId(&id);
- ASSERT(hasId);
- ASSERT(id == detail::kAdditionalData);
-
- return NextCommand(dataSize, dataAlignment);
- }
-
- CommandBlocks mBlocks;
- uint8_t* mCurrentPtr = nullptr;
- size_t mCurrentBlock = 0;
- // Used to avoid a special case for empty iterators.
- uint32_t mEndOfBlock = detail::kEndOfBlock;
- };
-
- class CommandAllocator : public NonCopyable {
- public:
- CommandAllocator();
- ~CommandAllocator();
-
- // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
- CommandAllocator(CommandAllocator&&);
- CommandAllocator& operator=(CommandAllocator&&);
-
- // Frees all blocks held by the allocator and restores it to its initial empty state.
- void Reset();
-
- bool IsEmpty() const;
-
- template <typename T, typename E>
- T* Allocate(E commandId) {
- static_assert(sizeof(E) == sizeof(uint32_t), "");
- static_assert(alignof(E) == alignof(uint32_t), "");
- static_assert(alignof(T) <= kMaxSupportedAlignment, "");
- T* result = reinterpret_cast<T*>(
- Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
- if (!result) {
- return nullptr;
- }
- new (result) T;
- return result;
- }
-
- template <typename T>
- T* AllocateData(size_t count) {
- static_assert(alignof(T) <= kMaxSupportedAlignment, "");
- T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
- if (!result) {
- return nullptr;
- }
- for (size_t i = 0; i < count; i++) {
- new (result + i) T;
- }
- return result;
- }
-
- private:
- // This is used for some internal computations and can be any power of two as long as code
- // using the CommandAllocator passes the static_asserts.
- static constexpr size_t kMaxSupportedAlignment = 8;
-
- // To avoid checking for overflows at every step of the computations we compute an upper
- // bound of the space that will be needed in addition to the command data.
- static constexpr size_t kWorstCaseAdditionalSize =
- sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
-
- // The default value of mLastAllocationSize.
- static constexpr size_t kDefaultBaseAllocationSize = 2048;
-
- friend CommandIterator;
- CommandBlocks&& AcquireBlocks();
-
- DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- ASSERT(mCurrentPtr != nullptr);
- ASSERT(mEndPtr != nullptr);
- ASSERT(commandId != detail::kEndOfBlock);
-
- // It should always be possible to allocate one id, for kEndOfBlock tagging,
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mEndPtr >= mCurrentPtr);
- ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
-
- // The memory after the ID will contain the following:
- // - the current ID
- // - padding to align the command, maximum kMaxSupportedAlignment
- // - the command of size commandSize
- // - padding to align the next ID, maximum alignof(uint32_t)
- // - the next ID of size sizeof(uint32_t)
-
- // This can't overflow because by construction mCurrentPtr always has space for the next
- // ID.
- size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
-
- // The good case were we have enough space for the command data and upper bound of the
- // extra required space.
- if ((remainingSize >= kWorstCaseAdditionalSize) &&
- (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = commandId;
-
- uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
- mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
- return commandAlloc;
- }
- return AllocateInNewBlock(commandId, commandSize, commandAlignment);
- }
-
- uint8_t* AllocateInNewBlock(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment);
-
- DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
- return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
- }
-
- bool GetNewBlock(size_t minimumSize);
-
- void ResetPointers();
-
- CommandBlocks mBlocks;
- size_t mLastAllocationSize = kDefaultBaseAllocationSize;
-
- // Data used for the block range at initialization so that the first call to Allocate sees
- // there is not enough space and calls GetNewBlock. This avoids having to special case the
- // initialization in Allocate.
- uint32_t mDummyEnum[1] = {0};
-
- // Pointers to the current range of allocation in the block. Guaranteed to allow for at
- // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
- // be written. Nullptr iff the blocks were moved out.
- uint8_t* mCurrentPtr = nullptr;
- uint8_t* mEndPtr = nullptr;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMAND_ALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
deleted file mode 100644
index c214fbf04a8..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CommandBuffer.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/Texture.h"
-
-namespace dawn_native {
-
- CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor)
- : ApiObjectBase(encoder->GetDevice(), descriptor->label),
- mCommands(encoder->AcquireCommands()),
- mResourceUsages(encoder->AcquireResourceUsages()) {
- TrackInDevice();
- }
-
- CommandBufferBase::CommandBufferBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- // static
- CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
- return new CommandBufferBase(device, ObjectBase::kError);
- }
-
- ObjectType CommandBufferBase::GetType() const {
- return ObjectType::CommandBuffer;
- }
-
- MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
-
- DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
- return {};
- }
-
- void CommandBufferBase::DestroyImpl() {
- FreeCommands(&mCommands);
- mResourceUsages = {};
- }
-
- const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
- return mResourceUsages;
- }
-
- CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
- return &mCommands;
- }
-
- bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
- const Extent3D copySize,
- const uint32_t mipLevel) {
- Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- return extent.width == copySize.width && extent.height == copySize.height;
- case wgpu::TextureDimension::e3D:
- return extent.width == copySize.width && extent.height == copySize.height &&
- extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
- default:
- UNREACHABLE();
- }
- }
-
- SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
- const Extent3D& copySize) {
- switch (copy.texture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- return {
- copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
- case wgpu::TextureDimension::e3D:
- return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
- default:
- UNREACHABLE();
- }
- }
-
- void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureViewBase* view = attachmentInfo.view.Get();
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
-
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetLevelCount() == 1);
- SubresourceRange range = view->GetSubresourceRange();
-
- // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
- if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
- !view->GetTexture()->IsSubresourceContentInitialized(range)) {
- attachmentInfo.loadOp = wgpu::LoadOp::Clear;
- attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
- }
-
- if (hasResolveTarget) {
- // We need to set the resolve target to initialized so that it does not get
- // cleared later in the pipeline. The texture will be resolved from the
- // source color attachment, which will be correctly initialized.
- TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
- ASSERT(resolveView->GetLayerCount() == 1);
- ASSERT(resolveView->GetLevelCount() == 1);
- resolveView->GetTexture()->SetIsSubresourceContentInitialized(
- true, resolveView->GetSubresourceRange());
- }
-
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
- break;
-
- case wgpu::StoreOp::Discard:
- view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
- break;
- }
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- TextureViewBase* view = attachmentInfo.view.Get();
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetLevelCount() == 1);
- SubresourceRange range = view->GetSubresourceRange();
-
- SubresourceRange depthRange = range;
- depthRange.aspects = range.aspects & Aspect::Depth;
-
- SubresourceRange stencilRange = range;
- stencilRange.aspects = range.aspects & Aspect::Stencil;
-
- // If the depth stencil texture has not been initialized, we want to use loadop
- // clear to init the contents to 0's
- if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
- attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- }
-
- if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
- attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- }
-
- view->GetTexture()->SetIsSubresourceContentInitialized(
- attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
-
- view->GetTexture()->SetIsSubresourceContentInitialized(
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
- }
- }
-
- bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
- ASSERT(copy != nullptr);
-
- if (copy->destination.offset > 0) {
- // The copy doesn't touch the start of the buffer.
- return false;
- }
-
- const TextureBase* texture = copy->source.texture.Get();
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
- const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
- const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
- const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
- const bool multiRow = multiSlice || heightInBlocks > 1;
-
- if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
- // There are gaps between slices that aren't overwritten
- return false;
- }
-
- const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
- if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
- // There are gaps between rows that aren't overwritten
- return false;
- }
-
- // After the above checks, we're sure the copy has no gaps.
- // Now, compute the total number of bytes written.
- const uint64_t writtenBytes =
- ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
- copy->destination.rowsPerImage)
- .AcquireSuccess();
- if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
- // The written bytes don't cover the whole buffer.
- return false;
- }
-
- return true;
- }
-
- std::array<float, 4> ConvertToFloatColor(dawn_native::Color color) {
- const std::array<float, 4> outputValue = {
- static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
- static_cast<float>(color.a)};
- return outputValue;
- }
- std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn_native::Color color) {
- const std::array<int32_t, 4> outputValue = {
- static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
- static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
- return outputValue;
- }
-
- std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn_native::Color color) {
- const std::array<uint32_t, 4> outputValue = {
- static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
- static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
- return outputValue;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
deleted file mode 100644
index a8c23a6a9cb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMANDBUFFER_H_
-#define DAWNNATIVE_COMMANDBUFFER_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "dawn_native/CommandAllocator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PassResourceUsage.h"
-#include "dawn_native/Texture.h"
-
-namespace dawn_native {
-
- struct BeginRenderPassCmd;
- struct CopyTextureToBufferCmd;
- struct TextureCopy;
-
- class CommandBufferBase : public ApiObjectBase {
- public:
- CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-
- static CommandBufferBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- MaybeError ValidateCanUseInSubmitNow() const;
-
- const CommandBufferResourceUsage& GetResourceUsages() const;
-
- CommandIterator* GetCommandIteratorForTesting();
-
- protected:
- // Constructor used only for mocking and testing.
- CommandBufferBase(DeviceBase* device);
- void DestroyImpl() override;
-
- CommandIterator mCommands;
-
- private:
- CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- CommandBufferResourceUsage mResourceUsages;
- };
-
- bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
- const Extent3D copySize,
- const uint32_t mipLevel);
- SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
- const Extent3D& copySize);
-
- void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
-
- bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
-
- std::array<float, 4> ConvertToFloatColor(dawn_native::Color color);
- std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn_native::Color color);
- std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn_native::Color color);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
deleted file mode 100644
index 17a9b616725..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ /dev/null
@@ -1,407 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CommandBufferStateTracker.h"
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/ComputePassEncoder.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/RenderPipeline.h"
-
-// TODO(dawn:563): None of the error messages in this file include the buffer objects they are
-// validating against. It would be nice to improve that, but difficult to do without incurring
-// additional tracking costs.
-
-namespace dawn_native {
-
- namespace {
- bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
- const std::vector<uint64_t>& pipelineMinBufferSizes) {
- ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
-
- for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
- if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
- return false;
- }
- }
-
- return true;
- }
- } // namespace
-
- enum ValidationAspect {
- VALIDATION_ASPECT_PIPELINE,
- VALIDATION_ASPECT_BIND_GROUPS,
- VALIDATION_ASPECT_VERTEX_BUFFERS,
- VALIDATION_ASPECT_INDEX_BUFFER,
-
- VALIDATION_ASPECT_COUNT
- };
- static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects, "");
-
- static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
-
- static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
- 1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
-
- static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
- 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
- 1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
-
- static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
- 1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
- 1 << VALIDATION_ASPECT_INDEX_BUFFER;
-
- MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
- return ValidateOperation(kDispatchAspects);
- }
-
- MaybeError CommandBufferStateTracker::ValidateCanDraw() {
- return ValidateOperation(kDrawAspects);
- }
-
- MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
- return ValidateOperation(kDrawIndexedAspects);
- }
-
- MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
- uint32_t vertexCount,
- uint32_t firstVertex) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- vertexBufferSlotsUsedAsVertexBuffer =
- lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
-
- for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
- const VertexBufferInfo& vertexBuffer =
- lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
- uint64_t arrayStride = vertexBuffer.arrayStride;
- uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
-
- if (arrayStride == 0) {
- DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
- "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
- "is smaller than the required size for all attributes (%u)",
- bufferSize, static_cast<uint8_t>(usedSlotVertex),
- vertexBuffer.usedBytesInStride);
- } else {
- uint64_t requiredSize =
- (static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride;
- // firstVertex and vertexCount are in uint32_t, and arrayStride must not
- // be larger than kMaxVertexBufferArrayStride, which is currently 2048. So by
- // doing checks in uint64_t we avoid overflows.
- DAWN_INVALID_IF(
- requiredSize > bufferSize,
- "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than the "
- "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
- firstVertex, vertexCount, requiredSize, bufferSize,
- static_cast<uint8_t>(usedSlotVertex), arrayStride);
- }
- }
-
- return {};
- }
-
- MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
- uint32_t instanceCount,
- uint32_t firstInstance) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- vertexBufferSlotsUsedAsInstanceBuffer =
- lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
-
- for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
- const VertexBufferInfo& vertexBuffer =
- lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
- uint64_t arrayStride = vertexBuffer.arrayStride;
- uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
- if (arrayStride == 0) {
- DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
- "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
- "is smaller than the required size for all attributes (%u)",
- bufferSize, static_cast<uint8_t>(usedSlotInstance),
- vertexBuffer.usedBytesInStride);
- } else {
- uint64_t requiredSize =
- (static_cast<uint64_t>(firstInstance) + instanceCount) * arrayStride;
- // firstInstance and instanceCount are in uint32_t, and arrayStride must
- // not be larger than kMaxVertexBufferArrayStride, which is currently 2048.
- // So by doing checks in uint64_t we avoid overflows.
- DAWN_INVALID_IF(
- requiredSize > bufferSize,
- "Instance range (first: %u, count: %u) requires a larger buffer (%u) than the "
- "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
- firstInstance, instanceCount, requiredSize, bufferSize,
- static_cast<uint8_t>(usedSlotInstance), arrayStride);
- }
- }
-
- return {};
- }
-
- MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
- uint32_t firstIndex) {
- // Validate the range of index buffer
- // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
- // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
- // uint64_t we avoid overflows.
- DAWN_INVALID_IF(
- (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
- mIndexBufferSize,
- "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
- "(%u).",
- firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
- return {};
- }
-
- MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
- // Fast return-true path if everything is good
- ValidationAspects missingAspects = requiredAspects & ~mAspects;
- if (missingAspects.none()) {
- return {};
- }
-
- // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
- // requires the pipeline to be set.
- DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
-
- RecomputeLazyAspects(missingAspects);
-
- DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
-
- return {};
- }
-
- void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
- ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
- ASSERT((aspects & ~kLazyAspects).none());
-
- if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
- bool matches = true;
-
- for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
- if (mBindgroups[i] == nullptr ||
- mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
- !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
- (*mMinBufferSizes)[i])) {
- matches = false;
- break;
- }
- }
-
- if (matches) {
- mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
- }
- }
-
- if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
- lastRenderPipeline->GetVertexBufferSlotsUsed();
- if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
- mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
- }
- }
-
- if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
- if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
- mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
- mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
- }
- }
- }
-
- MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
- if (!aspects.any()) {
- return {};
- }
-
- DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
-
- if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
- DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
-
- RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
- wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
-
- if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
- DAWN_INVALID_IF(
- pipelineIndexFormat == wgpu::IndexFormat::Undefined,
- "%s has a strip primitive topology (%s) but a strip index format of %s, which "
- "prevents it for being used for indexed draw calls.",
- lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
- pipelineIndexFormat);
-
- DAWN_INVALID_IF(
- mIndexFormat != pipelineIndexFormat,
- "Strip index format (%s) of %s does not match index buffer format (%s).",
- pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
- }
-
- // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
- // It returns the first invalid state found. We shouldn't be able to reach this line
- // because to have invalid aspects one of the above conditions must have failed earlier.
- // If this is reached, make sure lazy aspects and the error checks above are consistent.
- UNREACHABLE();
- return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
- }
-
- // TODO(dawn:563): Indicate which slots were not set.
- DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
- "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
-
- if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
- for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
- ASSERT(HasPipeline());
-
- DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
- static_cast<uint32_t>(i));
-
- BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
- BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
-
- DAWN_INVALID_IF(
- requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
- currentBGL->GetPipelineCompatibilityToken() !=
- requiredBGL->GetPipelineCompatibilityToken(),
- "The current pipeline (%s) was created with a default layout, and is not "
- "compatible with the %s at index %u which uses a %s that was not created by "
- "the pipeline. Either use the bind group layout returned by calling "
- "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
- "provide an explicit pipeline layout when creating the pipeline.",
- mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
- static_cast<uint32_t>(i));
-
- DAWN_INVALID_IF(
- requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
- currentBGL->GetPipelineCompatibilityToken() !=
- PipelineCompatibilityToken(0),
- "%s at index %u uses a %s which was created as part of the default layout for "
- "a different pipeline than the current one (%s), and as a result is not "
- "compatible. Use an explicit bind group layout when creating bind groups and "
- "an explicit pipeline layout when creating pipelines to share bind groups "
- "between pipelines.",
- mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
-
- DAWN_INVALID_IF(
- mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
- "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
- "group %s at index %u.",
- requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
- static_cast<uint32_t>(i));
-
- // TODO(dawn:563): Report the binding sizes and which ones are failing.
- DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
- (*mMinBufferSizes)[i]),
- "Binding sizes are too small for bind group %s at index %u",
- mBindgroups[i], static_cast<uint32_t>(i));
- }
-
- // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
- // It returns the first invalid state found. We shouldn't be able to reach this line
- // because to have invalid aspects one of the above conditions must have failed earlier.
- // If this is reached, make sure lazy aspects and the error checks above are consistent.
- UNREACHABLE();
- return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
- }
-
- UNREACHABLE();
- }
-
- void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
- SetPipelineCommon(pipeline);
- }
-
- void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
- SetPipelineCommon(pipeline);
- }
-
- void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
- BindGroupBase* bindgroup,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mBindgroups[index] = bindgroup;
- mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
- mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
- }
-
- void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
- mIndexBufferSet = true;
- mIndexFormat = format;
- mIndexBufferSize = size;
- }
-
- void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
- mVertexBufferSlotsUsed.set(slot);
- mVertexBufferSizes[slot] = size;
- }
-
- void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
- mLastPipeline = pipeline;
- mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
- mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
-
- mAspects.set(VALIDATION_ASPECT_PIPELINE);
-
- // Reset lazy aspects so they get recomputed on the next operation.
- mAspects &= ~kLazyAspects;
- }
-
- BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
- return mBindgroups[index];
- }
-
- const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
- BindGroupIndex index) const {
- return mDynamicOffsets[index];
- }
-
- bool CommandBufferStateTracker::HasPipeline() const {
- return mLastPipeline != nullptr;
- }
-
- RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
- ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
- return static_cast<RenderPipelineBase*>(mLastPipeline);
- }
-
- ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
- ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
- return static_cast<ComputePipelineBase*>(mLastPipeline);
- }
-
- PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
- return mLastPipelineLayout;
- }
-
- wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
- return mIndexFormat;
- }
-
- uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
- return mIndexBufferSize;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
deleted file mode 100644
index 5686956faf9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
-#define DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-
-namespace dawn_native {
-
- class CommandBufferStateTracker {
- public:
- // Non-state-modifying validation functions
- MaybeError ValidateCanDispatch();
- MaybeError ValidateCanDraw();
- MaybeError ValidateCanDrawIndexed();
- MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
- MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
- uint32_t firstInstance);
- MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
-
- // State-modifying methods
- void SetComputePipeline(ComputePipelineBase* pipeline);
- void SetRenderPipeline(RenderPipelineBase* pipeline);
- void SetBindGroup(BindGroupIndex index,
- BindGroupBase* bindgroup,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets);
- void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
- void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
-
- static constexpr size_t kNumAspects = 4;
- using ValidationAspects = std::bitset<kNumAspects>;
-
- BindGroupBase* GetBindGroup(BindGroupIndex index) const;
- const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
- bool HasPipeline() const;
- RenderPipelineBase* GetRenderPipeline() const;
- ComputePipelineBase* GetComputePipeline() const;
- PipelineLayoutBase* GetPipelineLayout() const;
- wgpu::IndexFormat GetIndexFormat() const;
- uint64_t GetIndexBufferSize() const;
-
- private:
- MaybeError ValidateOperation(ValidationAspects requiredAspects);
- void RecomputeLazyAspects(ValidationAspects aspects);
- MaybeError CheckMissingAspects(ValidationAspects aspects);
-
- void SetPipelineCommon(PipelineBase* pipeline);
-
- ValidationAspects mAspects;
-
- ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
- ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
- bool mIndexBufferSet = false;
- wgpu::IndexFormat mIndexFormat;
- uint64_t mIndexBufferSize = 0;
-
- ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
-
- PipelineLayoutBase* mLastPipelineLayout = nullptr;
- PipelineBase* mLastPipeline = nullptr;
-
- const RequiredBufferSizes* mMinBufferSizes = nullptr;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMANDBUFFERSTATETRACKER_H
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
deleted file mode 100644
index cec2df76248..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ /dev/null
@@ -1,1137 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CommandEncoder.h"
-
-#include "common/BitSetIterator.h"
-#include "common/Math.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/CommandBufferStateTracker.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/ComputePassEncoder.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/QueryHelper.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/RenderPassEncoder.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-#include <cmath>
-#include <map>
-
-namespace dawn_native {
-
- namespace {
-
- MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
- uint64_t srcOffset,
- uint64_t dstOffset) {
- // Copy size must be a multiple of 4 bytes on macOS.
- DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
-
- // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
- DAWN_INVALID_IF(
- srcOffset % 4 != 0 || dstOffset % 4 != 0,
- "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
- srcOffset, dstOffset);
-
- return {};
- }
-
- MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
- DAWN_INVALID_IF(texture->GetSampleCount() > 1,
- "%s sample count (%u) is not 1 when copying to or from a buffer.",
- texture, texture->GetSampleCount());
-
- return {};
- }
-
- MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
- const TexelBlockInfo& blockInfo,
- const bool hasDepthOrStencil) {
- if (hasDepthOrStencil) {
- // For depth-stencil texture, buffer offset must be a multiple of 4.
- DAWN_INVALID_IF(layout.offset % 4 != 0,
- "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
- layout.offset);
- } else {
- DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
- "Offset (%u) is not a multiple of the texel block byte size (%u).",
- layout.offset, blockInfo.byteSize);
- }
- return {};
- }
-
- MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
- const ImageCopyTexture& src) {
- Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
- if (aspectUsed == Aspect::Depth) {
- switch (src.texture->GetFormat().format) {
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "The depth aspect of %s format %s cannot be selected in a texture to "
- "buffer copy.",
- src.texture, src.texture->GetFormat().format);
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- return {};
- }
-
- MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
- // Currently we do not support layered rendering.
- DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
- "The layer count (%u) of %s used as attachment is greater than 1.",
- attachment->GetLayerCount(), attachment);
-
- DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
- "The mip level count (%u) of %s used as attachment is greater than 1.",
- attachment->GetLevelCount(), attachment);
-
- return {};
- }
-
- MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
- uint32_t* width,
- uint32_t* height) {
- const Extent3D& attachmentSize =
- attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
-
- if (*width == 0) {
- DAWN_ASSERT(*height == 0);
- *width = attachmentSize.width;
- *height = attachmentSize.height;
- DAWN_ASSERT(*width != 0 && *height != 0);
- } else {
- DAWN_INVALID_IF(
- *width != attachmentSize.width || *height != attachmentSize.height,
- "Attachment %s size (width: %u, height: %u) does not match the size of the "
- "other attachments (width: %u, height: %u).",
- attachment, attachmentSize.width, attachmentSize.height, *width, *height);
- }
-
- return {};
- }
-
- MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
- uint32_t* sampleCount) {
- if (*sampleCount == 0) {
- *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
- DAWN_ASSERT(*sampleCount != 0);
- } else {
- DAWN_INVALID_IF(
- *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
- "Color attachment %s sample count (%u) does not match the sample count of the "
- "other attachments (%u).",
- colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
- }
-
- return {};
- }
-
- MaybeError ValidateResolveTarget(const DeviceBase* device,
- const RenderPassColorAttachment& colorAttachment) {
- if (colorAttachment.resolveTarget == nullptr) {
- return {};
- }
-
- const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
- const TextureViewBase* attachment = colorAttachment.view;
- DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
- DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
- wgpu::TextureUsage::RenderAttachment));
-
- DAWN_INVALID_IF(
- !attachment->GetTexture()->IsMultisampledTexture(),
- "Cannot set %s as a resolve target when the color attachment %s has a sample "
- "count of 1.",
- resolveTarget, attachment);
-
- DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
- "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
- resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
-
- DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
- "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
- resolveTarget->GetLayerCount());
-
- DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
- "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
- resolveTarget->GetLevelCount());
-
- const Extent3D& colorTextureSize =
- attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
- const Extent3D& resolveTextureSize =
- resolveTarget->GetTexture()->GetMipLevelVirtualSize(
- resolveTarget->GetBaseMipLevel());
- DAWN_INVALID_IF(
- colorTextureSize.width != resolveTextureSize.width ||
- colorTextureSize.height != resolveTextureSize.height,
- "The Resolve target %s size (width: %u, height: %u) does not match the color "
- "attachment %s size (width: %u, height: %u).",
- resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
- colorTextureSize.width, colorTextureSize.height);
-
- wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
- DAWN_INVALID_IF(
- resolveTargetFormat != attachment->GetFormat().format,
- "The resolve target %s format (%s) does not match the color attachment %s format "
- "(%s).",
- resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
-
- return {};
- }
-
- MaybeError ValidateRenderPassColorAttachment(
- DeviceBase* device,
- const RenderPassColorAttachment& colorAttachment,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount) {
- TextureViewBase* attachment = colorAttachment.view;
- DAWN_TRY(device->ValidateObject(attachment));
- DAWN_TRY(
- ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
-
- DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
- !attachment->GetFormat().isRenderable,
- "The color attachment %s format (%s) is not color renderable.",
- attachment, attachment->GetFormat().format);
-
- DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
- DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
-
- if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
- DAWN_INVALID_IF(std::isnan(colorAttachment.clearColor.r) ||
- std::isnan(colorAttachment.clearColor.g) ||
- std::isnan(colorAttachment.clearColor.b) ||
- std::isnan(colorAttachment.clearColor.a),
- "Color clear value (%s) contain a NaN.",
- &colorAttachment.clearColor);
- }
-
- DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
-
- DAWN_TRY(ValidateResolveTarget(device, colorAttachment));
-
- DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
- DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
-
- return {};
- }
-
- MaybeError ValidateRenderPassDepthStencilAttachment(
- DeviceBase* device,
- const RenderPassDepthStencilAttachment* depthStencilAttachment,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount) {
- DAWN_ASSERT(depthStencilAttachment != nullptr);
-
- TextureViewBase* attachment = depthStencilAttachment->view;
- DAWN_TRY(device->ValidateObject(attachment));
- DAWN_TRY(
- ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
-
- const Format& format = attachment->GetFormat();
- DAWN_INVALID_IF(
- !format.HasDepthOrStencil(),
- "The depth stencil attachment %s format (%s) is not a depth stencil format.",
- attachment, format.format);
-
- DAWN_INVALID_IF(!format.isRenderable,
- "The depth stencil attachment %s format (%s) is not renderable.",
- attachment, format.format);
-
- DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
- "The depth stencil attachment %s must encompass all aspects.",
- attachment);
-
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
- DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
- DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
-
- DAWN_INVALID_IF(
- attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
- depthStencilAttachment->depthReadOnly !=
- depthStencilAttachment->stencilReadOnly,
- "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
- "is 'all'.",
- depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
-
- DAWN_INVALID_IF(
- depthStencilAttachment->depthReadOnly &&
- (depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Load ||
- depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Store),
- "depthLoadOp (%s) is not %s or depthStoreOp (%s) is not %s when depthReadOnly "
- "is true.",
- depthStencilAttachment->depthLoadOp, wgpu::LoadOp::Load,
- depthStencilAttachment->depthStoreOp, wgpu::StoreOp::Store);
-
- DAWN_INVALID_IF(depthStencilAttachment->stencilReadOnly &&
- (depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Load ||
- depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Store),
- "stencilLoadOp (%s) is not %s or stencilStoreOp (%s) is not %s when "
- "stencilReadOnly is true.",
- depthStencilAttachment->stencilLoadOp, wgpu::LoadOp::Load,
- depthStencilAttachment->stencilStoreOp, wgpu::StoreOp::Store);
-
- DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
- std::isnan(depthStencilAttachment->clearDepth),
- "Depth clear value is NaN.");
-
- // *sampleCount == 0 must only happen when there is no color attachment. In that case we
- // do not need to validate the sample count of the depth stencil attachment.
- const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
- if (*sampleCount != 0) {
- DAWN_INVALID_IF(
- depthStencilSampleCount != *sampleCount,
- "The depth stencil attachment %s sample count (%u) does not match the sample "
- "count of the other attachments (%u).",
- attachment, depthStencilSampleCount, *sampleCount);
- } else {
- *sampleCount = depthStencilSampleCount;
- }
-
- DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
- DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
-
- return {};
- }
-
- MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- uint32_t* width,
- uint32_t* height,
- uint32_t* sampleCount) {
- DAWN_INVALID_IF(
- descriptor->colorAttachmentCount > kMaxColorAttachments,
- "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
- descriptor->colorAttachmentCount, kMaxColorAttachments);
-
- for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
- DAWN_TRY_CONTEXT(
- ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i],
- width, height, sampleCount),
- "validating colorAttachments[%u].", i);
- }
-
- if (descriptor->depthStencilAttachment != nullptr) {
- DAWN_TRY_CONTEXT(
- ValidateRenderPassDepthStencilAttachment(
- device, descriptor->depthStencilAttachment, width, height, sampleCount),
- "validating depthStencilAttachment.");
- }
-
- if (descriptor->occlusionQuerySet != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
-
- DAWN_INVALID_IF(
- descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
- "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
- descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
- }
-
- DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
- descriptor->depthStencilAttachment == nullptr,
- "Render pass has no attachments.");
-
- return {};
- }
-
- MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
- const ComputePassDescriptor* descriptor) {
- return {};
- }
-
- MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- const BufferBase* destination,
- uint64_t destinationOffset) {
- DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
- "First query (%u) exceeds the number of queries (%u) in %s.",
- firstQuery, querySet->GetQueryCount(), querySet);
-
- DAWN_INVALID_IF(
- queryCount > querySet->GetQueryCount() - firstQuery,
- "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
- "(%u) in %s.",
- firstQuery, queryCount, querySet->GetQueryCount(), querySet);
-
- DAWN_INVALID_IF(destinationOffset % 256 != 0,
- "The destination buffer %s offset (%u) is not a multiple of 256.",
- destination, destinationOffset);
-
- uint64_t bufferSize = destination->GetSize();
- // The destination buffer must have enough storage, from destination offset, to contain
- // the result of resolved queries
- bool fitsInBuffer = destinationOffset <= bufferSize &&
- (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
- (bufferSize - destinationOffset));
- DAWN_INVALID_IF(
- !fitsInBuffer,
- "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
- querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
- bufferSize, destinationOffset);
-
- return {};
- }
-
- MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
- QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
- DeviceBase* device = encoder->GetDevice();
-
- // The availability got from query set is a reference to vector<bool>, need to covert
- // bool to uint32_t due to a user input in pipeline must not contain a bool type in
- // WGSL.
- std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
- querySet->GetQueryAvailability().end()};
-
- // Timestamp availability storage buffer
- BufferDescriptor availabilityDesc = {};
- availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
- availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
- Ref<BufferBase> availabilityBuffer;
- DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
-
- DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
- availability.data(),
- availability.size() * sizeof(uint32_t)));
-
- // Timestamp params uniform buffer
- TimestampParams params = {firstQuery, queryCount,
- static_cast<uint32_t>(destinationOffset),
- device->GetTimestampPeriodInNS()};
-
- BufferDescriptor parmsDesc = {};
- parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
- parmsDesc.size = sizeof(params);
- Ref<BufferBase> paramsBuffer;
- DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
-
- DAWN_TRY(
- device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
-
- return EncodeConvertTimestampsToNanoseconds(
- encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
- }
-
- bool IsReadOnlyDepthStencilAttachment(
- const RenderPassDepthStencilAttachment* depthStencilAttachment) {
- DAWN_ASSERT(depthStencilAttachment != nullptr);
- Aspect aspects = depthStencilAttachment->view->GetAspects();
- DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
-
- if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
- return false;
- }
- if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
- return false;
- }
- return true;
- }
-
- } // namespace
-
- CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
- TrackInDevice();
- }
-
- ObjectType CommandEncoder::GetType() const {
- return ObjectType::CommandEncoder;
- }
-
- void CommandEncoder::DestroyImpl() {
- mEncodingContext.Destroy();
- }
-
- CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
- return CommandBufferResourceUsage{
- mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
- std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
- }
-
- CommandIterator CommandEncoder::AcquireCommands() {
- return mEncodingContext.AcquireCommands();
- }
-
- void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
- mUsedQuerySets.insert(querySet);
- }
-
- void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
- DAWN_ASSERT(querySet != nullptr);
-
- if (GetDevice()->IsValidationEnabled()) {
- TrackUsedQuerySet(querySet);
- }
-
- // Set the query at queryIndex to available for resolving in query set.
- querySet->SetQueryAvailability(queryIndex, true);
- }
-
- // Implementation of the API's command recording methods
-
- ComputePassEncoder* CommandEncoder::APIBeginComputePass(
- const ComputePassDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- bool success = mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
-
- allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
-
- return {};
- },
- "encoding %s.BeginComputePass(%s).", this, descriptor);
-
- if (success) {
- const ComputePassDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
- }
-
- ComputePassEncoder* passEncoder =
- new ComputePassEncoder(device, descriptor, this, &mEncodingContext);
- mEncodingContext.EnterPass(passEncoder);
- return passEncoder;
- }
-
- return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
- }
-
- RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- RenderPassResourceUsageTracker usageTracker;
-
- uint32_t width = 0;
- uint32_t height = 0;
- bool depthReadOnly = false;
- bool stencilReadOnly = false;
- Ref<AttachmentState> attachmentState;
- bool success = mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- uint32_t sampleCount = 0;
-
- DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
- &sampleCount));
-
- ASSERT(width > 0 && height > 0 && sampleCount > 0);
-
- mEncodingContext.WillBeginRenderPass();
- BeginRenderPassCmd* cmd =
- allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
-
- cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
- attachmentState = cmd->attachmentState;
-
- for (ColorAttachmentIndex index :
- IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(index);
- TextureViewBase* view = descriptor->colorAttachments[i].view;
- TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
-
- cmd->colorAttachments[index].view = view;
- cmd->colorAttachments[index].resolveTarget = resolveTarget;
- cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
- cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
- cmd->colorAttachments[index].clearColor =
- descriptor->colorAttachments[i].clearColor;
-
- usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
-
- if (resolveTarget != nullptr) {
- usageTracker.TextureViewUsedAs(resolveTarget,
- wgpu::TextureUsage::RenderAttachment);
- }
- }
-
- if (cmd->attachmentState->HasDepthStencilAttachment()) {
- TextureViewBase* view = descriptor->depthStencilAttachment->view;
-
- cmd->depthStencilAttachment.view = view;
- cmd->depthStencilAttachment.clearDepth =
- descriptor->depthStencilAttachment->clearDepth;
- cmd->depthStencilAttachment.clearStencil =
- descriptor->depthStencilAttachment->clearStencil;
- cmd->depthStencilAttachment.depthLoadOp =
- descriptor->depthStencilAttachment->depthLoadOp;
- cmd->depthStencilAttachment.depthStoreOp =
- descriptor->depthStencilAttachment->depthStoreOp;
- cmd->depthStencilAttachment.stencilLoadOp =
- descriptor->depthStencilAttachment->stencilLoadOp;
- cmd->depthStencilAttachment.stencilStoreOp =
- descriptor->depthStencilAttachment->stencilStoreOp;
- cmd->depthStencilAttachment.depthReadOnly =
- descriptor->depthStencilAttachment->depthReadOnly;
- cmd->depthStencilAttachment.stencilReadOnly =
- descriptor->depthStencilAttachment->stencilReadOnly;
-
- if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
- // TODO(dawn:485): Readonly depth/stencil attachment is not fully
- // implemented. Disallow it as unsafe until the implementaion is completed.
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Readonly depth/stencil attachment is disallowed because it's not "
- "fully implemented");
-
- usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
- } else {
- usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
- }
-
- depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
- stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
- }
-
- cmd->width = width;
- cmd->height = height;
-
- cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
-
- return {};
- },
- "encoding %s.BeginRenderPass(%s).", this, descriptor);
-
- if (success) {
- RenderPassEncoder* passEncoder = new RenderPassEncoder(
- device, descriptor, this, &mEncodingContext, std::move(usageTracker),
- std::move(attachmentState), descriptor->occlusionQuerySet, width, height,
- depthReadOnly, stencilReadOnly);
- mEncodingContext.EnterPass(passEncoder);
- return passEncoder;
- }
-
- return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
- }
-
- void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
-
- DAWN_INVALID_IF(source == destination,
- "Source and destination are the same buffer (%s).", source);
-
- DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
- "validating source %s copy size.", source);
- DAWN_TRY_CONTEXT(
- ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
- "validating destination %s copy size.", destination);
- DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
-
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
- "validating source %s usage.", source);
- DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
- "validating destination %s usage.", destination);
-
- mTopLevelBuffers.insert(source);
- mTopLevelBuffers.insert(destination);
- }
-
- CopyBufferToBufferCmd* copy =
- allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
- copy->source = source;
- copy->sourceOffset = sourceOffset;
- copy->destination = destination;
- copy->destinationOffset = destinationOffset;
- copy->size = size;
-
- return {};
- },
- "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
- destination, destinationOffset, size);
- }
-
- void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
- "validating source %s usage.", source->buffer);
-
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
- DAWN_TRY_CONTEXT(
- ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst),
- "validating destination %s usage.", destination->texture);
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
-
- DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- source->layout, blockInfo,
- destination->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
- blockInfo, *copySize));
-
- mTopLevelBuffers.insert(source->buffer);
- mTopLevelTextures.insert(destination->texture);
- }
-
- TextureDataLayout srcLayout = source->layout;
- ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
-
- CopyBufferToTextureCmd* copy =
- allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
- copy->source.buffer = source->buffer;
- copy->source.offset = srcLayout.offset;
- copy->source.bytesPerRow = srcLayout.bytesPerRow;
- copy->source.rowsPerImage = srcLayout.rowsPerImage;
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
-
- return {};
- },
- "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
- destination->texture, copySize);
- }
-
- void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
- const ImageCopyBuffer* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
- DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc),
- "validating source %s usage.", source->texture);
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
- DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
-
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
- DAWN_TRY_CONTEXT(
- ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
- "validating destination %s usage.", destination->buffer);
-
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- source->texture->GetFormat().GetAspectInfo(source->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- destination->layout, blockInfo,
- source->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(
- destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
-
- mTopLevelTextures.insert(source->texture);
- mTopLevelBuffers.insert(destination->buffer);
- }
-
- TextureDataLayout dstLayout = destination->layout;
- ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
-
- CopyTextureToBufferCmd* copy =
- allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.buffer = destination->buffer;
- copy->destination.offset = dstLayout.offset;
- copy->destination.bytesPerRow = dstLayout.bytesPerRow;
- copy->destination.rowsPerImage = dstLayout.rowsPerImage;
- copy->copySize = *copySize;
-
- return {};
- },
- "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
- destination->buffer, copySize);
- }
-
- void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- APICopyTextureToTextureHelper<false>(source, destination, copySize);
- }
-
- void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- APICopyTextureToTextureHelper<true>(source, destination, copySize);
- }
-
- template <bool Internal>
- void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source->texture));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
-
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
- "validating source %s.", source->texture);
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
- "validating destination %s.", destination->texture);
-
- DAWN_TRY(
- ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
-
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
- "validating source %s copy range.", source->texture);
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
- "validating source %s copy range.", destination->texture);
-
- // For internal usages (CopyToCopyInternal) we don't care if the user has added
- // CopySrc as a usage for this texture, but we will always add it internally.
- if (Internal) {
- DAWN_TRY(
- ValidateInternalCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateInternalCanUseAs(destination->texture,
- wgpu::TextureUsage::CopyDst));
- } else {
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(
- ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
- }
-
- mTopLevelTextures.insert(source->texture);
- mTopLevelTextures.insert(destination->texture);
- }
-
- CopyTextureToTextureCmd* copy =
- allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
-
- return {};
- },
- "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
- destination->texture, copySize);
- }
-
- void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) of %s.",
- offset, size, bufferSize, buffer);
- }
-
- DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
- "validating buffer %s usage.", buffer);
-
- // Size must be a multiple of 4 bytes on macOS.
- DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
- size);
-
- // Offset must be multiples of 4 bytes on macOS.
- DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
- offset);
-
- mTopLevelBuffers.insert(buffer);
- } else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
- }
-
- ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
- cmd->buffer = buffer;
- cmd->offset = offset;
- cmd->size = size;
-
- return {};
- },
- "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
- }
-
- void CommandEncoder::APIInjectValidationError(const char* message) {
- if (mEncodingContext.CheckCurrentEncoder(this)) {
- mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
- }
- }
-
- void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- return {};
- },
- "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
- }
-
- void CommandEncoder::APIPopDebugGroup() {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_INVALID_IF(
- mDebugGroupStackSize == 0,
- "PopDebugGroup called when no debug groups are currently pushed.");
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
- mEncodingContext.PopDebugGroupLabel();
-
- return {};
- },
- "encoding %s.PopDebugGroup().", this);
- }
-
- void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- mDebugGroupStackSize++;
- mEncodingContext.PushDebugGroupLabel(groupLabel);
-
- return {};
- },
- "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
- }
-
- void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
-
- DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
- destinationOffset));
-
- DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
-
- TrackUsedQuerySet(querySet);
- mTopLevelBuffers.insert(destination);
- }
-
- ResolveQuerySetCmd* cmd =
- allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
- cmd->querySet = querySet;
- cmd->firstQuery = firstQuery;
- cmd->queryCount = queryCount;
- cmd->destination = destination;
- cmd->destinationOffset = destinationOffset;
-
- // Encode internal compute pipeline for timestamp query
- if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
- DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
- this, querySet, firstQuery, queryCount, destination, destinationOffset));
- }
-
- return {};
- },
- "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
- queryCount, destination, destinationOffset);
- }
-
- void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
- }
-
- WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
- cmd->buffer = buffer;
- cmd->offset = bufferOffset;
- cmd->size = size;
-
- uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
- memcpy(inlinedData, data, size);
-
- mTopLevelBuffers.insert(buffer);
-
- return {};
- },
- "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
- }
-
- void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext.TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- }
-
- TrackQueryAvailability(querySet, queryIndex);
-
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
-
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
- }
-
- CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
- Ref<CommandBufferBase> commandBuffer;
- if (GetDevice()->ConsumedError(FinishInternal(descriptor), &commandBuffer)) {
- return CommandBufferBase::MakeError(GetDevice());
- }
- ASSERT(!IsError());
- return commandBuffer.Detach();
- }
-
- ResultOrError<Ref<CommandBufferBase>> CommandEncoder::FinishInternal(
- const CommandBufferDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
- // state of the encoding context. The internal state is set to finished, and subsequent
- // calls to encode commands will generate errors.
- DAWN_TRY(mEncodingContext.Finish());
- DAWN_TRY(device->ValidateIsAlive());
-
- if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateFinish());
- }
-
- const CommandBufferDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
- }
-
- return device->CreateCommandBuffer(this, descriptor);
- }
-
- // Implementation of the command buffer validation that can be precomputed before submit
- MaybeError CommandEncoder::ValidateFinish() const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
- DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
- "validating render pass usage.");
- }
-
- for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
- for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
- DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
- "validating compute pass usage.");
- }
- }
-
- DAWN_INVALID_IF(
- mDebugGroupStackSize != 0,
- "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
- "calling Finish.",
- mDebugGroupStackSize);
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
deleted file mode 100644
index ec0a12145db..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMANDENCODER_H_
-#define DAWNNATIVE_COMMANDENCODER_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "dawn_native/EncodingContext.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PassResourceUsage.h"
-
-#include <string>
-
-namespace dawn_native {
-
- class CommandEncoder final : public ApiObjectBase {
- public:
- CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
-
- ObjectType GetType() const override;
-
- CommandIterator AcquireCommands();
- CommandBufferResourceUsage AcquireResourceUsages();
-
- void TrackUsedQuerySet(QuerySetBase* querySet);
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
-
- // Dawn API
- ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
- RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
-
- void APICopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
- void APICopyBufferToTexture(const ImageCopyBuffer* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APICopyTextureToBuffer(const ImageCopyTexture* source,
- const ImageCopyBuffer* destination,
- const Extent3D* copySize);
- void APICopyTextureToTexture(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
- void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
-
- void APIInjectValidationError(const char* message);
- void APIInsertDebugMarker(const char* groupLabel);
- void APIPopDebugGroup();
- void APIPushDebugGroup(const char* groupLabel);
-
- void APIResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset);
- void APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size);
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
-
- private:
- void DestroyImpl() override;
- ResultOrError<Ref<CommandBufferBase>> FinishInternal(
- const CommandBufferDescriptor* descriptor);
-
- // Helper to be able to implement both APICopyTextureToTexture and
- // APICopyTextureToTextureInternal. The only difference between both
- // copies, is that the Internal one will also check internal usage.
- template <bool Internal>
- void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize);
-
- MaybeError ValidateFinish() const;
-
- EncodingContext mEncodingContext;
- std::set<BufferBase*> mTopLevelBuffers;
- std::set<TextureBase*> mTopLevelTextures;
- std::set<QuerySetBase*> mUsedQuerySets;
-
- uint64_t mDebugGroupStackSize = 0;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
deleted file mode 100644
index ea2017d5e74..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CommandValidation.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBufferStateTracker.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/PassResourceUsage.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-namespace dawn_native {
-
- // Performs validation of the "synchronization scope" rules of WebGPU.
- MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
- // Buffers can only be used as single-write or multiple read.
- for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
- const wgpu::BufferUsage usage = scope.bufferUsages[i];
- bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
- bool singleUse = wgpu::HasZeroOrOneBits(usage);
-
- DAWN_INVALID_IF(!readOnly && !singleUse,
- "%s usage (%s) includes writable usage and another usage in the same "
- "synchronization scope.",
- scope.buffers[i], usage);
- }
-
- // Check that every single subresource is used as either a single-write usage or a
- // combination of readonly usages.
- for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
- const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
- MaybeError error = {};
- textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
- bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
- bool singleUse = wgpu::HasZeroOrOneBits(usage);
- if (!readOnly && !singleUse && !error.IsError()) {
- error = DAWN_FORMAT_VALIDATION_ERROR(
- "%s usage (%s) includes writable usage and another usage in the same "
- "synchronization scope.",
- scope.textures[i], usage);
- }
- });
- DAWN_TRY(std::move(error));
- }
- return {};
- }
-
- MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex) {
- DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
- "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
-
- DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
- "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
- querySet->GetQueryCount(), querySet);
-
- return {};
- }
-
- MaybeError ValidateWriteBuffer(const DeviceBase* device,
- const BufferBase* buffer,
- uint64_t bufferOffset,
- uint64_t size) {
- DAWN_TRY(device->ValidateObject(buffer));
-
- DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
- bufferOffset);
-
- DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
- "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
- bufferOffset, size, buffer, bufferSize);
-
- DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
- "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
- wgpu::BufferUsage::CopyDst);
-
- return {};
- }
-
- bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
- uint32_t maxStart = std::max(startA, startB);
- uint32_t minStart = std::min(startA, startB);
- return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
- static_cast<uint64_t>(maxStart);
- }
-
- template <typename A, typename B>
- DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
- static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
- static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
- return uint64_t(a) * uint64_t(b);
- }
-
- ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
- const Extent3D& copySize,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- ASSERT(copySize.width % blockInfo.width == 0);
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
- uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
-
- if (copySize.depthOrArrayLayers == 0) {
- return 0;
- }
-
- // Check for potential overflows for the rest of the computations. We have the following
- // inequalities:
- //
- // bytesInLastRow <= bytesPerRow
- // heightInBlocks <= rowsPerImage
- //
- // So:
- //
- // bytesInLastImage = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
- // <= bytesPerRow * heightInBlocks
- // <= bytesPerRow * rowsPerImage
- // <= bytesPerImage
- //
- // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
- // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
- ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
- rowsPerImage != wgpu::kCopyStrideUndefined));
- uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
- DAWN_INVALID_IF(
- bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
- "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
- bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
- copySize.depthOrArrayLayers);
-
- uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
- if (heightInBlocks > 0) {
- ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
- uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
- requiredBytesInCopy += bytesInLastImage;
- }
- return requiredBytesInCopy;
- }
-
- MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size) {
- uint64_t bufferSize = buffer->GetSize();
- bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
- DAWN_INVALID_IF(!fitsInBuffer,
- "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
- size, buffer.Get(), bufferSize);
-
- return {};
- }
-
- // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
- // it.
- void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent) {
- ASSERT(layout != nullptr);
- ASSERT(copyExtent.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
-
- if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
- ASSERT(copyExtent.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
- uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
-
- ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
- layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
- }
- if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
- ASSERT(copyExtent.depthOrArrayLayers <= 1);
- layout->rowsPerImage = heightInBlocks;
- }
- }
-
- MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
- uint64_t byteSize,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent) {
- ASSERT(copyExtent.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
-
- // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
- // validation message. Investigate ways to make it print as a more readable symbol.
- DAWN_INVALID_IF(
- copyExtent.depthOrArrayLayers > 1 &&
- (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
- layout.rowsPerImage == wgpu::kCopyStrideUndefined),
- "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
- copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
-
- DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
- "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
- heightInBlocks);
-
- // Validation for other members in layout:
- ASSERT(copyExtent.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
- ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
- std::numeric_limits<uint32_t>::max());
- uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
-
- // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
- // but they should get optimized out.
- DAWN_INVALID_IF(
- layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
- "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
- layout.bytesPerRow);
-
- DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
- heightInBlocks > layout.rowsPerImage,
- "The height of each image in blocks (%u) is > rowsPerImage (%u).",
- heightInBlocks, layout.rowsPerImage);
-
- // We compute required bytes in copy after validating texel block alignments
- // because the divisibility conditions are necessary for the algorithm to be valid,
- // also the bytesPerRow bound is necessary to avoid overflows.
- uint64_t requiredBytesInCopy;
- DAWN_TRY_ASSIGN(requiredBytesInCopy,
- ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
- layout.rowsPerImage));
-
- bool fitsInData =
- layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
- DAWN_INVALID_IF(
- !fitsInData,
- "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
- "offset (%u).",
- requiredBytesInCopy, byteSize, layout.offset);
-
- return {};
- }
-
- MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
- const ImageCopyBuffer& imageCopyBuffer) {
- DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
- if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
- DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
- "bytesPerRow (%u) is not a multiple of %u.",
- imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
- }
-
- return {};
- }
-
- MaybeError ValidateImageCopyTexture(DeviceBase const* device,
- const ImageCopyTexture& textureCopy,
- const Extent3D& copySize) {
- const TextureBase* texture = textureCopy.texture;
- DAWN_TRY(device->ValidateObject(texture));
- DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
- "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
- textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
-
- DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
- DAWN_INVALID_IF(
- SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
- "%s format (%s) does not have the selected aspect (%s).", texture,
- texture->GetFormat().format, textureCopy.aspect);
-
- if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
- Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- DAWN_INVALID_IF(
- textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
- subresourceSize.width != copySize.width ||
- subresourceSize.height != copySize.height,
- "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
- "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
- "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
- &textureCopy.origin, &copySize, &subresourceSize, texture,
- texture->GetFormat().format, texture->GetSampleCount());
- }
-
- return {};
- }
-
- MaybeError ValidateTextureCopyRange(DeviceBase const* device,
- const ImageCopyTexture& textureCopy,
- const Extent3D& copySize) {
- const TextureBase* texture = textureCopy.texture;
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
- // Validation for the copy being in-bounds:
- Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- // For 1D/2D textures, include the array layer as depth so it can be checked with other
- // dimensions.
- if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
- mipSize.depthOrArrayLayers = texture->GetArrayLayers();
- }
- // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
- // overflows.
- DAWN_INVALID_IF(
- static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
- static_cast<uint64_t>(mipSize.width) ||
- static_cast<uint64_t>(textureCopy.origin.y) +
- static_cast<uint64_t>(copySize.height) >
- static_cast<uint64_t>(mipSize.height) ||
- static_cast<uint64_t>(textureCopy.origin.z) +
- static_cast<uint64_t>(copySize.depthOrArrayLayers) >
- static_cast<uint64_t>(mipSize.depthOrArrayLayers),
- "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
- "size (%s).",
- &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
-
- // Validation for the texel block alignments:
- const Format& format = textureCopy.texture->GetFormat();
- if (format.isCompressed) {
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
- DAWN_INVALID_IF(
- textureCopy.origin.x % blockInfo.width != 0,
- "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
- "width (%u).",
- textureCopy.origin.x, blockInfo.width);
- DAWN_INVALID_IF(
- textureCopy.origin.y % blockInfo.height != 0,
- "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
- "height (%u).",
- textureCopy.origin.y, blockInfo.height);
- DAWN_INVALID_IF(
- copySize.width % blockInfo.width != 0,
- "copySize.width (%u) is not a multiple of compressed texture format block width "
- "(%u).",
- copySize.width, blockInfo.width);
- DAWN_INVALID_IF(
- copySize.height % blockInfo.height != 0,
- "copySize.height (%u) is not a multiple of compressed texture format block "
- "height (%u).",
- copySize.height, blockInfo.height);
- }
-
- return {};
- }
-
- // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
- // formats).
- ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
- const Format& format = view.texture->GetFormat();
- switch (view.aspect) {
- case wgpu::TextureAspect::All: {
- DAWN_INVALID_IF(
- !HasOneBit(format.aspects),
- "More than a single aspect (%s) is selected for multi-planar format (%s) in "
- "%s <-> linear data copy.",
- view.aspect, format.format, view.texture);
-
- Aspect single = format.aspects;
- return single;
- }
- case wgpu::TextureAspect::DepthOnly:
- ASSERT(format.aspects & Aspect::Depth);
- return Aspect::Depth;
- case wgpu::TextureAspect::StencilOnly:
- ASSERT(format.aspects & Aspect::Stencil);
- return Aspect::Stencil;
- case wgpu::TextureAspect::Plane0Only:
- case wgpu::TextureAspect::Plane1Only:
- break;
- }
- UNREACHABLE();
- }
-
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
- Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
- DAWN_INVALID_IF(aspectUsed == Aspect::Depth, "Cannot copy into the depth aspect of %s.",
- dst.texture);
-
- return {};
- }
-
- MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize) {
- const uint32_t srcSamples = src.texture->GetSampleCount();
- const uint32_t dstSamples = dst.texture->GetSampleCount();
-
- DAWN_INVALID_IF(
- srcSamples != dstSamples,
- "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
- src.texture, srcSamples, dst.texture, dstSamples);
-
- // Metal cannot select a single aspect for texture-to-texture copies.
- const Format& format = src.texture->GetFormat();
- DAWN_INVALID_IF(
- SelectFormatAspects(format, src.aspect) != format.aspects,
- "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
- src.texture, src.aspect, format.format);
-
- DAWN_INVALID_IF(
- SelectFormatAspects(format, dst.aspect) != format.aspects,
- "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
- "(%s).",
- dst.texture, dst.aspect, format.format);
-
- if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
- wgpu::TextureDimension dimension = src.texture->GetDimension();
- ASSERT(dimension != wgpu::TextureDimension::e1D);
- DAWN_INVALID_IF(
- (dimension == wgpu::TextureDimension::e2D &&
- IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) ||
- dimension == wgpu::TextureDimension::e3D,
- "Cannot copy between overlapping subresources of %s.", src.texture);
- }
-
- return {};
- }
-
- MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize) {
- // Metal requires texture-to-texture copies be the same format
- DAWN_INVALID_IF(src.texture->GetFormat().format != dst.texture->GetFormat().format,
- "Source %s format (%s) and destination %s format (%s) do not match.",
- src.texture, src.texture->GetFormat().format, dst.texture,
- dst.texture->GetFormat().format);
-
- return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
- }
-
- MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
- ASSERT(wgpu::HasZeroOrOneBits(usage));
- DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
- texture, texture->GetUsage(), usage);
-
- return {};
- }
-
- MaybeError ValidateInternalCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
- ASSERT(wgpu::HasZeroOrOneBits(usage));
- DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
- "%s internal usage (%s) doesn't include %s.", texture,
- texture->GetInternalUsage(), usage);
-
- return {};
- }
-
- MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
- ASSERT(wgpu::HasZeroOrOneBits(usage));
- DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
- buffer->GetUsage(), usage);
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
deleted file mode 100644
index 3f57eae972c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMANDVALIDATION_H_
-#define DAWNNATIVE_COMMANDVALIDATION_H_
-
-#include "dawn_native/CommandAllocator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Texture.h"
-
-#include <vector>
-
-namespace dawn_native {
-
- class QuerySetBase;
- struct SyncScopeResourceUsage;
- struct TexelBlockInfo;
-
- MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
-
- MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex);
-
- MaybeError ValidateWriteBuffer(const DeviceBase* device,
- const BufferBase* buffer,
- uint64_t bufferOffset,
- uint64_t size);
-
- ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
- const Extent3D& copySize,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
-
- void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent);
- MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
- uint64_t byteSize,
- const TexelBlockInfo& blockInfo,
- const Extent3D& copyExtent);
- MaybeError ValidateTextureCopyRange(DeviceBase const* device,
- const ImageCopyTexture& imageCopyTexture,
- const Extent3D& copySize);
- ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
-
- MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
- const ImageCopyBuffer& imageCopyBuffer);
- MaybeError ValidateImageCopyTexture(DeviceBase const* device,
- const ImageCopyTexture& imageCopyTexture,
- const Extent3D& copySize);
-
- MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size);
-
- bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
-
- MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize);
- MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize);
-
- MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage);
-
- MaybeError ValidateInternalCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage);
-
- MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMANDVALIDATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
deleted file mode 100644
index fbc6d93b282..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Commands.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandAllocator.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/Texture.h"
-
-namespace dawn_native {
-
- void FreeCommands(CommandIterator* commands) {
- commands->Reset();
-
- Command type;
- while (commands->NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
- begin->~BeginComputePassCmd();
- break;
- }
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
- begin->~BeginOcclusionQueryCmd();
- break;
- }
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
- begin->~BeginRenderPassCmd();
- break;
- }
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
- copy->~CopyBufferToBufferCmd();
- break;
- }
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
- copy->~CopyBufferToTextureCmd();
- break;
- }
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
- copy->~CopyTextureToBufferCmd();
- break;
- }
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- commands->NextCommand<CopyTextureToTextureCmd>();
- copy->~CopyTextureToTextureCmd();
- break;
- }
- case Command::Dispatch: {
- DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
- dispatch->~DispatchCmd();
- break;
- }
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
- dispatch->~DispatchIndirectCmd();
- break;
- }
- case Command::Draw: {
- DrawCmd* draw = commands->NextCommand<DrawCmd>();
- draw->~DrawCmd();
- break;
- }
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
- draw->~DrawIndexedCmd();
- break;
- }
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
- draw->~DrawIndirectCmd();
- break;
- }
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
- draw->~DrawIndexedIndirectCmd();
- break;
- }
- case Command::EndComputePass: {
- EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
- cmd->~EndComputePassCmd();
- break;
- }
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
- cmd->~EndOcclusionQueryCmd();
- break;
- }
- case Command::EndRenderPass: {
- EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
- cmd->~EndRenderPassCmd();
- break;
- }
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
- auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
- for (size_t i = 0; i < cmd->count; ++i) {
- (&bundles[i])->~Ref<RenderBundleBase>();
- }
- cmd->~ExecuteBundlesCmd();
- break;
- }
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
- cmd->~ClearBufferCmd();
- break;
- }
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
- commands->NextData<char>(cmd->length + 1);
- cmd->~InsertDebugMarkerCmd();
- break;
- }
- case Command::PopDebugGroup: {
- PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
- cmd->~PopDebugGroupCmd();
- break;
- }
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
- commands->NextData<char>(cmd->length + 1);
- cmd->~PushDebugGroupCmd();
- break;
- }
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
- cmd->~ResolveQuerySetCmd();
- break;
- }
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
- cmd->~SetComputePipelineCmd();
- break;
- }
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
- cmd->~SetRenderPipelineCmd();
- break;
- }
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
- cmd->~SetStencilReferenceCmd();
- break;
- }
- case Command::SetViewport: {
- SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
- cmd->~SetViewportCmd();
- break;
- }
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
- cmd->~SetScissorRectCmd();
- break;
- }
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
- cmd->~SetBlendConstantCmd();
- break;
- }
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
- if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- cmd->~SetBindGroupCmd();
- break;
- }
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
- cmd->~SetIndexBufferCmd();
- break;
- }
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
- cmd->~SetVertexBufferCmd();
- break;
- }
- case Command::WriteBuffer: {
- WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
- commands->NextData<uint8_t>(write->size);
- write->~WriteBufferCmd();
- break;
- }
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
- cmd->~WriteTimestampCmd();
- break;
- }
- }
- }
-
- commands->MakeEmptyAsDataWasDestroyed();
- }
-
- void SkipCommand(CommandIterator* commands, Command type) {
- switch (type) {
- case Command::BeginComputePass:
- commands->NextCommand<BeginComputePassCmd>();
- break;
-
- case Command::BeginOcclusionQuery:
- commands->NextCommand<BeginOcclusionQueryCmd>();
- break;
-
- case Command::BeginRenderPass:
- commands->NextCommand<BeginRenderPassCmd>();
- break;
-
- case Command::CopyBufferToBuffer:
- commands->NextCommand<CopyBufferToBufferCmd>();
- break;
-
- case Command::CopyBufferToTexture:
- commands->NextCommand<CopyBufferToTextureCmd>();
- break;
-
- case Command::CopyTextureToBuffer:
- commands->NextCommand<CopyTextureToBufferCmd>();
- break;
-
- case Command::CopyTextureToTexture:
- commands->NextCommand<CopyTextureToTextureCmd>();
- break;
-
- case Command::Dispatch:
- commands->NextCommand<DispatchCmd>();
- break;
-
- case Command::DispatchIndirect:
- commands->NextCommand<DispatchIndirectCmd>();
- break;
-
- case Command::Draw:
- commands->NextCommand<DrawCmd>();
- break;
-
- case Command::DrawIndexed:
- commands->NextCommand<DrawIndexedCmd>();
- break;
-
- case Command::DrawIndirect:
- commands->NextCommand<DrawIndirectCmd>();
- break;
-
- case Command::DrawIndexedIndirect:
- commands->NextCommand<DrawIndexedIndirectCmd>();
- break;
-
- case Command::EndComputePass:
- commands->NextCommand<EndComputePassCmd>();
- break;
-
- case Command::EndOcclusionQuery:
- commands->NextCommand<EndOcclusionQueryCmd>();
- break;
-
- case Command::EndRenderPass:
- commands->NextCommand<EndRenderPassCmd>();
- break;
-
- case Command::ExecuteBundles: {
- auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
- commands->NextData<Ref<RenderBundleBase>>(cmd->count);
- break;
- }
-
- case Command::ClearBuffer:
- commands->NextCommand<ClearBufferCmd>();
- break;
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
- commands->NextData<char>(cmd->length + 1);
- break;
- }
-
- case Command::PopDebugGroup:
- commands->NextCommand<PopDebugGroupCmd>();
- break;
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
- commands->NextData<char>(cmd->length + 1);
- break;
- }
-
- case Command::ResolveQuerySet: {
- commands->NextCommand<ResolveQuerySetCmd>();
- break;
- }
-
- case Command::SetComputePipeline:
- commands->NextCommand<SetComputePipelineCmd>();
- break;
-
- case Command::SetRenderPipeline:
- commands->NextCommand<SetRenderPipelineCmd>();
- break;
-
- case Command::SetStencilReference:
- commands->NextCommand<SetStencilReferenceCmd>();
- break;
-
- case Command::SetViewport:
- commands->NextCommand<SetViewportCmd>();
- break;
-
- case Command::SetScissorRect:
- commands->NextCommand<SetScissorRectCmd>();
- break;
-
- case Command::SetBlendConstant:
- commands->NextCommand<SetBlendConstantCmd>();
- break;
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
- if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- break;
- }
-
- case Command::SetIndexBuffer:
- commands->NextCommand<SetIndexBufferCmd>();
- break;
-
- case Command::SetVertexBuffer: {
- commands->NextCommand<SetVertexBufferCmd>();
- break;
- }
-
- case Command::WriteBuffer:
- commands->NextCommand<WriteBufferCmd>();
- break;
-
- case Command::WriteTimestamp: {
- commands->NextCommand<WriteTimestampCmd>();
- break;
- }
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
deleted file mode 100644
index 03eed407e92..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMMANDS_H_
-#define DAWNNATIVE_COMMANDS_H_
-
-#include "common/Constants.h"
-
-#include "dawn_native/AttachmentState.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/Texture.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- // Definition of the commands that are present in the CommandIterator given by the
- // CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
- // dependencies: Ref<Object> needs Object to be defined.
-
- enum class Command {
- BeginComputePass,
- BeginOcclusionQuery,
- BeginRenderPass,
- ClearBuffer,
- CopyBufferToBuffer,
- CopyBufferToTexture,
- CopyTextureToBuffer,
- CopyTextureToTexture,
- Dispatch,
- DispatchIndirect,
- Draw,
- DrawIndexed,
- DrawIndirect,
- DrawIndexedIndirect,
- EndComputePass,
- EndOcclusionQuery,
- EndRenderPass,
- ExecuteBundles,
- InsertDebugMarker,
- PopDebugGroup,
- PushDebugGroup,
- ResolveQuerySet,
- SetComputePipeline,
- SetRenderPipeline,
- SetStencilReference,
- SetViewport,
- SetScissorRect,
- SetBlendConstant,
- SetBindGroup,
- SetIndexBuffer,
- SetVertexBuffer,
- WriteBuffer,
- WriteTimestamp,
- };
-
- struct BeginComputePassCmd {};
-
- struct BeginOcclusionQueryCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- struct RenderPassColorAttachmentInfo {
- Ref<TextureViewBase> view;
- Ref<TextureViewBase> resolveTarget;
- wgpu::LoadOp loadOp;
- wgpu::StoreOp storeOp;
- dawn_native::Color clearColor;
- };
-
- struct RenderPassDepthStencilAttachmentInfo {
- Ref<TextureViewBase> view;
- wgpu::LoadOp depthLoadOp;
- wgpu::StoreOp depthStoreOp;
- wgpu::LoadOp stencilLoadOp;
- wgpu::StoreOp stencilStoreOp;
- float clearDepth;
- uint32_t clearStencil;
- bool depthReadOnly;
- bool stencilReadOnly;
- };
-
- struct BeginRenderPassCmd {
- Ref<AttachmentState> attachmentState;
- ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
- colorAttachments;
- RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
-
- // Cache the width and height of all attachments for convenience
- uint32_t width;
- uint32_t height;
-
- Ref<QuerySetBase> occlusionQuerySet;
- };
-
- struct BufferCopy {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint32_t bytesPerRow;
- uint32_t rowsPerImage;
- };
-
- struct TextureCopy {
- Ref<TextureBase> texture;
- uint32_t mipLevel;
- Origin3D origin; // Texels / array layer
- Aspect aspect;
- };
-
- struct CopyBufferToBufferCmd {
- Ref<BufferBase> source;
- uint64_t sourceOffset;
- Ref<BufferBase> destination;
- uint64_t destinationOffset;
- uint64_t size;
- };
-
- struct CopyBufferToTextureCmd {
- BufferCopy source;
- TextureCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct CopyTextureToBufferCmd {
- TextureCopy source;
- BufferCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct CopyTextureToTextureCmd {
- TextureCopy source;
- TextureCopy destination;
- Extent3D copySize; // Texels
- };
-
- struct DispatchCmd {
- uint32_t x;
- uint32_t y;
- uint32_t z;
- };
-
- struct DispatchIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct DrawCmd {
- uint32_t vertexCount;
- uint32_t instanceCount;
- uint32_t firstVertex;
- uint32_t firstInstance;
- };
-
- struct DrawIndexedCmd {
- uint32_t indexCount;
- uint32_t instanceCount;
- uint32_t firstIndex;
- int32_t baseVertex;
- uint32_t firstInstance;
- };
-
- struct DrawIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct DrawIndexedIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
- };
-
- struct EndComputePassCmd {};
-
- struct EndOcclusionQueryCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- struct EndRenderPassCmd {};
-
- struct ExecuteBundlesCmd {
- uint32_t count;
- };
-
- struct ClearBufferCmd {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct InsertDebugMarkerCmd {
- uint32_t length;
- };
-
- struct PopDebugGroupCmd {};
-
- struct PushDebugGroupCmd {
- uint32_t length;
- };
-
- struct ResolveQuerySetCmd {
- Ref<QuerySetBase> querySet;
- uint32_t firstQuery;
- uint32_t queryCount;
- Ref<BufferBase> destination;
- uint64_t destinationOffset;
- };
-
- struct SetComputePipelineCmd {
- Ref<ComputePipelineBase> pipeline;
- };
-
- struct SetRenderPipelineCmd {
- Ref<RenderPipelineBase> pipeline;
- };
-
- struct SetStencilReferenceCmd {
- uint32_t reference;
- };
-
- struct SetViewportCmd {
- float x, y, width, height, minDepth, maxDepth;
- };
-
- struct SetScissorRectCmd {
- uint32_t x, y, width, height;
- };
-
- struct SetBlendConstantCmd {
- Color color;
- };
-
- struct SetBindGroupCmd {
- BindGroupIndex index;
- Ref<BindGroupBase> group;
- uint32_t dynamicOffsetCount;
- };
-
- struct SetIndexBufferCmd {
- Ref<BufferBase> buffer;
- wgpu::IndexFormat format;
- uint64_t offset;
- uint64_t size;
- };
-
- struct SetVertexBufferCmd {
- VertexBufferSlot slot;
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct WriteBufferCmd {
- Ref<BufferBase> buffer;
- uint64_t offset;
- uint64_t size;
- };
-
- struct WriteTimestampCmd {
- Ref<QuerySetBase> querySet;
- uint32_t queryIndex;
- };
-
- // This needs to be called before the CommandIterator is freed so that the Ref<> present in
- // the commands have a chance to run their destructor and remove internal references.
- class CommandIterator;
- void FreeCommands(CommandIterator* commands);
-
- // Helper function to allow skipping over a command when it is unimplemented, while still
- // consuming the correct amount of data from the command iterator.
- void SkipCommand(CommandIterator* commands, Command type);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMMANDS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
deleted file mode 100644
index 70aae54795b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CompilationMessages.h"
-
-#include "common/Assert.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <tint/tint.h>
-
-namespace dawn_native {
-
- namespace {
-
- WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
- switch (severity) {
- case tint::diag::Severity::Note:
- return WGPUCompilationMessageType_Info;
- case tint::diag::Severity::Warning:
- return WGPUCompilationMessageType_Warning;
- default:
- return WGPUCompilationMessageType_Error;
- }
- }
-
- } // anonymous namespace
-
- OwnedCompilationMessages::OwnedCompilationMessages() {
- mCompilationInfo.nextInChain = 0;
- mCompilationInfo.messageCount = 0;
- mCompilationInfo.messages = nullptr;
- }
-
- void OwnedCompilationMessages::AddMessageForTesting(std::string message,
- wgpu::CompilationMessageType type,
- uint64_t lineNum,
- uint64_t linePos,
- uint64_t offset,
- uint64_t length) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- mMessageStrings.push_back(message);
- mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
- lineNum, linePos, offset, length});
- }
-
- void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- // Tint line and column values are 1-based.
- uint64_t lineNum = diagnostic.source.range.begin.line;
- uint64_t linePos = diagnostic.source.range.begin.column;
- // The offset is 0-based.
- uint64_t offset = 0;
- uint64_t length = 0;
-
- if (lineNum && linePos && diagnostic.source.file_content) {
- const std::vector<std::string>& lines = diagnostic.source.file_content->lines;
- size_t i = 0;
- // To find the offset of the message position, loop through each of the first lineNum-1
- // lines and add it's length (+1 to account for the line break) to the offset.
- for (; i < lineNum - 1; ++i) {
- offset += lines[i].length() + 1;
- }
-
- // If the end line is on a different line from the beginning line, add the length of the
- // lines in between to the ending offset.
- uint64_t endLineNum = diagnostic.source.range.end.line;
- uint64_t endLinePos = diagnostic.source.range.end.column;
- uint64_t endOffset = offset;
- for (; i < endLineNum - 1; ++i) {
- endOffset += lines[i].length() + 1;
- }
-
- // Add the line positions to the offset and endOffset to get their final positions
- // within the code string.
- offset += linePos - 1;
- endOffset += endLinePos - 1;
-
- // The length of the message is the difference between the starting offset and the
- // ending offset.
- length = endOffset - offset;
- }
-
- if (diagnostic.code) {
- mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
- } else {
- mMessageStrings.push_back(diagnostic.message);
- }
-
- mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
- lineNum, linePos, offset, length});
- }
-
- void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
- // Cannot add messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- for (const auto& diag : diagnostics) {
- AddMessage(diag);
- }
-
- AddFormattedTintMessages(diagnostics);
- }
-
- void OwnedCompilationMessages::ClearMessages() {
- // Cannot clear messages after GetCompilationInfo has been called.
- ASSERT(mCompilationInfo.messages == nullptr);
-
- mMessageStrings.clear();
- mMessages.clear();
- }
-
- const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
- mCompilationInfo.messageCount = mMessages.size();
- mCompilationInfo.messages = mMessages.data();
-
- // Ensure every message points at the correct message string. Cannot do this earlier, since
- // vector reallocations may move the pointers around.
- for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
- WGPUCompilationMessage& message = mMessages[i];
- std::string& messageString = mMessageStrings[i];
- message.message = messageString.c_str();
- }
-
- return &mCompilationInfo;
- }
-
- const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
- return mFormattedTintMessages;
- }
-
- void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
- tint::diag::List messageList;
- size_t warningCount = 0;
- size_t errorCount = 0;
- for (auto& diag : diagnostics) {
- switch (diag.severity) {
- case (tint::diag::Severity::Fatal):
- case (tint::diag::Severity::Error):
- case (tint::diag::Severity::InternalCompilerError): {
- errorCount++;
- messageList.add(tint::diag::Diagnostic(diag));
- break;
- }
- case (tint::diag::Severity::Warning): {
- warningCount++;
- messageList.add(tint::diag::Diagnostic(diag));
- break;
- }
- default:
- break;
- }
- }
- if (errorCount == 0 && warningCount == 0) {
- return;
- }
- tint::diag::Formatter::Style style;
- style.print_newline_at_end = false;
- std::ostringstream t;
- if (errorCount > 0) {
- t << errorCount << " error(s) ";
- if (warningCount > 0) {
- t << "and ";
- }
- }
- if (warningCount > 0) {
- t << warningCount << " warning(s) ";
- }
- t << "generated while compiling the shader:" << std::endl
- << tint::diag::Formatter{style}.format(messageList);
- mFormattedTintMessages.push_back(t.str());
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
deleted file mode 100644
index d6edbe9fa53..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMPILATIONMESSAGES_H_
-#define DAWNNATIVE_COMPILATIONMESSAGES_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "common/NonCopyable.h"
-
-#include <string>
-#include <vector>
-
-namespace tint { namespace diag {
- class Diagnostic;
- class List;
-}} // namespace tint::diag
-
-namespace dawn_native {
-
- class OwnedCompilationMessages : public NonCopyable {
- public:
- OwnedCompilationMessages();
- ~OwnedCompilationMessages() = default;
-
- void AddMessageForTesting(
- std::string message,
- wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
- uint64_t lineNum = 0,
- uint64_t linePos = 0,
- uint64_t offset = 0,
- uint64_t length = 0);
- void AddMessages(const tint::diag::List& diagnostics);
- void ClearMessages();
-
- const WGPUCompilationInfo* GetCompilationInfo();
- const std::vector<std::string>& GetFormattedTintMessages();
-
- private:
- void AddMessage(const tint::diag::Diagnostic& diagnostic);
- void AddFormattedTintMessages(const tint::diag::List& diagnostics);
-
- WGPUCompilationInfo mCompilationInfo;
- std::vector<std::string> mMessageStrings;
- std::vector<WGPUCompilationMessage> mMessages;
- std::vector<std::string> mFormattedTintMessages;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMPILATIONMESSAGES_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
deleted file mode 100644
index 383186892eb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ /dev/null
@@ -1,459 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ComputePassEncoder.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/PassResourceUsageTracker.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/utils/WGPUHelpers.h"
-
-namespace dawn_native {
-
- namespace {
-
- ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (store->dispatchIndirectValidationPipeline != nullptr) {
- return store->dispatchIndirectValidationPipeline.Get();
- }
-
- // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
- // shader in various failure modes.
- // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
- Ref<ShaderModuleBase> shaderModule;
- DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
- [[block]] struct UniformParams {
- maxComputeWorkgroupsPerDimension: u32;
- clientOffsetInU32: u32;
- enableValidation: u32;
- duplicateNumWorkgroups: u32;
- };
-
- [[block]] struct IndirectParams {
- data: array<u32>;
- };
-
- [[block]] struct ValidatedParams {
- data: array<u32>;
- };
-
- [[group(0), binding(0)]] var<uniform> uniformParams: UniformParams;
- [[group(0), binding(1)]] var<storage, read_write> clientParams: IndirectParams;
- [[group(0), binding(2)]] var<storage, write> validatedParams: ValidatedParams;
-
- [[stage(compute), workgroup_size(1, 1, 1)]]
- fn main() {
- for (var i = 0u; i < 3u; i = i + 1u) {
- var numWorkgroups = clientParams.data[uniformParams.clientOffsetInU32 + i];
- if (uniformParams.enableValidation > 0u &&
- numWorkgroups > uniformParams.maxComputeWorkgroupsPerDimension) {
- numWorkgroups = 0u;
- }
- validatedParams.data[i] = numWorkgroups;
-
- if (uniformParams.duplicateNumWorkgroups > 0u) {
- validatedParams.data[i + 3u] = numWorkgroups;
- }
- }
- }
- )"));
-
- Ref<BindGroupLayoutBase> bindGroupLayout;
- DAWN_TRY_ASSIGN(
- bindGroupLayout,
- utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
- {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
- },
- /* allowInternalBinding */ true));
-
- Ref<PipelineLayoutBase> pipelineLayout;
- DAWN_TRY_ASSIGN(pipelineLayout,
- utils::MakeBasicPipelineLayout(device, bindGroupLayout));
-
- ComputePipelineDescriptor computePipelineDescriptor = {};
- computePipelineDescriptor.layout = pipelineLayout.Get();
- computePipelineDescriptor.compute.module = shaderModule.Get();
- computePipelineDescriptor.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
- device->CreateComputePipeline(&computePipelineDescriptor));
-
- return store->dispatchIndirectValidationPipeline.Get();
- }
-
- } // namespace
-
- ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext)
- : ProgrammableEncoder(device, descriptor->label, encodingContext),
- mCommandEncoder(commandEncoder) {
- TrackInDevice();
- }
-
- ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
- }
-
- ComputePassEncoder* ComputePassEncoder::MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext) {
- return new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
- }
-
- void ComputePassEncoder::DestroyImpl() {
- // Ensure that the pass has exited. This is done for passes only since validation requires
- // they exit before destruction while bundles do not.
- mEncodingContext->EnsurePassExited(this);
- }
-
- ObjectType ComputePassEncoder::GetType() const {
- return ObjectType::ComputePassEncoder;
- }
-
- void ComputePassEncoder::APIEndPass() {
- if (mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- }
-
- allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
-
- return {};
- },
- "encoding %s.EndPass().", this)) {
- mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
- }
- }
-
- void ComputePassEncoder::APIDispatch(uint32_t x, uint32_t y, uint32_t z) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
- uint32_t workgroupsPerDimension =
- GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
-
- DAWN_INVALID_IF(
- x > workgroupsPerDimension,
- "Dispatch size X (%u) exceeds max compute workgroups per dimension (%u).",
- x, workgroupsPerDimension);
-
- DAWN_INVALID_IF(
- y > workgroupsPerDimension,
- "Dispatch size Y (%u) exceeds max compute workgroups per dimension (%u).",
- y, workgroupsPerDimension);
-
- DAWN_INVALID_IF(
- z > workgroupsPerDimension,
- "Dispatch size Z (%u) exceeds max compute workgroups per dimension (%u).",
- z, workgroupsPerDimension);
- }
-
- // Record the synchronization scope for Dispatch, which is just the current
- // bindgroups.
- AddDispatchSyncScope();
-
- DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
- dispatch->x = x;
- dispatch->y = y;
- dispatch->z = z;
-
- return {};
- },
- "encoding %s.Dispatch(%u, %u, %u).", this, x, y, z);
- }
-
- ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
- ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
- uint64_t indirectOffset) {
- DeviceBase* device = GetDevice();
-
- const bool shouldDuplicateNumWorkgroups =
- device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- mCommandBufferState.GetComputePipeline());
- if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
- return std::make_pair(indirectBuffer, indirectOffset);
- }
-
- // Save the previous command buffer state so it can be restored after the
- // validation inserts additional commands.
- CommandBufferStateTracker previousState = mCommandBufferState;
-
- auto* const store = device->GetInternalPipelineStore();
-
- Ref<ComputePipelineBase> validationPipeline;
- DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
-
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
-
- uint32_t storageBufferOffsetAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
-
- // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
- const uint32_t clientOffsetFromAlignedBoundary =
- indirectOffset % storageBufferOffsetAlignment;
- const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
- const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
-
- // Let the size of the binding be the additional offset, plus the size.
- const uint64_t clientIndirectBindingSize =
- kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
-
- // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
- // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
- // non-host-shareable'.
- struct UniformParams {
- uint32_t maxComputeWorkgroupsPerDimension;
- uint32_t clientOffsetInU32;
- uint32_t enableValidation;
- uint32_t duplicateNumWorkgroups;
- };
-
- // Create a uniform buffer to hold parameters for the shader.
- Ref<BufferBase> uniformBuffer;
- {
- UniformParams params;
- params.maxComputeWorkgroupsPerDimension =
- device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
- params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
- params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
- params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
-
- DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
- device, wgpu::BufferUsage::Uniform, {params}));
- }
-
- // Reserve space in the scratch buffer to hold the validated indirect params.
- ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
- const uint64_t scratchBufferSize =
- shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
- DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
- Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
-
- Ref<BindGroupBase> validationBindGroup;
- ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
- DAWN_TRY_ASSIGN(validationBindGroup,
- utils::MakeBindGroup(device, layout,
- {
- {0, uniformBuffer},
- {1, indirectBuffer, clientIndirectBindingOffset,
- clientIndirectBindingSize},
- {2, validatedIndirectBuffer, 0, scratchBufferSize},
- }));
-
- // Issue commands to validate the indirect buffer.
- APISetPipeline(validationPipeline.Get());
- APISetBindGroup(0, validationBindGroup.Get());
- APIDispatch(1);
-
- // Restore the state.
- RestoreCommandBufferState(std::move(previousState));
-
- // Return the new indirect buffer and indirect buffer offset.
- return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
- }
-
- void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- indirectOffset >= indirectBuffer->GetSize() ||
- indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
- "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
- "size (%u).",
- indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
- }
-
- SyncScopeUsageTracker scope;
- scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
- mUsageTracker.AddReferencedBuffer(indirectBuffer);
- // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
- // is needed for correct usage validation even though it will only be bound for
- // storage. This will unecessarily transition the |indirectBuffer| in
- // the backend.
-
- Ref<BufferBase> indirectBufferRef = indirectBuffer;
-
- // Get applied indirect buffer with necessary changes on the original indirect
- // buffer. For example,
- // - Validate each indirect dispatch with a single dispatch to copy the indirect
- // buffer params into a scratch buffer if they're valid, and otherwise zero them
- // out.
- // - Duplicate all the indirect dispatch parameters to support [[num_workgroups]] on
- // D3D12.
- // - Directly return the original indirect dispatch buffer if we don't need any
- // transformations on it.
- // We could consider moving the validation earlier in the pass after the last
- // last point the indirect buffer was used with writable usage, as well as batch
- // validation for multiple dispatches into one, but inserting commands at
- // arbitrary points in the past is not possible right now.
- DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
- TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
-
- // If we have created a new scratch dispatch indirect buffer in
- // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
- if (indirectBufferRef.Get() != indirectBuffer) {
- // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
- // synchronization scope.
- scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
- mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
- }
-
- AddDispatchSyncScope(std::move(scope));
-
- DispatchIndirectCmd* dispatch =
- allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
- dispatch->indirectBuffer = std::move(indirectBufferRef);
- dispatch->indirectOffset = indirectOffset;
- return {};
- },
- "encoding %s.DispatchIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
- }
-
- void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
- }
-
- mCommandBufferState.SetComputePipeline(pipeline);
-
- SetComputePipelineCmd* cmd =
- allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
- cmd->pipeline = pipeline;
-
- return {};
- },
- "encoding %s.SetPipeline(%s).", this, pipeline);
- }
-
- void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
-
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets));
- }
-
- mUsageTracker.AddResourcesReferencedByBindGroup(group);
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
-
- return {};
- },
- "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
- dynamicOffsetCount);
- }
-
- void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- }
-
- mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
-
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
-
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
- }
-
- void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
- PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
- for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
- }
- mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
- }
-
- void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
- // Encode commands for the backend to restore the pipeline and bind groups.
- if (state.HasPipeline()) {
- APISetPipeline(state.GetComputePipeline());
- }
- for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
- BindGroupBase* bg = state.GetBindGroup(i);
- if (bg != nullptr) {
- const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
- if (offsets.empty()) {
- APISetBindGroup(static_cast<uint32_t>(i), bg);
- } else {
- APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
- }
- }
- }
-
- // Restore the frontend state tracking information.
- mCommandBufferState = std::move(state);
- }
-
- CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
- return &mCommandBufferState;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
deleted file mode 100644
index 41d4d472c79..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMPUTEPASSENCODER_H_
-#define DAWNNATIVE_COMPUTEPASSENCODER_H_
-
-#include "dawn_native/CommandBufferStateTracker.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/PassResourceUsageTracker.h"
-#include "dawn_native/ProgrammableEncoder.h"
-
-namespace dawn_native {
-
- class SyncScopeUsageTracker;
-
- class ComputePassEncoder final : public ProgrammableEncoder {
- public:
- ComputePassEncoder(DeviceBase* device,
- const ComputePassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext);
-
- static ComputePassEncoder* MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext);
-
- ObjectType GetType() const override;
-
- void APIEndPass();
-
- void APIDispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1);
- void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void APISetPipeline(ComputePipelineBase* pipeline);
-
- void APISetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
-
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
- void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
- RestoreCommandBufferState(std::move(state));
- }
-
- protected:
- ComputePassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
-
- private:
- void DestroyImpl() override;
-
- ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
- Ref<BufferBase> indirectBuffer,
- uint64_t indirectOffset);
-
- void RestoreCommandBufferState(CommandBufferStateTracker state);
-
- CommandBufferStateTracker mCommandBufferState;
-
- // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
- // records it in mUsageTracker.
- void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
- ComputePassResourceUsageTracker mUsageTracker;
-
- // For render and compute passes, the encoding context is borrowed from the command encoder.
- // Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoder> mCommandEncoder;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
deleted file mode 100644
index b6c1766aed3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ComputePipeline.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/ObjectType_autogen.h"
-
-namespace dawn_native {
-
- MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
- }
-
- if (descriptor->layout != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->layout));
- }
-
- return ValidateProgrammableStage(
- device, descriptor->compute.module, descriptor->compute.entryPoint,
- descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
- SingleShaderStage::Compute);
- }
-
- // ComputePipelineBase
-
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor)
- : PipelineBase(device,
- descriptor->layout,
- descriptor->label,
- {{SingleShaderStage::Compute, descriptor->compute.module,
- descriptor->compute.entryPoint, descriptor->compute.constantCount,
- descriptor->compute.constants}}) {
- SetContentHash(ComputeContentHash());
- TrackInDevice();
- }
-
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
- TrackInDevice();
- }
-
- ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : PipelineBase(device, tag) {
- }
-
- ComputePipelineBase::~ComputePipelineBase() = default;
-
- void ComputePipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheComputePipeline(this);
- }
- }
-
- // static
- ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
- class ErrorComputePipeline final : public ComputePipelineBase {
- public:
- ErrorComputePipeline(DeviceBase* device)
- : ComputePipelineBase(device, ObjectBase::kError) {
- }
-
- MaybeError Initialize() override {
- UNREACHABLE();
- return {};
- }
- };
-
- return new ErrorComputePipeline(device);
- }
-
- ObjectType ComputePipelineBase::GetType() const {
- return ObjectType::ComputePipeline;
- }
-
- bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
- const ComputePipelineBase* b) const {
- return PipelineBase::EqualForCache(a, b);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
deleted file mode 100644
index 28d5ed9d6c5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COMPUTEPIPELINE_H_
-#define DAWNNATIVE_COMPUTEPIPELINE_H_
-
-#include "common/NonCopyable.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/Pipeline.h"
-
-namespace dawn_native {
-
- class DeviceBase;
- struct EntryPointMetadata;
-
- MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor);
-
- class ComputePipelineBase : public PipelineBase {
- public:
- ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
- ~ComputePipelineBase() override;
-
- static ComputePipelineBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
- struct EqualityFunc {
- bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
- };
-
- protected:
- // Constructor used only for mocking and testing.
- ComputePipelineBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COMPUTEPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
deleted file mode 100644
index bc93cb1593c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CopyTextureForBrowserHelper.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/RenderPassEncoder.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/Texture.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_native/utils/WGPUHelpers.h"
-
-#include <unordered_set>
-
-namespace dawn_native {
- namespace {
-
- static const char sCopyTextureForBrowserShader[] = R"(
- [[block]] struct Uniforms {
- u_scale: vec2<f32>;
- u_offset: vec2<f32>;
- u_alphaOp: u32;
- };
-
- [[binding(0), group(0)]] var<uniform> uniforms : Uniforms;
-
- struct VertexOutputs {
- [[location(0)]] texcoords : vec2<f32>;
- [[builtin(position)]] position : vec4<f32>;
- };
-
- [[stage(vertex)]] fn vs_main(
- [[builtin(vertex_index)]] VertexIndex : u32
- ) -> VertexOutputs {
- var texcoord = array<vec2<f32>, 3>(
- vec2<f32>(-0.5, 0.0),
- vec2<f32>( 1.5, 0.0),
- vec2<f32>( 0.5, 2.0));
-
- var output : VertexOutputs;
- output.position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
-
- // Y component of scale is calculated by the copySizeHeight / textureHeight. Only
- // flipY case can get negative number.
- var flipY = uniforms.u_scale.y < 0.0;
-
- // Texture coordinate takes top-left as origin point. We need to map the
- // texture to triangle carefully.
- if (flipY) {
- // We need to get the mirror positions(mirrored based on y = 0.5) on flip cases.
- // Adopt transform to src texture and then mapping it to triangle coord which
- // do a +1 shift on Y dimension will help us got that mirror position perfectly.
- output.texcoords = (texcoord[VertexIndex] * uniforms.u_scale + uniforms.u_offset) *
- vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
- } else {
- // For the normal case, we need to get the exact position.
- // So mapping texture to triangle firstly then adopt the transform.
- output.texcoords = (texcoord[VertexIndex] *
- vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
- uniforms.u_scale + uniforms.u_offset;
- }
-
- return output;
- }
-
- [[binding(1), group(0)]] var mySampler: sampler;
- [[binding(2), group(0)]] var myTexture: texture_2d<f32>;
-
- [[stage(fragment)]] fn fs_main(
- [[location(0)]] texcoord : vec2<f32>
- ) -> [[location(0)]] vec4<f32> {
- // Clamp the texcoord and discard the out-of-bound pixels.
- var clampedTexcoord =
- clamp(texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
- if (!all(clampedTexcoord == texcoord)) {
- discard;
- }
-
- // Swizzling of texture formats when sampling / rendering is handled by the
- // hardware so we don't need special logic in this shader. This is covered by tests.
- var srcColor = textureSample(myTexture, mySampler, texcoord);
-
- // Handle alpha. Alpha here helps on the source content and dst content have
- // different alpha config. There are three possible ops: DontChange, Premultiply
- // and Unpremultiply.
- // TODO(crbug.com/1217153): if wgsl support `constexpr` and allow it
- // to be case selector, Replace 0u/1u/2u with a constexpr variable with
- // meaningful name.
- switch(uniforms.u_alphaOp) {
- case 0u: { // AlphaOp: DontChange
- break;
- }
- case 1u: { // AlphaOp: Premultiply
- srcColor = vec4<f32>(srcColor.rgb * srcColor.a, srcColor.a);
- break;
- }
- case 2u: { // AlphaOp: Unpremultiply
- if (srcColor.a != 0.0) {
- srcColor = vec4<f32>(srcColor.rgb / srcColor.a, srcColor.a);
- }
- break;
- }
- default: {
- break;
- }
- }
-
- return srcColor;
- }
- )";
-
- struct Uniform {
- float scaleX;
- float scaleY;
- float offsetX;
- float offsetY;
- wgpu::AlphaOp alphaOp;
- };
- static_assert(sizeof(Uniform) == 20, "");
-
- // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
- // non-depth, non-stencil, non-compressed texture format pair copy. Now this API
- // supports CopyImageBitmapToTexture normal format pairs.
- MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
- const wgpu::TextureFormat dstFormat) {
- switch (srcFormat) {
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::RGBA8Unorm:
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Source texture format (%s) is not supported.", srcFormat);
- }
-
- switch (dstFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Destination texture format (%s) is not supported.", dstFormat);
- }
-
- return {};
- }
-
- RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
- wgpu::TextureFormat dstFormat) {
- auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
- if (pipeline != store->copyTextureForBrowserPipelines.end()) {
- return pipeline->second.Get();
- }
- return nullptr;
- }
-
- ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
- DeviceBase* device,
- wgpu::TextureFormat dstFormat) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (GetCachedPipeline(store, dstFormat) == nullptr) {
- // Create vertex shader module if not cached before.
- if (store->copyTextureForBrowser == nullptr) {
- DAWN_TRY_ASSIGN(
- store->copyTextureForBrowser,
- utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
- }
-
- ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
-
- // Prepare vertex stage.
- VertexState vertex = {};
- vertex.module = shaderModule;
- vertex.entryPoint = "vs_main";
-
- // Prepare frgament stage.
- FragmentState fragment = {};
- fragment.module = shaderModule;
- fragment.entryPoint = "fs_main";
-
- // Prepare color state.
- ColorTargetState target = {};
- target.format = dstFormat;
-
- // Create RenderPipeline.
- RenderPipelineDescriptor renderPipelineDesc = {};
-
- // Generate the layout based on shader modules.
- renderPipelineDesc.layout = nullptr;
-
- renderPipelineDesc.vertex = vertex;
- renderPipelineDesc.fragment = &fragment;
-
- renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
-
- fragment.targetCount = 1;
- fragment.targets = &target;
-
- Ref<RenderPipelineBase> pipeline;
- DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
- store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
- }
-
- return GetCachedPipeline(store, dstFormat);
- }
-
- } // anonymous namespace
-
- MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- DAWN_TRY(device->ValidateObject(source->texture));
- DAWN_TRY(device->ValidateObject(destination->texture));
-
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
- "validating the ImageCopyTexture for the source");
- DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
- "validating the ImageCopyTexture for the destination");
-
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
- "validating that the copy fits in the source");
- DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
- "validating that the copy fits in the destination");
-
- DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
-
- DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
- source->origin.z);
- DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
- "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
-
- DAWN_INVALID_IF(
- source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
- "The source texture sample count (%u) or the destination texture sample count (%u) is "
- "not 1.",
- source->texture->GetSampleCount(), destination->texture->GetSampleCount());
-
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding));
-
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment));
-
- DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
- destination->texture->GetFormat().format));
-
- DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
- DAWN_TRY(ValidateAlphaOp(options->alphaOp));
-
- return {};
- }
-
- MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
- // copy to each other. This can be a potential fast path.
-
- // Noop copy
- if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
- return {};
- }
-
- RenderPipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
- device, destination->texture->GetFormat().format));
-
- // Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- Extent3D srcTextureSize = source->texture->GetSize();
-
- // Prepare binding 0 resource: uniform buffer.
- Uniform uniformData = {
- copySize->width / static_cast<float>(srcTextureSize.width),
- copySize->height / static_cast<float>(srcTextureSize.height), // scale
- source->origin.x / static_cast<float>(srcTextureSize.width),
- source->origin.y / static_cast<float>(srcTextureSize.height), // offset
- wgpu::AlphaOp::DontChange // alphaOp default value: DontChange
- };
-
- // Handle flipY. FlipY here means we flip the source texture firstly and then
- // do copy. This helps on the case which source texture is flipped and the copy
- // need to unpack the flip.
- if (options->flipY) {
- uniformData.scaleY *= -1.0;
- uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
- }
-
- // Set alpha op.
- uniformData.alphaOp = options->alphaOp;
-
- Ref<BufferBase> uniformBuffer;
- DAWN_TRY_ASSIGN(
- uniformBuffer,
- utils::CreateBufferFromData(
- device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
-
- // Prepare binding 1 resource: sampler
- // Use default configuration, filterMode set to Nearest for min and mag.
- SamplerDescriptor samplerDesc = {};
- Ref<SamplerBase> sampler;
- DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
-
- // Prepare binding 2 resource: sampled texture
- TextureViewDescriptor srcTextureViewDesc = {};
- srcTextureViewDesc.baseMipLevel = source->mipLevel;
- srcTextureViewDesc.mipLevelCount = 1;
- srcTextureViewDesc.arrayLayerCount = 1;
- Ref<TextureViewBase> srcTextureView;
- DAWN_TRY_ASSIGN(srcTextureView,
- device->CreateTextureView(source->texture, &srcTextureViewDesc));
-
- // Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
- device, layout,
- {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
-
- // Create command encoder.
- CommandEncoderDescriptor encoderDesc = {};
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<CommandEncoder> encoder = AcquireRef(device->APICreateCommandEncoder(&encoderDesc));
-
- // Prepare dst texture view as color Attachment.
- TextureViewDescriptor dstTextureViewDesc;
- dstTextureViewDesc.baseMipLevel = destination->mipLevel;
- dstTextureViewDesc.mipLevelCount = 1;
- dstTextureViewDesc.baseArrayLayer = destination->origin.z;
- dstTextureViewDesc.arrayLayerCount = 1;
- Ref<TextureViewBase> dstView;
- DAWN_TRY_ASSIGN(dstView,
- device->CreateTextureView(destination->texture, &dstTextureViewDesc));
-
- // Prepare render pass color attachment descriptor.
- RenderPassColorAttachment colorAttachmentDesc;
-
- colorAttachmentDesc.view = dstView.Get();
- colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
- colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
- colorAttachmentDesc.clearColor = {0.0, 0.0, 0.0, 1.0};
-
- // Create render pass.
- RenderPassDescriptor renderPassDesc;
- renderPassDesc.colorAttachmentCount = 1;
- renderPassDesc.colorAttachments = &colorAttachmentDesc;
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<RenderPassEncoder> passEncoder =
- AcquireRef(encoder->APIBeginRenderPass(&renderPassDesc));
-
- // Start pipeline and encode commands to complete
- // the copy from src texture to dst texture with transformation.
- passEncoder->APISetPipeline(pipeline);
- passEncoder->APISetBindGroup(0, bindGroup.Get());
- passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
- copySize->height, 0.0, 1.0);
- passEncoder->APIDraw(3);
- passEncoder->APIEndPass();
-
- // Finsh encoding.
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<CommandBufferBase> commandBuffer = AcquireRef(encoder->APIFinish());
- CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
-
- // Submit command buffer.
- device->GetQueue()->APISubmit(1, &submitCommandBuffer);
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h
deleted file mode 100644
index e0965abcf1c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
-#define DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/ObjectBase.h"
-
-namespace dawn_native {
- class DeviceBase;
- struct Extent3D;
- struct ImageCopyTexture;
- struct CopyTextureForBrowserOptions;
-
- MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
- MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_COPYTEXTUREFORBROWSERHELPER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
deleted file mode 100644
index a2669888bd8..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-
-#include "dawn_native/AsyncTask.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-#include "utils/WGPUHelpers.h"
-
-namespace dawn_native {
-
- CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
- std::string errorMessage,
- void* userdata)
- : mErrorMessage(errorMessage), mUserData(userdata) {
- }
-
- CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
- Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata)
- : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
- mPipeline(std::move(pipeline)),
- mCreateComputePipelineAsyncCallback(callback) {
- }
-
- void CreateComputePipelineAsyncCallbackTask::Finish() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
-
- if (mPipeline.Get() != nullptr) {
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
- ToAPI(mPipeline.Detach()), "", mUserData);
- } else {
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
- mErrorMessage.c_str(), mUserData);
- }
- }
-
- void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
-
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", mUserData);
- }
-
- void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
- ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
-
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "Device lost before callback", mUserData);
- }
-
- CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
- Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata)
- : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
- mPipeline(std::move(pipeline)),
- mCreateRenderPipelineAsyncCallback(callback) {
- }
-
- void CreateRenderPipelineAsyncCallbackTask::Finish() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
- if (mPipeline.Get() != nullptr) {
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
- ToAPI(mPipeline.Detach()), "", mUserData);
- } else {
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
- mErrorMessage.c_str(), mUserData);
- }
- }
-
- void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", mUserData);
- }
-
- void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
- ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "Device lost before callback", mUserData);
- }
-
- CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
- Ref<ComputePipelineBase> nonInitializedComputePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata)
- : mComputePipeline(std::move(nonInitializedComputePipeline)),
- mCallback(callback),
- mUserdata(userdata) {
- ASSERT(mComputePipeline != nullptr);
- }
-
- void CreateComputePipelineAsyncTask::Run() {
- const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
- TRACE_EVENT_FLOW_END1(mComputePipeline->GetDevice()->GetPlatform(), General,
- "CreateComputePipelineAsyncTask::RunAsync", this, "label",
- eventLabel);
- TRACE_EVENT1(mComputePipeline->GetDevice()->GetPlatform(), General,
- "CreateComputePipelineAsyncTask::Run", "label", eventLabel);
-
- MaybeError maybeError = mComputePipeline->Initialize();
- std::string errorMessage;
- if (maybeError.IsError()) {
- mComputePipeline = nullptr;
- errorMessage = maybeError.AcquireError()->GetMessage();
- }
-
- mComputePipeline->GetDevice()->AddComputePipelineAsyncCallbackTask(
- mComputePipeline, errorMessage, mCallback, mUserdata);
- }
-
- void CreateComputePipelineAsyncTask::RunAsync(
- std::unique_ptr<CreateComputePipelineAsyncTask> task) {
- DeviceBase* device = task->mComputePipeline->GetDevice();
-
- const char* eventLabel =
- utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
-
- // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
- // since C++14:
- // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
- auto asyncTask = [taskPtr = task.release()] {
- std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
- innnerTaskPtr->Run();
- };
-
- TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
- "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
- eventLabel);
- device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
- }
-
- CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
- Ref<RenderPipelineBase> nonInitializedRenderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata)
- : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
- mCallback(callback),
- mUserdata(userdata) {
- ASSERT(mRenderPipeline != nullptr);
- }
-
- void CreateRenderPipelineAsyncTask::Run() {
- const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
- TRACE_EVENT_FLOW_END1(mRenderPipeline->GetDevice()->GetPlatform(), General,
- "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
- TRACE_EVENT1(mRenderPipeline->GetDevice()->GetPlatform(), General,
- "CreateRenderPipelineAsyncTask::Run", "label", eventLabel);
-
- MaybeError maybeError = mRenderPipeline->Initialize();
- std::string errorMessage;
- if (maybeError.IsError()) {
- mRenderPipeline = nullptr;
- errorMessage = maybeError.AcquireError()->GetMessage();
- }
-
- mRenderPipeline->GetDevice()->AddRenderPipelineAsyncCallbackTask(
- mRenderPipeline, errorMessage, mCallback, mUserdata);
- }
-
- void CreateRenderPipelineAsyncTask::RunAsync(
- std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
- DeviceBase* device = task->mRenderPipeline->GetDevice();
-
- const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
-
- // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
- // since C++14:
- // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
- auto asyncTask = [taskPtr = task.release()] {
- std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
- innerTaskPtr->Run();
- };
-
- TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
- "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
- eventLabel);
- device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
deleted file mode 100644
index 3bac477e6a0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
-#define DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
-
-#include "common/RefCounted.h"
-#include "dawn/webgpu.h"
-#include "dawn_native/CallbackTaskManager.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native {
-
- class ComputePipelineBase;
- class DeviceBase;
- class PipelineLayoutBase;
- class RenderPipelineBase;
- class ShaderModuleBase;
- struct FlatComputePipelineDescriptor;
-
- struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
- CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
-
- protected:
- std::string mErrorMessage;
- void* mUserData;
- };
-
- struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
- CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() override;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- protected:
- Ref<ComputePipelineBase> mPipeline;
- WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
- };
-
- struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
- CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void Finish() override;
- void HandleShutDown() final;
- void HandleDeviceLoss() final;
-
- protected:
- Ref<RenderPipelineBase> mPipeline;
- WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
- };
-
- // CreateComputePipelineAsyncTask defines all the inputs and outputs of
- // CreateComputePipelineAsync() tasks, which are the same among all the backends.
- class CreateComputePipelineAsyncTask {
- public:
- CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Run();
-
- static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
-
- private:
- Ref<ComputePipelineBase> mComputePipeline;
- WGPUCreateComputePipelineAsyncCallback mCallback;
- void* mUserdata;
- };
-
- // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
- // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
- class CreateRenderPipelineAsyncTask {
- public:
- CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void Run();
-
- static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
-
- private:
- Ref<RenderPipelineBase> mRenderPipeline;
- WGPUCreateRenderPipelineAsyncCallback mCallback;
- void* mUserdata;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_CREATEPIPELINEASYNCTASK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
deleted file mode 100644
index c10fe0084dd..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/DawnNative.h"
-
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/Texture.h"
-#include "dawn_platform/DawnPlatform.h"
-
-// Contains the entry-points into dawn_native
-
-namespace dawn_native {
-
- const DawnProcTable& GetProcsAutogen();
-
- const DawnProcTable& GetProcs() {
- return GetProcsAutogen();
- }
-
- std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
- return FromAPI(device)->GetTogglesUsed();
- }
-
- // Adapter
-
- Adapter::Adapter() = default;
-
- Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
- }
-
- Adapter::~Adapter() {
- mImpl = nullptr;
- }
-
- Adapter::Adapter(const Adapter& other) = default;
- Adapter& Adapter::operator=(const Adapter& other) = default;
-
- void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
- properties->backendType = mImpl->GetBackendType();
- properties->adapterType = mImpl->GetAdapterType();
- properties->driverDescription = mImpl->GetDriverDescription().c_str();
- properties->deviceID = mImpl->GetPCIInfo().deviceId;
- properties->vendorID = mImpl->GetPCIInfo().vendorId;
- properties->name = mImpl->GetPCIInfo().name.c_str();
- }
-
- BackendType Adapter::GetBackendType() const {
- switch (mImpl->GetBackendType()) {
- case wgpu::BackendType::D3D12:
- return BackendType::D3D12;
- case wgpu::BackendType::Metal:
- return BackendType::Metal;
- case wgpu::BackendType::Null:
- return BackendType::Null;
- case wgpu::BackendType::OpenGL:
- return BackendType::OpenGL;
- case wgpu::BackendType::Vulkan:
- return BackendType::Vulkan;
- case wgpu::BackendType::OpenGLES:
- return BackendType::OpenGLES;
-
- case wgpu::BackendType::D3D11:
- case wgpu::BackendType::WebGPU:
- break;
- }
- UNREACHABLE();
- }
-
- DeviceType Adapter::GetDeviceType() const {
- switch (mImpl->GetAdapterType()) {
- case wgpu::AdapterType::DiscreteGPU:
- return DeviceType::DiscreteGPU;
- case wgpu::AdapterType::IntegratedGPU:
- return DeviceType::IntegratedGPU;
- case wgpu::AdapterType::CPU:
- return DeviceType::CPU;
- case wgpu::AdapterType::Unknown:
- return DeviceType::Unknown;
- }
- UNREACHABLE();
- }
-
- const PCIInfo& Adapter::GetPCIInfo() const {
- return mImpl->GetPCIInfo();
- }
-
- std::vector<const char*> Adapter::GetSupportedFeatures() const {
- FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
- return supportedFeaturesSet.GetEnabledFeatureNames();
- }
-
- WGPUDeviceProperties Adapter::GetAdapterProperties() const {
- return mImpl->GetAdapterProperties();
- }
-
- bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
- return mImpl->GetLimits(FromAPI(limits));
- }
-
- void Adapter::SetUseTieredLimits(bool useTieredLimits) {
- mImpl->SetUseTieredLimits(useTieredLimits);
- }
-
- bool Adapter::SupportsExternalImages() const {
- return mImpl->SupportsExternalImages();
- }
-
- Adapter::operator bool() const {
- return mImpl != nullptr;
- }
-
- WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
- return ToAPI(mImpl->CreateDevice(deviceDescriptor));
- }
-
- void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata) {
- mImpl->RequestDevice(descriptor, callback, userdata);
- }
-
- void Adapter::ResetInternalDeviceForTesting() {
- mImpl->ResetInternalDeviceForTesting();
- }
-
- // AdapterDiscoverOptionsBase
-
- AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
- : backendType(type) {
- }
-
- // Instance
-
- Instance::Instance() : mImpl(InstanceBase::Create()) {
- }
-
- Instance::~Instance() {
- if (mImpl != nullptr) {
- mImpl->Release();
- mImpl = nullptr;
- }
- }
-
- void Instance::DiscoverDefaultAdapters() {
- mImpl->DiscoverDefaultAdapters();
- }
-
- bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
- return mImpl->DiscoverAdapters(options);
- }
-
- std::vector<Adapter> Instance::GetAdapters() const {
- // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
- std::vector<Adapter> adapters;
- for (const std::unique_ptr<AdapterBase>& adapter : mImpl->GetAdapters()) {
- adapters.push_back({adapter.get()});
- }
- return adapters;
- }
-
- const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
- return mImpl->GetToggleInfo(toggleName);
- }
-
- void Instance::EnableBackendValidation(bool enableBackendValidation) {
- if (enableBackendValidation) {
- mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
- }
- }
-
- void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
- mImpl->SetBackendValidationLevel(level);
- }
-
- void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
- mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
- }
-
- void Instance::SetPlatform(dawn_platform::Platform* platform) {
- mImpl->SetPlatform(platform);
- }
-
- WGPUInstance Instance::Get() const {
- return ToAPI(mImpl);
- }
-
- size_t GetLazyClearCountForTesting(WGPUDevice device) {
- return FromAPI(device)->GetLazyClearCountForTesting();
- }
-
- size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
- return FromAPI(device)->GetDeprecationWarningCountForTesting();
- }
-
- bool IsTextureSubresourceInitialized(WGPUTexture texture,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- WGPUTextureAspect cAspect) {
- TextureBase* textureBase = FromAPI(texture);
-
- Aspect aspect =
- ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
- SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
- return textureBase->IsSubresourceContentInitialized(range);
- }
-
- std::vector<const char*> GetProcMapNamesForTestingInternal();
-
- std::vector<const char*> GetProcMapNamesForTesting() {
- return GetProcMapNamesForTestingInternal();
- }
-
- DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
- return FromAPI(device)->APITick();
- }
-
- // ExternalImageDescriptor
-
- ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : type(type) {
- }
-
- // ExternalImageExportInfo
-
- ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : type(type) {
- }
-
- const char* GetObjectLabelForTesting(void* objectHandle) {
- ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
- return object->GetLabel().c_str();
- }
-
- uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
- return FromAPI(buffer)->GetAllocatedSize();
- }
-
- bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
- bool excludePipelineCompatibiltyToken = true;
- return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
deleted file mode 100644
index 35b51012da4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ /dev/null
@@ -1,1719 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Device.h"
-
-#include "common/Log.h"
-#include "dawn_native/Adapter.h"
-#include "dawn_native/AsyncTask.h"
-#include "dawn_native/AttachmentState.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CompilationMessages.h"
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/ErrorInjector.h"
-#include "dawn_native/ErrorScope.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/PersistentCache.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/RenderBundleEncoder.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/Surface.h"
-#include "dawn_native/SwapChain.h"
-#include "dawn_native/Texture.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-#include "utils/WGPUHelpers.h"
-
-#include <array>
-#include <mutex>
-#include <unordered_set>
-
-namespace dawn_native {
-
- // DeviceBase sub-structures
-
- // The caches are unordered_sets of pointers with special hash and compare functions
- // to compare the value of the objects, instead of the pointers.
- template <typename Object>
- using ContentLessObjectCache =
- std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
-
- struct DeviceBase::Caches {
- ~Caches() {
- ASSERT(attachmentStates.empty());
- ASSERT(bindGroupLayouts.empty());
- ASSERT(computePipelines.empty());
- ASSERT(pipelineLayouts.empty());
- ASSERT(renderPipelines.empty());
- ASSERT(samplers.empty());
- ASSERT(shaderModules.empty());
- }
-
- ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
- ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
- ContentLessObjectCache<ComputePipelineBase> computePipelines;
- ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
- ContentLessObjectCache<RenderPipelineBase> renderPipelines;
- ContentLessObjectCache<SamplerBase> samplers;
- ContentLessObjectCache<ShaderModuleBase> shaderModules;
- };
-
- struct DeviceBase::DeprecationWarnings {
- std::unordered_set<std::string> emitted;
- size_t count = 0;
- };
-
- namespace {
- struct LoggingCallbackTask : CallbackTask {
- public:
- LoggingCallbackTask() = delete;
- LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
- WGPULoggingType loggingType,
- const char* message,
- void* userdata)
- : mCallback(loggingCallback),
- mLoggingType(loggingType),
- mMessage(message),
- mUserdata(userdata) {
- // Since the Finish() will be called in uncertain future in which time the message
- // may already disposed, we must keep a local copy in the CallbackTask.
- }
-
- void Finish() override {
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
-
- void HandleShutDown() override {
- // Do the logging anyway
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
-
- void HandleDeviceLoss() override {
- mCallback(mLoggingType, mMessage.c_str(), mUserdata);
- }
-
- private:
- // As all deferred callback tasks will be triggered before modifying the registered
- // callback or shutting down, we are ensured that callback function and userdata pointer
- // stored in tasks is valid when triggered.
- wgpu::LoggingCallback mCallback;
- WGPULoggingType mLoggingType;
- std::string mMessage;
- void* mUserdata;
- };
-
- ResultOrError<Ref<PipelineLayoutBase>>
- ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- DeviceBase* device,
- const ComputePipelineDescriptor& descriptor,
- ComputePipelineDescriptor* outDescriptor) {
- Ref<PipelineLayoutBase> layoutRef;
- *outDescriptor = descriptor;
-
- if (outDescriptor->layout == nullptr) {
- DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
- device, {{
- SingleShaderStage::Compute,
- outDescriptor->compute.module,
- outDescriptor->compute.entryPoint,
- outDescriptor->compute.constantCount,
- outDescriptor->compute.constants,
- }}));
- outDescriptor->layout = layoutRef.Get();
- }
-
- return layoutRef;
- }
-
- ResultOrError<Ref<PipelineLayoutBase>>
- ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- DeviceBase* device,
- const RenderPipelineDescriptor& descriptor,
- RenderPipelineDescriptor* outDescriptor) {
- Ref<PipelineLayoutBase> layoutRef;
- *outDescriptor = descriptor;
-
- if (descriptor.layout == nullptr) {
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- DAWN_TRY_ASSIGN(layoutRef,
- PipelineLayoutBase::CreateDefault(
- device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
- outDescriptor->layout = layoutRef.Get();
- }
-
- return layoutRef;
- }
-
- } // anonymous namespace
-
- // DeviceBase
-
- DeviceBase::DeviceBase(AdapterBase* adapter, const DawnDeviceDescriptor* descriptor)
- : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
- if (descriptor != nullptr) {
- ApplyToggleOverrides(descriptor);
- ApplyFeatures(descriptor);
- }
-
- if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
- mLimits.v1 = ReifyDefaultLimits(FromAPI(descriptor->requiredLimits)->limits);
- } else {
- GetDefaultLimits(&mLimits.v1);
- }
-
- mFormatTable = BuildFormatTable(this);
- SetDefaultToggles();
- }
-
- DeviceBase::DeviceBase() : mState(State::Alive) {
- mCaches = std::make_unique<DeviceBase::Caches>();
- }
-
- DeviceBase::~DeviceBase() {
- // We need to explicitly release the Queue before we complete the destructor so that the
- // Queue does not get destroyed after the Device.
- mQueue = nullptr;
- }
-
- MaybeError DeviceBase::Initialize(QueueBase* defaultQueue) {
- mQueue = AcquireRef(defaultQueue);
-
-#if defined(DAWN_ENABLE_ASSERTS)
- mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
- "probably not intended. If you really want to ignore errors "
- "and suppress this message, set the callback to null.";
- }
- };
-
- mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
- "intended. If you really want to ignore device lost "
- "and suppress this message, set the callback to null.";
- }
- };
-#endif // DAWN_ENABLE_ASSERTS
-
- mCaches = std::make_unique<DeviceBase::Caches>();
- mErrorScopeStack = std::make_unique<ErrorScopeStack>();
- mDynamicUploader = std::make_unique<DynamicUploader>(this);
- mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
- mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
- mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
- mPersistentCache = std::make_unique<PersistentCache>(this);
-
- ASSERT(GetPlatform() != nullptr);
- mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
- mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
-
- // Starting from now the backend can start doing reentrant calls so the device is marked as
- // alive.
- mState = State::Alive;
-
- DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
-
- // If dummy fragment shader module is needed, initialize it
- if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
- // The empty fragment shader, used as a work around for vertex-only render pipeline
- constexpr char kEmptyFragmentShader[] = R"(
- [[stage(fragment)]] fn fs_empty_main() {}
- )";
- ShaderModuleDescriptor descriptor;
- ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = kEmptyFragmentShader;
- descriptor.nextInChain = &wgslDesc;
-
- DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
- CreateShaderModule(&descriptor));
- }
-
- return {};
- }
-
- void DeviceBase::DestroyObjects() {
- // List of object types in reverse "dependency" order so we can iterate and delete the
- // objects safely starting at leaf objects. We define dependent here such that if B has
- // a ref to A, then B depends on A. We therefore try to destroy B before destroying A. Note
- // that this only considers the immediate frontend dependencies, while backend objects could
- // add complications and extra dependencies.
- //
- // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
- // since AttachmentStates are cached by the device, objects that hold references to
- // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
- // can destroy the frontend cache.
-
- // clang-format off
- static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
- ObjectType::ComputePassEncoder,
- ObjectType::RenderPassEncoder,
- ObjectType::RenderBundleEncoder,
- ObjectType::RenderBundle,
- ObjectType::CommandEncoder,
- ObjectType::CommandBuffer,
- ObjectType::RenderPipeline,
- ObjectType::ComputePipeline,
- ObjectType::PipelineLayout,
- ObjectType::SwapChain,
- ObjectType::BindGroup,
- ObjectType::BindGroupLayout,
- ObjectType::ShaderModule,
- ObjectType::ExternalTexture,
- ObjectType::TextureView,
- ObjectType::Texture,
- ObjectType::QuerySet,
- ObjectType::Sampler,
- ObjectType::Buffer,
- };
- // clang-format on
-
- // We first move all objects out from the tracking list into a separate list so that we can
- // avoid locking the same mutex twice. We can then iterate across the separate list to call
- // the actual destroy function.
- LinkedList<ApiObjectBase> objects;
- for (ObjectType type : kObjectTypeDependencyOrder) {
- ApiObjectList& objList = mObjectLists[type];
- const std::lock_guard<std::mutex> lock(objList.mutex);
- objList.objects.MoveInto(&objects);
- }
- for (LinkNode<ApiObjectBase>* node : objects) {
- node->value()->Destroy();
- }
- }
-
- void DeviceBase::Destroy() {
- // Skip if we are already destroyed.
- if (mState == State::Destroyed) {
- return;
- }
-
- // Skip handling device facilities if they haven't even been created (or failed doing so)
- if (mState != State::BeingCreated) {
- // The device is being destroyed so it will be lost, call the application callback.
- if (mDeviceLostCallback != nullptr) {
- mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
- mDeviceLostUserdata);
- mDeviceLostCallback = nullptr;
- }
-
- // Call all the callbacks immediately as the device is about to shut down.
- // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
- mAsyncTaskManager->WaitAllPendingTasks();
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->HandleShutDown();
- }
- }
-
- // Disconnect the device, depending on which state we are currently in.
- switch (mState) {
- case State::BeingCreated:
- // The GPU timeline was never started so we don't have to wait.
- break;
-
- case State::Alive:
- // Alive is the only state which can have GPU work happening. Wait for all of it to
- // complete before proceeding with destruction.
- // Ignore errors so that we can continue with destruction
- IgnoreErrors(WaitForIdleForDestruction());
- AssumeCommandsComplete();
- break;
-
- case State::BeingDisconnected:
- // Getting disconnected is a transient state happening in a single API call so there
- // is always an external reference keeping the Device alive, which means the
- // destructor cannot run while BeingDisconnected.
- UNREACHABLE();
- break;
-
- case State::Disconnected:
- break;
-
- case State::Destroyed:
- // If we are already destroyed we should've skipped this work entirely.
- UNREACHABLE();
- break;
- }
- ASSERT(mCompletedSerial == mLastSubmittedSerial);
- ASSERT(mFutureSerial <= mCompletedSerial);
-
- if (mState != State::BeingCreated) {
- // The GPU timeline is finished.
- // Finish destroying all objects owned by the device and tick the queue-related tasks
- // since they should be complete. This must be done before DestroyImpl() it may
- // relinquish resources that will be freed by backends in the DestroyImpl() call.
- DestroyObjects();
- mQueue->Tick(GetCompletedCommandSerial());
- // Call TickImpl once last time to clean up resources
- // Ignore errors so that we can continue with destruction
- IgnoreErrors(TickImpl());
- }
-
- // At this point GPU operations are always finished, so we are in the disconnected state.
- // Note that currently this state change is required because some of the backend
- // implementations of DestroyImpl checks that we are disconnected before doing work.
- mState = State::Disconnected;
-
- mDynamicUploader = nullptr;
- mCallbackTaskManager = nullptr;
- mAsyncTaskManager = nullptr;
- mPersistentCache = nullptr;
- mEmptyBindGroupLayout = nullptr;
- mInternalPipelineStore = nullptr;
-
- AssumeCommandsComplete();
-
- // Now that the GPU timeline is empty, destroy the backend device.
- DestroyImpl();
-
- mCaches = nullptr;
- mState = State::Destroyed;
- }
-
- void DeviceBase::APIDestroy() {
- Destroy();
- }
-
- void DeviceBase::HandleError(InternalErrorType type, const char* message) {
- if (type == InternalErrorType::DeviceLost) {
- mState = State::Disconnected;
-
- // If the ErrorInjector is enabled, then the device loss might be fake and the device
- // still be executing commands. Force a wait for idle in this case, with State being
- // Disconnected so we can detect this case in WaitForIdleForDestruction.
- if (ErrorInjectorEnabled()) {
- IgnoreErrors(WaitForIdleForDestruction());
- }
-
- // A real device lost happened. Set the state to disconnected as the device cannot be
- // used. Also tags all commands as completed since the device stopped running.
- AssumeCommandsComplete();
- } else if (type == InternalErrorType::Internal) {
- // If we receive an internal error, assume the backend can't recover and proceed with
- // device destruction. We first wait for all previous commands to be completed so that
- // backend objects can be freed immediately, before handling the loss.
-
- // Move away from the Alive state so that the application cannot use this device
- // anymore.
- // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
- // threads in a multithreaded scenario?
- mState = State::BeingDisconnected;
-
- // Ignore errors so that we can continue with destruction
- // Assume all commands are complete after WaitForIdleForDestruction (because they were)
- IgnoreErrors(WaitForIdleForDestruction());
- IgnoreErrors(TickImpl());
- AssumeCommandsComplete();
- ASSERT(mFutureSerial <= mCompletedSerial);
- mState = State::Disconnected;
-
- // Now everything is as if the device was lost.
- type = InternalErrorType::DeviceLost;
- }
-
- if (type == InternalErrorType::DeviceLost) {
- // The device was lost, call the application callback.
- if (mDeviceLostCallback != nullptr) {
- mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
- mDeviceLostCallback = nullptr;
- }
-
- mQueue->HandleDeviceLoss();
-
- // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
- mAsyncTaskManager->WaitAllPendingTasks();
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->HandleDeviceLoss();
- }
-
- // Still forward device loss errors to the error scopes so they all reject.
- mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
- } else {
- // Pass the error to the error scope stack and call the uncaptured error callback
- // if it isn't handled. DeviceLost is not handled here because it should be
- // handled by the lost callback.
- bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
- if (!captured && mUncapturedErrorCallback != nullptr) {
- mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
- mUncapturedErrorUserdata);
- }
- }
- }
-
- void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
- ASSERT(error != nullptr);
- HandleError(error->GetType(), error->GetFormattedMessage().c_str());
- }
-
- void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mLoggingCallback = callback;
- mLoggingUserdata = userdata;
- }
-
- void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mUncapturedErrorCallback = callback;
- mUncapturedErrorUserdata = userdata;
- }
-
- void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
- // The registered callback function and userdata pointer are stored and used by deferred
- // callback tasks, and after setting a different callback (especially in the case of
- // resetting) the resources pointed by such pointer may be freed. Flush all deferred
- // callback tasks to guarantee we are never going to use the previous callback after
- // this call.
- if (IsLost()) {
- return;
- }
- FlushCallbackTaskQueue();
- mDeviceLostCallback = callback;
- mDeviceLostUserdata = userdata;
- }
-
- void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
- if (ConsumedError(ValidateErrorFilter(filter))) {
- return;
- }
- mErrorScopeStack->Push(filter);
- }
-
- bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
- if (mErrorScopeStack->Empty()) {
- return false;
- }
- ErrorScope scope = mErrorScopeStack->Pop();
- if (callback != nullptr) {
- callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
- userdata);
- }
-
- return true;
- }
-
- PersistentCache* DeviceBase::GetPersistentCache() {
- ASSERT(mPersistentCache.get() != nullptr);
- return mPersistentCache.get();
- }
-
- MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
- ASSERT(object != nullptr);
- DAWN_INVALID_IF(object->GetDevice() != this,
- "%s is associated with %s, and cannot be used with %s.", object,
- object->GetDevice(), this);
-
- // TODO(dawn:563): Preserve labels for error objects.
- DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
-
- return {};
- }
-
- MaybeError DeviceBase::ValidateIsAlive() const {
- DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
- return {};
- }
-
- void DeviceBase::APILoseForTesting() {
- if (mState != State::Alive) {
- return;
- }
-
- HandleError(InternalErrorType::Internal, "Device lost for testing");
- }
-
- DeviceBase::State DeviceBase::GetState() const {
- return mState;
- }
-
- bool DeviceBase::IsLost() const {
- ASSERT(mState != State::BeingCreated);
- return mState != State::Alive;
- }
-
- void DeviceBase::TrackObject(ApiObjectBase* object) {
- ApiObjectList& objectList = mObjectLists[object->GetType()];
- std::lock_guard<std::mutex> lock(objectList.mutex);
- object->InsertBefore(objectList.objects.head());
- }
-
- std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
- return &mObjectLists[type].mutex;
- }
-
- AdapterBase* DeviceBase::GetAdapter() const {
- return mAdapter;
- }
-
- dawn_platform::Platform* DeviceBase::GetPlatform() const {
- return GetAdapter()->GetInstance()->GetPlatform();
- }
-
- ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
- return mCompletedSerial;
- }
-
- ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
- return mLastSubmittedSerial;
- }
-
- ExecutionSerial DeviceBase::GetFutureSerial() const {
- return mFutureSerial;
- }
-
- InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
- return mInternalPipelineStore.get();
- }
-
- void DeviceBase::IncrementLastSubmittedCommandSerial() {
- mLastSubmittedSerial++;
- }
-
- void DeviceBase::AssumeCommandsComplete() {
- ExecutionSerial maxSerial =
- ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
- mLastSubmittedSerial = maxSerial;
- mCompletedSerial = maxSerial;
- }
-
- bool DeviceBase::IsDeviceIdle() {
- if (mAsyncTaskManager->HasPendingTasks()) {
- return false;
- }
-
- ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
- if (mCompletedSerial == maxSerial) {
- return true;
- }
- return false;
- }
-
- ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
- return mLastSubmittedSerial + ExecutionSerial(1);
- }
-
- void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
- if (serial > mFutureSerial) {
- mFutureSerial = serial;
- }
- }
-
- MaybeError DeviceBase::CheckPassedSerials() {
- ExecutionSerial completedSerial;
- DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
-
- ASSERT(completedSerial <= mLastSubmittedSerial);
- // completedSerial should not be less than mCompletedSerial unless it is 0.
- // It can be 0 when there's no fences to check.
- ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
-
- if (completedSerial > mCompletedSerial) {
- mCompletedSerial = completedSerial;
- }
-
- return {};
- }
-
- ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
- size_t index = ComputeFormatIndex(format);
- DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
-
- const Format* internalFormat = &mFormatTable[index];
- DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
-
- return internalFormat;
- }
-
- const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
- size_t index = ComputeFormatIndex(format);
- ASSERT(index < mFormatTable.size());
- ASSERT(mFormatTable[index].isSupported);
- return mFormatTable[index];
- }
-
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
- ApiObjectBase::kUntrackedByDevice);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<BindGroupLayoutBase> result;
- auto iter = mCaches->bindGroupLayouts.find(&blueprint);
- if (iter != mCaches->bindGroupLayouts.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result,
- CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->bindGroupLayouts.insert(result.Get());
- }
-
- return std::move(result);
- }
-
- void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- // Private function used at initialization
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
- BindGroupLayoutDescriptor desc = {};
- desc.entryCount = 0;
- desc.entries = nullptr;
-
- return GetOrCreateBindGroupLayout(&desc);
- }
-
- BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
- ASSERT(mEmptyBindGroupLayout != nullptr);
- return mEmptyBindGroupLayout.Get();
- }
-
- Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
- ComputePipelineBase* uninitializedComputePipeline) {
- Ref<ComputePipelineBase> cachedPipeline;
- auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
- if (iter != mCaches->computePipelines.end()) {
- cachedPipeline = *iter;
- }
-
- return cachedPipeline;
- }
-
- Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
- RenderPipelineBase* uninitializedRenderPipeline) {
- Ref<RenderPipelineBase> cachedPipeline;
- auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
- if (iter != mCaches->renderPipelines.end()) {
- cachedPipeline = *iter;
- }
- return cachedPipeline;
- }
-
- Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
- Ref<ComputePipelineBase> computePipeline) {
- auto insertion = mCaches->computePipelines.insert(computePipeline.Get());
- if (insertion.second) {
- computePipeline->SetIsCachedReference();
- return computePipeline;
- } else {
- return *(insertion.first);
- }
- }
-
- Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
- Ref<RenderPipelineBase> renderPipeline) {
- auto insertion = mCaches->renderPipelines.insert(renderPipeline.Get());
- if (insertion.second) {
- renderPipeline->SetIsCachedReference();
- return renderPipeline;
- } else {
- return *(insertion.first);
- }
- }
-
- void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->computePipelines.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<PipelineLayoutBase> result;
- auto iter = mCaches->pipelineLayouts.find(&blueprint);
- if (iter != mCaches->pipelineLayouts.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->pipelineLayouts.insert(result.Get());
- }
-
- return std::move(result);
- }
-
- void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->pipelineLayouts.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->renderPipelines.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
- const SamplerDescriptor* descriptor) {
- SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<SamplerBase> result;
- auto iter = mCaches->samplers.find(&blueprint);
- if (iter != mCaches->samplers.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->samplers.insert(result.Get());
- }
-
- return std::move(result);
- }
-
- void DeviceBase::UncacheSampler(SamplerBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->samplers.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* compilationMessages) {
- ASSERT(parseResult != nullptr);
-
- ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<ShaderModuleBase> result;
- auto iter = mCaches->shaderModules.find(&blueprint);
- if (iter != mCaches->shaderModules.end()) {
- result = *iter;
- } else {
- if (!parseResult->HasParsedShader()) {
- // We skip the parse on creation if validation isn't enabled which let's us quickly
- // lookup in the cache without validating and parsing. We need the parsed module
- // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
- // we can consider splitting it if additional validation is added.
- ASSERT(!IsValidationEnabled());
- DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
- compilationMessages));
- }
- DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->shaderModules.insert(result.Get());
- }
-
- return std::move(result);
- }
-
- void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->shaderModules.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- AttachmentStateBlueprint* blueprint) {
- auto iter = mCaches->attachmentStates.find(blueprint);
- if (iter != mCaches->attachmentStates.end()) {
- return static_cast<AttachmentState*>(*iter);
- }
-
- Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
- attachmentState->SetIsCachedReference();
- attachmentState->SetContentHash(attachmentState->ComputeContentHash());
- mCaches->attachmentStates.insert(attachmentState.Get());
- return attachmentState;
- }
-
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderBundleEncoderDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
- }
-
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderPipelineDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
- }
-
- Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
- const RenderPassDescriptor* descriptor) {
- AttachmentStateBlueprint blueprint(descriptor);
- return GetOrCreateAttachmentState(&blueprint);
- }
-
- void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
- ASSERT(obj->IsCachedReference());
- size_t removedCount = mCaches->attachmentStates.erase(obj);
- ASSERT(removedCount == 1);
- }
-
- // Object creation API methods
-
- BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
- Ref<BindGroupBase> result;
- if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
- this, descriptor)) {
- return BindGroupBase::MakeError(this);
- }
- return result.Detach();
- }
- BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor) {
- Ref<BindGroupLayoutBase> result;
- if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
- "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
- return BindGroupLayoutBase::MakeError(this);
- }
- return result.Detach();
- }
- BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
- Ref<BufferBase> result = nullptr;
- if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
- descriptor)) {
- ASSERT(result == nullptr);
- return BufferBase::MakeError(this, descriptor);
- }
- return result.Detach();
- }
- CommandEncoder* DeviceBase::APICreateCommandEncoder(
- const CommandEncoderDescriptor* descriptor) {
- const CommandEncoderDescriptor defaultDescriptor = {};
- if (descriptor == nullptr) {
- descriptor = &defaultDescriptor;
- }
- return new CommandEncoder(this, descriptor);
- }
- ComputePipelineBase* DeviceBase::APICreateComputePipeline(
- const ComputePipelineDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- Ref<ComputePipelineBase> result;
- if (ConsumedError(CreateComputePipeline(descriptor), &result,
- "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
- return ComputePipelineBase::MakeError(this);
- }
- return result.Detach();
- }
- void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
-
- // Call the callback directly when a validation error has been found in the front-end
- // validations. If there is no error, then CreateComputePipelineAsync will call the
- // callback.
- if (maybeResult.IsError()) {
- std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
- callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
- userdata);
- }
- }
- PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayoutBase> result;
- if (ConsumedError(CreatePipelineLayout(descriptor), &result,
- "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
- return PipelineLayoutBase::MakeError(this);
- }
- return result.Detach();
- }
- QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
- Ref<QuerySetBase> result;
- if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
- this, descriptor)) {
- return QuerySetBase::MakeError(this);
- }
- return result.Detach();
- }
- SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
- Ref<SamplerBase> result;
- if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
- descriptor)) {
- return SamplerBase::MakeError(this);
- }
- return result.Detach();
- }
- void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
- utils::GetLabelForTrace(descriptor->label));
- // TODO(dawn:563): Add validation error context.
- MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
-
- // Call the callback directly when a validation error has been found in the front-end
- // validations. If there is no error, then CreateRenderPipelineAsync will call the
- // callback.
- if (maybeResult.IsError()) {
- std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
- callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
- userdata);
- }
- }
- RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor) {
- Ref<RenderBundleEncoder> result;
- if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
- "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
- return RenderBundleEncoder::MakeError(this);
- }
- return result.Detach();
- }
- RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- Ref<RenderPipelineBase> result;
- if (ConsumedError(CreateRenderPipeline(descriptor), &result,
- "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
- return RenderPipelineBase::MakeError(this);
- }
- return result.Detach();
- }
- ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
- TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
- utils::GetLabelForTrace(descriptor->label));
-
- Ref<ShaderModuleBase> result;
- std::unique_ptr<OwnedCompilationMessages> compilationMessages(
- std::make_unique<OwnedCompilationMessages>());
- if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
- "calling %s.CreateShaderModule(%s).", this, descriptor)) {
- DAWN_ASSERT(result == nullptr);
- result = ShaderModuleBase::MakeError(this);
- }
- // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
- // after all other operations are finished successfully.
- result->InjectCompilationMessages(std::move(compilationMessages));
-
- return result.Detach();
- }
- SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChainBase> result;
- if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
- "calling %s.CreateSwapChain(%s).", this, descriptor)) {
- return SwapChainBase::MakeError(this);
- }
- return result.Detach();
- }
- TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
- Ref<TextureBase> result;
- if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
- descriptor)) {
- return TextureBase::MakeError(this);
- }
- return result.Detach();
- }
-
- // For Dawn Wire
-
- BufferBase* DeviceBase::APICreateErrorBuffer() {
- BufferDescriptor desc = {};
- return BufferBase::MakeError(this, &desc);
- }
-
- // Other Device API methods
-
- // Returns true if future ticking is needed.
- bool DeviceBase::APITick() {
- if (ConsumedError(Tick())) {
- return false;
- }
- return !IsDeviceIdle();
- }
-
- MaybeError DeviceBase::Tick() {
- DAWN_TRY(ValidateIsAlive());
-
- // to avoid overly ticking, we only want to tick when:
- // 1. the last submitted serial has moved beyond the completed serial
- // 2. or the completed serial has not reached the future serial set by the trackers
- if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
- DAWN_TRY(CheckPassedSerials());
- DAWN_TRY(TickImpl());
-
- // There is no GPU work in flight, we need to move the serials forward so that
- // so that CPU operations waiting on GPU completion can know they don't have to wait.
- // AssumeCommandsComplete will assign the max serial we must tick to in order to
- // fire the awaiting callbacks.
- if (mCompletedSerial == mLastSubmittedSerial) {
- AssumeCommandsComplete();
- }
-
- // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
- // tick the dynamic uploader before the backend resource allocators. This would allow
- // reclaiming resources one tick earlier.
- mDynamicUploader->Deallocate(mCompletedSerial);
- mQueue->Tick(mCompletedSerial);
- }
-
- // We have to check callback tasks in every Tick because it is not related to any global
- // serials.
- FlushCallbackTaskQueue();
-
- return {};
- }
-
- QueueBase* DeviceBase::APIGetQueue() {
- // Backends gave the primary queue during initialization.
- ASSERT(mQueue != nullptr);
-
- // Returns a new reference to the queue.
- mQueue->Reference();
- return mQueue.Get();
- }
-
- ExternalTextureBase* DeviceBase::APICreateExternalTexture(
- const ExternalTextureDescriptor* descriptor) {
- Ref<ExternalTextureBase> result = nullptr;
- if (ConsumedError(CreateExternalTexture(descriptor), &result,
- "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
- return ExternalTextureBase::MakeError(this);
- }
-
- return result.Detach();
- }
-
- void DeviceBase::ApplyFeatures(const DawnDeviceDescriptor* deviceDescriptor) {
- ASSERT(deviceDescriptor);
- ASSERT(GetAdapter()->SupportsAllRequestedFeatures(deviceDescriptor->requiredFeatures));
-
- mEnabledFeatures = GetAdapter()->GetInstance()->FeatureNamesToFeaturesSet(
- deviceDescriptor->requiredFeatures);
- }
-
- std::vector<const char*> DeviceBase::GetEnabledFeatures() const {
- return mEnabledFeatures.GetEnabledFeatureNames();
- }
-
- bool DeviceBase::IsFeatureEnabled(Feature feature) const {
- return mEnabledFeatures.IsEnabled(feature);
- }
-
- bool DeviceBase::IsValidationEnabled() const {
- return !IsToggleEnabled(Toggle::SkipValidation);
- }
-
- bool DeviceBase::IsRobustnessEnabled() const {
- return !IsToggleEnabled(Toggle::DisableRobustness);
- }
-
- size_t DeviceBase::GetLazyClearCountForTesting() {
- return mLazyClearCountForTesting;
- }
-
- void DeviceBase::IncrementLazyClearCountForTesting() {
- ++mLazyClearCountForTesting;
- }
-
- size_t DeviceBase::GetDeprecationWarningCountForTesting() {
- return mDeprecationWarnings->count;
- }
-
- void DeviceBase::EmitDeprecationWarning(const char* warning) {
- mDeprecationWarnings->count++;
- if (mDeprecationWarnings->emitted.insert(warning).second) {
- dawn::WarningLog() << warning;
- }
- }
-
- void DeviceBase::EmitLog(const char* message) {
- this->EmitLog(WGPULoggingType_Info, message);
- }
-
- void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
- if (mLoggingCallback != nullptr) {
- // Use the thread-safe CallbackTaskManager routine
- std::unique_ptr<LoggingCallbackTask> callbackTask =
- std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
- mLoggingUserdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
- }
- }
-
- bool DeviceBase::APIGetLimits(SupportedLimits* limits) {
- ASSERT(limits != nullptr);
- if (limits->nextInChain != nullptr) {
- return false;
- }
- limits->limits = mLimits.v1;
- return true;
- }
-
- void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
- if (ConsumedError(ValidateErrorType(type))) {
- return;
- }
-
- // This method should only be used to make error scope reject. For DeviceLost there is the
- // LoseForTesting function that can be used instead.
- if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
- HandleError(InternalErrorType::Validation,
- "Invalid injected error, must be Validation or OutOfMemory");
- return;
- }
-
- HandleError(FromWGPUErrorType(type), message);
- }
-
- QueueBase* DeviceBase::GetQueue() const {
- return mQueue.Get();
- }
-
- // Implementation details of object creation
-
- ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
- const BindGroupDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
- "validating %s against %s", descriptor, descriptor->layout);
- }
- return CreateBindGroupImpl(descriptor);
- }
-
- ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
- "validating %s", descriptor);
- }
- return GetOrCreateBindGroupLayout(descriptor);
- }
-
- ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
-
- Ref<BufferBase> buffer;
- DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
-
- if (descriptor->mappedAtCreation) {
- DAWN_TRY(buffer->MapAtCreation());
- }
-
- return std::move(buffer);
- }
-
- ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
- const ComputePipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
- }
-
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<ComputePipelineBase> uninitializedComputePipeline =
- CreateUninitializedComputePipelineImpl(&appliedDescriptor);
- Ref<ComputePipelineBase> cachedComputePipeline =
- GetCachedComputePipeline(uninitializedComputePipeline.Get());
- if (cachedComputePipeline.Get() != nullptr) {
- return cachedComputePipeline;
- }
-
- DAWN_TRY(uninitializedComputePipeline->Initialize());
- return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
- }
-
- MaybeError DeviceBase::CreateComputePipelineAsync(
- const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
- }
-
- Ref<PipelineLayoutBase> layoutRef;
- ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<ComputePipelineBase> uninitializedComputePipeline =
- CreateUninitializedComputePipelineImpl(&appliedDescriptor);
-
- // Call the callback directly when we can get a cached compute pipeline object.
- Ref<ComputePipelineBase> cachedComputePipeline =
- GetCachedComputePipeline(uninitializedComputePipeline.Get());
- if (cachedComputePipeline.Get() != nullptr) {
- callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
- "", userdata);
- } else {
- // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
- // where the pipeline object may be initialized asynchronously and the result will be
- // saved to mCreatePipelineAsyncTracker.
- InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
- userdata);
- }
-
- return {};
- }
-
- // This function is overwritten with the async version on the backends that supports
- // initializing compute pipelines asynchronously.
- void DeviceBase::InitializeComputePipelineAsyncImpl(
- Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- Ref<ComputePipelineBase> result;
- std::string errorMessage;
-
- MaybeError maybeError = computePipeline->Initialize();
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- errorMessage = error->GetMessage();
- } else {
- result = AddOrGetCachedComputePipeline(std::move(computePipeline));
- }
-
- std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
- std::make_unique<CreateComputePipelineAsyncCallbackTask>(
- std::move(result), errorMessage, callback, userdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
- }
-
- // This function is overwritten with the async version on the backends
- // that supports initializing render pipeline asynchronously
- void DeviceBase::InitializeRenderPipelineAsyncImpl(
- Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- Ref<RenderPipelineBase> result;
- std::string errorMessage;
-
- MaybeError maybeError = renderPipeline->Initialize();
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- errorMessage = error->GetMessage();
- } else {
- result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
- }
-
- std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
- std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
- callback, userdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
- }
- return GetOrCreatePipelineLayout(descriptor);
- }
-
- ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTexture(
- const ExternalTextureDescriptor* descriptor) {
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
-
- return ExternalTextureBase::Create(this, descriptor);
- }
-
- ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
- const QuerySetDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
- return CreateQuerySetImpl(descriptor);
- }
-
- ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
- }
- return RenderBundleEncoder::Create(this, descriptor);
- }
-
- ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
- }
-
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- RenderPipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<RenderPipelineBase> uninitializedRenderPipeline =
- CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
- Ref<RenderPipelineBase> cachedRenderPipeline =
- GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
- if (cachedRenderPipeline != nullptr) {
- return cachedRenderPipeline;
- }
-
- DAWN_TRY(uninitializedRenderPipeline->Initialize());
- return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
- }
-
- MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
- }
-
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- RenderPipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
- this, *descriptor, &appliedDescriptor));
-
- Ref<RenderPipelineBase> uninitializedRenderPipeline =
- CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
- // Call the callback directly when we can get a cached render pipeline object.
- Ref<RenderPipelineBase> cachedRenderPipeline =
- GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
- if (cachedRenderPipeline != nullptr) {
- callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
- "", userdata);
- } else {
- // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
- // where the pipeline object may be initialized asynchronously and the result will be
- // saved to mCreatePipelineAsyncTracker.
- InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
- userdata);
- }
-
- return {};
- }
-
- ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
- const SamplerDescriptor defaultDescriptor = {};
- DAWN_TRY(ValidateIsAlive());
- descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
- descriptor);
- }
- return GetOrCreateSampler(descriptor);
- }
-
- ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- OwnedCompilationMessages* compilationMessages) {
- DAWN_TRY(ValidateIsAlive());
-
- // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
- // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
- // long as dawn_native don't use the compilationMessages of these internal shader modules.
- ShaderModuleParseResult parseResult;
-
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
- "validating %s", descriptor);
- }
-
- return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
- }
-
- ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
- Surface* surface,
- const SwapChainDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
- "validating %s", descriptor);
- }
-
- // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
- if (surface == nullptr) {
- return CreateSwapChainImpl(descriptor);
- } else {
- ASSERT(descriptor->implementation == 0);
-
- NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
- ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
- CreateSwapChainImpl(surface, previousSwapChain, descriptor);
-
- if (previousSwapChain != nullptr) {
- previousSwapChain->DetachFromSurface();
- }
-
- Ref<NewSwapChainBase> newSwapChain;
- DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
-
- newSwapChain->SetIsAttached();
- surface->SetAttachedSwapChain(newSwapChain.Get());
- return newSwapChain;
- }
- }
-
- ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
- descriptor);
- }
- return CreateTextureImpl(descriptor);
- }
-
- ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- DAWN_TRY(ValidateIsAlive());
- DAWN_TRY(ValidateObject(texture));
- TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
- if (IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
- "validating %s against %s.", &desc, texture);
- }
- return CreateTextureViewImpl(texture, &desc);
- }
-
- // Other implementation details
-
- DynamicUploader* DeviceBase::GetDynamicUploader() const {
- return mDynamicUploader.get();
- }
-
- // The Toggle device facility
-
- std::vector<const char*> DeviceBase::GetTogglesUsed() const {
- return mEnabledToggles.GetContainedToggleNames();
- }
-
- bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
- return mEnabledToggles.Has(toggle);
- }
-
- void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
- if (!mOverridenToggles.Has(toggle)) {
- mEnabledToggles.Set(toggle, isEnabled);
- }
- }
-
- void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
- if (!mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
- dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
- << isEnabled << " when it was overriden to be " << !isEnabled;
- }
- mEnabledToggles.Set(toggle, isEnabled);
- }
-
- void DeviceBase::SetDefaultToggles() {
- SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
- SetToggle(Toggle::DisallowUnsafeAPIs, true);
- }
-
- void DeviceBase::ApplyToggleOverrides(const DawnDeviceDescriptor* deviceDescriptor) {
- ASSERT(deviceDescriptor);
-
- for (const char* toggleName : deviceDescriptor->forceEnabledToggles) {
- Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(toggleName);
- if (toggle != Toggle::InvalidEnum) {
- mEnabledToggles.Set(toggle, true);
- mOverridenToggles.Set(toggle, true);
- }
- }
- for (const char* toggleName : deviceDescriptor->forceDisabledToggles) {
- Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(toggleName);
- if (toggle != Toggle::InvalidEnum) {
- mEnabledToggles.Set(toggle, false);
- mOverridenToggles.Set(toggle, true);
- }
- }
- }
-
- void DeviceBase::FlushCallbackTaskQueue() {
- if (!mCallbackTaskManager->IsEmpty()) {
- // If a user calls Queue::Submit inside the callback, then the device will be ticked,
- // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
- // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
- // update mCallbackTaskManager, then call all the callbacks.
- auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
- for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
- callbackTask->Finish();
- }
- }
- }
-
- const CombinedLimits& DeviceBase::GetLimits() const {
- return mLimits;
- }
-
- AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
- return mAsyncTaskManager.get();
- }
-
- CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
- return mCallbackTaskManager.get();
- }
-
- dawn_platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
- return mWorkerTaskPool.get();
- }
-
- void DeviceBase::AddComputePipelineAsyncCallbackTask(
- Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
- // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
- struct CreateComputePipelineAsyncWaitableCallbackTask final
- : CreateComputePipelineAsyncCallbackTask {
- using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
- void Finish() final {
- // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
- // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
- // thread-safe.
- if (mPipeline.Get() != nullptr) {
- mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
- }
-
- CreateComputePipelineAsyncCallbackTask::Finish();
- }
- };
-
- mCallbackTaskManager->AddCallbackTask(
- std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
- std::move(pipeline), errorMessage, callback, userdata));
- }
-
- void DeviceBase::AddRenderPipelineAsyncCallbackTask(
- Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
- // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
- struct CreateRenderPipelineAsyncWaitableCallbackTask final
- : CreateRenderPipelineAsyncCallbackTask {
- using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
-
- void Finish() final {
- // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
- // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
- // thread-safe.
- if (mPipeline.Get() != nullptr) {
- mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
- }
-
- CreateRenderPipelineAsyncCallbackTask::Finish();
- }
- };
-
- mCallbackTaskManager->AddCallbackTask(
- std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
- std::move(pipeline), errorMessage, callback, userdata));
- }
-
- PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
- return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
- }
-
- const std::string& DeviceBase::GetLabel() const {
- return mLabel;
- }
-
- void DeviceBase::APISetLabel(const char* label) {
- mLabel = label;
- SetLabelImpl();
- }
-
- void DeviceBase::SetLabelImpl() {
- }
-
- bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const {
- return false;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
deleted file mode 100644
index ff9b7f099a2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ /dev/null
@@ -1,544 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_DEVICE_H_
-#define DAWNNATIVE_DEVICE_H_
-
-#include "dawn_native/Commands.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Features.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/Limits.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/StagingBuffer.h"
-#include "dawn_native/Toggles.h"
-
-#include "dawn_native/DawnNative.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <mutex>
-#include <utility>
-
-namespace dawn_platform {
- class WorkerTaskPool;
-} // namespace dawn_platform
-
-namespace dawn_native {
- class AdapterBase;
- class AsyncTaskManager;
- class AttachmentState;
- class AttachmentStateBlueprint;
- class BindGroupLayoutBase;
- class CallbackTaskManager;
- class DynamicUploader;
- class ErrorScopeStack;
- class ExternalTextureBase;
- class OwnedCompilationMessages;
- class PersistentCache;
- class StagingBufferBase;
- struct CallbackTask;
- struct InternalPipelineStore;
- struct ShaderModuleParseResult;
-
- class DeviceBase : public RefCounted {
- public:
- DeviceBase(AdapterBase* adapter, const DawnDeviceDescriptor* descriptor);
- virtual ~DeviceBase();
-
- void HandleError(InternalErrorType type, const char* message);
-
- bool ConsumedError(MaybeError maybeError) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- ConsumeError(maybeError.AcquireError());
- return true;
- }
- return false;
- }
-
- template <typename T>
- bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
- if (DAWN_UNLIKELY(resultOrError.IsError())) {
- ConsumeError(resultOrError.AcquireError());
- return true;
- }
- *result = resultOrError.AcquireSuccess();
- return false;
- }
-
- template <typename... Args>
- bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(
- absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
- }
- }
- ConsumeError(std::move(error));
- return true;
- }
- return false;
- }
-
- template <typename T, typename... Args>
- bool ConsumedError(ResultOrError<T> resultOrError,
- T* result,
- const char* formatStr,
- const Args&... args) {
- if (DAWN_UNLIKELY(resultOrError.IsError())) {
- std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(
- absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
- }
- }
- ConsumeError(std::move(error));
- return true;
- }
- *result = resultOrError.AcquireSuccess();
- return false;
- }
-
- MaybeError ValidateObject(const ApiObjectBase* object) const;
-
- AdapterBase* GetAdapter() const;
- dawn_platform::Platform* GetPlatform() const;
-
- // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
- // isn't a valid wgpu::TextureFormat or isn't supported by this device.
- // The pointer returned has the same lifetime as the device.
- ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
-
- // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
- // valid and supported.
- // The reference returned has the same lifetime as the device.
- const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
-
- virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) = 0;
-
- ExecutionSerial GetCompletedCommandSerial() const;
- ExecutionSerial GetLastSubmittedCommandSerial() const;
- ExecutionSerial GetFutureSerial() const;
- ExecutionSerial GetPendingCommandSerial() const;
-
- // Many Dawn objects are completely immutable once created which means that if two
- // creations are given the same arguments, they can return the same object. Reusing
- // objects will help make comparisons between objects by a single pointer comparison.
- //
- // Technically no object is immutable as they have a reference count, and an
- // application with reference-counting issues could "see" that objects are reused.
- // This is solved by automatic-reference counting, and also the fact that when using
- // the client-server wire every creation will get a different proxy object, with a
- // different reference count.
- //
- // When trying to create an object, we give both the descriptor and an example of what
- // the created object will be, the "blueprint". The blueprint is just a FooBase object
- // instead of a backend Foo object. If the blueprint doesn't match an object in the
- // cache, then the descriptor is used to make a new object.
- ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
- void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
-
- BindGroupLayoutBase* GetEmptyBindGroupLayout();
-
- void UncacheComputePipeline(ComputePipelineBase* obj);
-
- ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor);
- void UncachePipelineLayout(PipelineLayoutBase* obj);
-
- void UncacheRenderPipeline(RenderPipelineBase* obj);
-
- ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
- void UncacheSampler(SamplerBase* obj);
-
- ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* compilationMessages);
- void UncacheShaderModule(ShaderModuleBase* obj);
-
- Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
- Ref<AttachmentState> GetOrCreateAttachmentState(
- const RenderBundleEncoderDescriptor* descriptor);
- Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
- Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
- void UncacheAttachmentState(AttachmentState* obj);
-
- // Object creation methods that be used in a reentrant manner.
- ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor,
- bool allowInternalBinding = false);
- ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
- ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
- const ComputePipelineDescriptor* descriptor);
- MaybeError CreateComputePipelineAsync(
- const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- ResultOrError<Ref<ExternalTextureBase>> CreateExternalTexture(
- const ExternalTextureDescriptor* descriptor);
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
- const PipelineLayoutDescriptor* descriptor);
- ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
- ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor);
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor);
- MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor);
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
- const ShaderModuleDescriptor* descriptor,
- OwnedCompilationMessages* compilationMessages = nullptr);
- ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
- const SwapChainDescriptor* descriptor);
- ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
- ResultOrError<Ref<TextureViewBase>> CreateTextureView(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
- BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
- BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
- BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
- CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
- ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
- PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
- QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
- void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- RenderBundleEncoder* APICreateRenderBundleEncoder(
- const RenderBundleEncoderDescriptor* descriptor);
- RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
- ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
- SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
- ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
- SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
- TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
-
- InternalPipelineStore* GetInternalPipelineStore();
-
- // For Dawn Wire
- BufferBase* APICreateErrorBuffer();
-
- QueueBase* APIGetQueue();
-
- bool APIGetLimits(SupportedLimits* limits);
- void APIInjectError(wgpu::ErrorType type, const char* message);
- bool APITick();
-
- void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
- void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
- void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
- void APIPushErrorScope(wgpu::ErrorFilter filter);
- bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
-
- MaybeError ValidateIsAlive() const;
-
- PersistentCache* GetPersistentCache();
-
- virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
- size_t size) = 0;
- virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) = 0;
- virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) = 0;
-
- DynamicUploader* GetDynamicUploader() const;
-
- // The device state which is a combination of creation state and loss state.
- //
- // - BeingCreated: the device didn't finish creation yet and the frontend cannot be used
- // (both for the application calling WebGPU, or re-entrant calls). No work exists on
- // the GPU timeline.
- // - Alive: the device is usable and might have work happening on the GPU timeline.
- // - BeingDisconnected: the device is no longer usable because we are waiting for all
- // work on the GPU timeline to finish. (this is to make validation prevent the
- // application from adding more work during the transition from Available to
- // Disconnected)
- // - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
- // structures can be safely destroyed without additional synchronization.
- // - Destroyed: the device is disconnected and resources have been reclaimed.
- enum class State {
- BeingCreated,
- Alive,
- BeingDisconnected,
- Disconnected,
- Destroyed,
- };
- State GetState() const;
- bool IsLost() const;
- void TrackObject(ApiObjectBase* object);
- std::mutex* GetObjectListMutex(ObjectType type);
-
- std::vector<const char*> GetEnabledFeatures() const;
- std::vector<const char*> GetTogglesUsed() const;
- bool IsFeatureEnabled(Feature feature) const;
- bool IsToggleEnabled(Toggle toggle) const;
- bool IsValidationEnabled() const;
- bool IsRobustnessEnabled() const;
- size_t GetLazyClearCountForTesting();
- void IncrementLazyClearCountForTesting();
- size_t GetDeprecationWarningCountForTesting();
- void EmitDeprecationWarning(const char* warning);
- void EmitLog(const char* message);
- void EmitLog(WGPULoggingType loggingType, const char* message);
- void APILoseForTesting();
- QueueBase* GetQueue() const;
-
- // AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
- // ticked in order to clean up all pending callback work or to execute asynchronous resource
- // writes. It should be given the serial that a callback is tracked with, so that once that
- // serial is completed, it can be resolved and cleaned up. This is so that when there is no
- // gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
- // still check if we have pending work to take care of, rather than hanging and never
- // reaching the serial the work will be executed on.
- void AddFutureSerial(ExecutionSerial serial);
- // Check for passed fences and set the new completed serial
- MaybeError CheckPassedSerials();
-
- MaybeError Tick();
-
- // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
- // BackendMetadata that we can query from the device.
- virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
- virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
-
- virtual float GetTimestampPeriodInNS() const = 0;
-
- virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const;
-
- const CombinedLimits& GetLimits() const;
-
- AsyncTaskManager* GetAsyncTaskManager() const;
- CallbackTaskManager* GetCallbackTaskManager() const;
- dawn_platform::WorkerTaskPool* GetWorkerTaskPool() const;
-
- void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
- std::string errorMessage,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
-
- const std::string& GetLabel() const;
- void APISetLabel(const char* label);
- void APIDestroy();
-
- protected:
- // Constructor used only for mocking and testing.
- DeviceBase();
-
- void SetToggle(Toggle toggle, bool isEnabled);
- void ForceSetToggle(Toggle toggle, bool isEnabled);
-
- MaybeError Initialize(QueueBase* defaultQueue);
- void DestroyObjects();
- void Destroy();
-
- // Incrememt mLastSubmittedSerial when we submit the next serial
- void IncrementLastSubmittedCommandSerial();
-
- private:
- virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
- virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) = 0;
- virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) = 0;
- // Note that previousSwapChain may be nullptr, or come from a different backend.
- virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) = 0;
- virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) = 0;
- virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) = 0;
- virtual void SetLabelImpl();
-
- virtual MaybeError TickImpl() = 0;
- void FlushCallbackTaskQueue();
-
- ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
-
- Ref<ComputePipelineBase> GetCachedComputePipeline(
- ComputePipelineBase* uninitializedComputePipeline);
- Ref<RenderPipelineBase> GetCachedRenderPipeline(
- RenderPipelineBase* uninitializedRenderPipeline);
- Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
- Ref<ComputePipelineBase> computePipeline);
- Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
- Ref<RenderPipelineBase> renderPipeline);
- virtual void InitializeComputePipelineAsyncImpl(
- Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- virtual void InitializeRenderPipelineAsyncImpl(
- Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void ApplyToggleOverrides(const DawnDeviceDescriptor* deviceDescriptor);
- void ApplyFeatures(const DawnDeviceDescriptor* deviceDescriptor);
-
- void SetDefaultToggles();
-
- void ConsumeError(std::unique_ptr<ErrorData> error);
-
- // Each backend should implement to check their passed fences if there are any and return a
- // completed serial. Return 0 should indicate no fences to check.
- virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
- // During shut down of device, some operations might have been started since the last submit
- // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
- // make all commands look completed.
- void AssumeCommandsComplete();
- bool IsDeviceIdle();
-
- // mCompletedSerial tracks the last completed command serial that the fence has returned.
- // mLastSubmittedSerial tracks the last submitted command serial.
- // During device removal, the serials could be artificially incremented
- // to make it appear as if commands have been compeleted. They can also be artificially
- // incremented when no work is being done in the GPU so CPU operations don't have to wait on
- // stale serials.
- // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
- // callbacks to fire
- ExecutionSerial mCompletedSerial = ExecutionSerial(0);
- ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
- ExecutionSerial mFutureSerial = ExecutionSerial(0);
-
- // DestroyImpl is used to clean up and release resources used by device, does not wait for
- // GPU or check errors.
- virtual void DestroyImpl() = 0;
-
- // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
- // destruction. This is only used when properly destructing the device. For a real
- // device loss, this function doesn't need to be called since the driver already closed all
- // resources.
- virtual MaybeError WaitForIdleForDestruction() = 0;
-
- wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
- void* mUncapturedErrorUserdata = nullptr;
-
- wgpu::LoggingCallback mLoggingCallback = nullptr;
- void* mLoggingUserdata = nullptr;
-
- wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
- void* mDeviceLostUserdata = nullptr;
-
- std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
-
- // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
- // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
- // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
- // Instance.
- Ref<InstanceBase> mInstance;
- AdapterBase* mAdapter = nullptr;
-
- // The object caches aren't exposed in the header as they would require a lot of
- // additional includes.
- struct Caches;
- std::unique_ptr<Caches> mCaches;
-
- Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
-
- std::unique_ptr<DynamicUploader> mDynamicUploader;
- std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
- Ref<QueueBase> mQueue;
-
- struct DeprecationWarnings;
- std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
-
- State mState = State::BeingCreated;
-
- // Encompasses the mutex and the actual list that contains all live objects "owned" by the
- // device.
- struct ApiObjectList {
- std::mutex mutex;
- LinkedList<ApiObjectBase> objects;
- };
- PerObjectType<ApiObjectList> mObjectLists;
-
- FormatTable mFormatTable;
-
- TogglesSet mEnabledToggles;
- TogglesSet mOverridenToggles;
- size_t mLazyClearCountForTesting = 0;
- std::atomic_uint64_t mNextPipelineCompatibilityToken;
-
- CombinedLimits mLimits;
- FeaturesSet mEnabledFeatures;
-
- std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
-
- std::unique_ptr<PersistentCache> mPersistentCache;
-
- std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
- std::unique_ptr<dawn_platform::WorkerTaskPool> mWorkerTaskPool;
- std::string mLabel;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_DEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
deleted file mode 100644
index a53972361cf..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/DynamicUploader.h"
-#include "common/Math.h"
-#include "dawn_native/Device.h"
-
-namespace dawn_native {
-
- DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
- mRingBuffers.emplace_back(
- std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
- }
-
- void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
- mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
- mDevice->GetPendingCommandSerial());
- }
-
- ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
- ExecutionSerial serial) {
- // Disable further sub-allocation should the request be too large.
- if (allocationSize > kRingBufferSize) {
- std::unique_ptr<StagingBufferBase> stagingBuffer;
- DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
-
- UploadHandle uploadHandle;
- uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
- uploadHandle.stagingBuffer = stagingBuffer.get();
-
- ReleaseStagingBuffer(std::move(stagingBuffer));
- return uploadHandle;
- }
-
- // Note: Validation ensures size is already aligned.
- // First-fit: find next smallest buffer large enough to satisfy the allocation request.
- RingBuffer* targetRingBuffer = mRingBuffers.back().get();
- for (auto& ringBuffer : mRingBuffers) {
- const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
- // Prevent overflow.
- ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
- const uint64_t remainingSize =
- ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
- if (allocationSize <= remainingSize) {
- targetRingBuffer = ringBuffer.get();
- break;
- }
- }
-
- uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
- if (targetRingBuffer != nullptr) {
- startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
- }
-
- // Upon failure, append a newly created ring buffer to fulfill the
- // request.
- if (startOffset == RingBufferAllocator::kInvalidOffset) {
- mRingBuffers.emplace_back(
- std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, {kRingBufferSize}}));
-
- targetRingBuffer = mRingBuffers.back().get();
- startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
- }
-
- ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
-
- // Allocate the staging buffer backing the ringbuffer.
- // Note: the first ringbuffer will be lazily created.
- if (targetRingBuffer->mStagingBuffer == nullptr) {
- std::unique_ptr<StagingBufferBase> stagingBuffer;
- DAWN_TRY_ASSIGN(stagingBuffer,
- mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
- targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
- }
-
- ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
-
- UploadHandle uploadHandle;
- uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
- uploadHandle.mappedBuffer =
- static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
- uploadHandle.startOffset = startOffset;
-
- return uploadHandle;
- }
-
- void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
- // Reclaim memory within the ring buffers by ticking (or removing requests no longer
- // in-flight).
- for (size_t i = 0; i < mRingBuffers.size(); ++i) {
- mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
-
- // Never erase the last buffer as to prevent re-creating smaller buffers
- // again. The last buffer is the largest.
- if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
- mRingBuffers.erase(mRingBuffers.begin() + i);
- }
- }
- mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
- }
-
- // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
- // when it's not necessary.
- ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
- ExecutionSerial serial,
- uint64_t offsetAlignment) {
- ASSERT(offsetAlignment > 0);
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- AllocateInternal(allocationSize + offsetAlignment - 1, serial));
- uint64_t additionalOffset =
- Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
- uploadHandle.mappedBuffer =
- static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
- uploadHandle.startOffset += additionalOffset;
- return uploadHandle;
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
deleted file mode 100644
index 9ab0ea2b026..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_DYNAMICUPLOADER_H_
-#define DAWNNATIVE_DYNAMICUPLOADER_H_
-
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/RingBufferAllocator.h"
-#include "dawn_native/StagingBuffer.h"
-
-// DynamicUploader is the front-end implementation used to manage multiple ring buffers for upload
-// usage.
-namespace dawn_native {
-
- struct UploadHandle {
- uint8_t* mappedBuffer = nullptr;
- uint64_t startOffset = 0;
- StagingBufferBase* stagingBuffer = nullptr;
- };
-
- class DynamicUploader {
- public:
- DynamicUploader(DeviceBase* device);
- ~DynamicUploader() = default;
-
- // We add functions to Release StagingBuffers to the DynamicUploader as there's
- // currently no place to track the allocated staging buffers such that they're freed after
- // pending commands are finished. This should be changed when better resource allocation is
- // implemented.
- void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
-
- ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
- ExecutionSerial serial,
- uint64_t offsetAlignment);
- void Deallocate(ExecutionSerial lastCompletedSerial);
-
- private:
- static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
-
- struct RingBuffer {
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
- RingBufferAllocator mAllocator;
- };
-
- ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
- ExecutionSerial serial);
-
- std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
- SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
- DeviceBase* mDevice;
- };
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_DYNAMICUPLOADER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
deleted file mode 100644
index b218c51c207..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/EncodingContext.h"
-
-#include "common/Assert.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/IndirectDrawValidationEncoder.h"
-#include "dawn_native/RenderBundleEncoder.h"
-
-namespace dawn_native {
-
- EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
- : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
- }
-
- EncodingContext::~EncodingContext() {
- Destroy();
- }
-
- void EncodingContext::Destroy() {
- if (mDestroyed) {
- return;
- }
- if (!mWereCommandsAcquired) {
- FreeCommands(GetIterator());
- }
- // If we weren't already finished, then we want to handle an error here so that any calls
- // to Finish after Destroy will return a meaningful error.
- if (!IsFinished()) {
- HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
- }
- mDestroyed = true;
- mCurrentEncoder = nullptr;
- }
-
- CommandIterator EncodingContext::AcquireCommands() {
- MoveToIterator();
- ASSERT(!mWereCommandsAcquired);
- mWereCommandsAcquired = true;
- return std::move(mIterator);
- }
-
- CommandIterator* EncodingContext::GetIterator() {
- MoveToIterator();
- ASSERT(!mWereCommandsAcquired);
- return &mIterator;
- }
-
- void EncodingContext::MoveToIterator() {
- CommitCommands(std::move(mPendingCommands));
- if (!mWasMovedToIterator) {
- mIterator.AcquireCommandBlocks(std::move(mAllocators));
- mWasMovedToIterator = true;
- }
- }
-
- void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
- // Append in reverse so that the most recently set debug group is printed first, like a
- // call stack.
- for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
- error->AppendDebugGroup(*iter);
- }
-
- if (!IsFinished()) {
- // Encoding should only generate validation errors.
- ASSERT(error->GetType() == InternalErrorType::Validation);
- // If the encoding context is not finished, errors are deferred until
- // Finish() is called.
- if (mError == nullptr) {
- mError = std::move(error);
- }
- } else {
- mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
- }
- }
-
- void EncodingContext::WillBeginRenderPass() {
- ASSERT(mCurrentEncoder == mTopLevelEncoder);
- if (mDevice->IsValidationEnabled()) {
- // When validation is enabled, we are going to want to capture all commands encoded
- // between and including BeginRenderPassCmd and EndRenderPassCmd, and defer their
- // sequencing util after we have a chance to insert any necessary validation
- // commands. To support this we commit any current commands now, so that the
- // impending BeginRenderPassCmd starts in a fresh CommandAllocator.
- CommitCommands(std::move(mPendingCommands));
- }
- }
-
- void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
- // Assert we're at the top level.
- ASSERT(mCurrentEncoder == mTopLevelEncoder);
- ASSERT(passEncoder != nullptr);
-
- mCurrentEncoder = passEncoder;
- }
-
- MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
- RenderPassResourceUsageTracker usageTracker,
- CommandEncoder* commandEncoder,
- IndirectDrawMetadata indirectDrawMetadata) {
- ASSERT(mCurrentEncoder != mTopLevelEncoder);
- ASSERT(mCurrentEncoder == passEncoder);
-
- mCurrentEncoder = mTopLevelEncoder;
-
- if (mDevice->IsValidationEnabled()) {
- // With validation enabled, commands were committed just before BeginRenderPassCmd was
- // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
- // mPendingCommands contains only the commands from BeginRenderPassCmd to
- // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
- // the validation encoder a chance to insert its commands first.
- CommandAllocator renderCommands = std::move(mPendingCommands);
- DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
- &indirectDrawMetadata));
- CommitCommands(std::move(mPendingCommands));
- CommitCommands(std::move(renderCommands));
- }
-
- mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
- return {};
- }
-
- void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
- ComputePassResourceUsage usages) {
- ASSERT(mCurrentEncoder != mTopLevelEncoder);
- ASSERT(mCurrentEncoder == passEncoder);
-
- mCurrentEncoder = mTopLevelEncoder;
- mComputePassUsages.push_back(std::move(usages));
- }
-
- void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
- if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
- // The current pass encoder is being deleted. Implicitly end the pass with an error.
- mCurrentEncoder = mTopLevelEncoder;
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Command buffer recording ended before %s was ended.", passEncoder));
- }
- }
-
- const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
- ASSERT(!mWereRenderPassUsagesAcquired);
- return mRenderPassUsages;
- }
-
- RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
- ASSERT(!mWereRenderPassUsagesAcquired);
- mWereRenderPassUsagesAcquired = true;
- return std::move(mRenderPassUsages);
- }
-
- const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
- ASSERT(!mWereComputePassUsagesAcquired);
- return mComputePassUsages;
- }
-
- ComputePassUsages EncodingContext::AcquireComputePassUsages() {
- ASSERT(!mWereComputePassUsagesAcquired);
- mWereComputePassUsagesAcquired = true;
- return std::move(mComputePassUsages);
- }
-
- void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
- mDebugGroupLabels.emplace_back(groupLabel);
- }
-
- void EncodingContext::PopDebugGroupLabel() {
- mDebugGroupLabels.pop_back();
- }
-
- MaybeError EncodingContext::Finish() {
- DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
-
- const ApiObjectBase* currentEncoder = mCurrentEncoder;
- const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
-
- // Even if finish validation fails, it is now invalid to call any encoding commands,
- // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
- // if Finish() has been called.
- mCurrentEncoder = nullptr;
- mTopLevelEncoder = nullptr;
- CommitCommands(std::move(mPendingCommands));
-
- if (mError != nullptr) {
- return std::move(mError);
- }
- DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
- "Command buffer recording ended before %s was ended.", currentEncoder);
- return {};
- }
-
- void EncodingContext::CommitCommands(CommandAllocator allocator) {
- if (!allocator.IsEmpty()) {
- mAllocators.push_back(std::move(allocator));
- }
- }
-
- bool EncodingContext::IsFinished() const {
- return mTopLevelEncoder == nullptr;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
deleted file mode 100644
index a87d5c9712d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ENCODINGCONTEXT_H_
-#define DAWNNATIVE_ENCODINGCONTEXT_H_
-
-#include "dawn_native/CommandAllocator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/IndirectDrawMetadata.h"
-#include "dawn_native/PassResourceUsageTracker.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <string>
-
-namespace dawn_native {
-
- class CommandEncoder;
- class DeviceBase;
- class ApiObjectBase;
-
- // Base class for allocating/iterating commands.
- // It performs error tracking as well as encoding state for render/compute passes.
- class EncodingContext {
- public:
- EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
- ~EncodingContext();
-
- // Marks the encoding context as destroyed so that any future encodes will fail, and all
- // encoded commands are released.
- void Destroy();
-
- CommandIterator AcquireCommands();
- CommandIterator* GetIterator();
-
- // Functions to handle encoder errors
- void HandleError(std::unique_ptr<ErrorData> error);
-
- inline bool ConsumedError(MaybeError maybeError) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- HandleError(maybeError.AcquireError());
- return true;
- }
- return false;
- }
-
- template <typename... Args>
- inline bool ConsumedError(MaybeError maybeError,
- const char* formatStr,
- const Args&... args) {
- if (DAWN_UNLIKELY(maybeError.IsError())) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
- if (error->GetType() == InternalErrorType::Validation) {
- std::string out;
- absl::UntypedFormatSpec format(formatStr);
- if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
- error->AppendContext(std::move(out));
- } else {
- error->AppendContext(absl::StrFormat(
- "[Failed to format error message: \"%s\"].", formatStr));
- }
- }
- HandleError(std::move(error));
- return true;
- }
- return false;
- }
-
- inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
- if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
- if (mDestroyed) {
- HandleError(
- DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
- } else if (mCurrentEncoder != mTopLevelEncoder) {
- // The top level encoder was used when a pass encoder was current.
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Command cannot be recorded while %s is active.", mCurrentEncoder));
- } else {
- HandleError(DAWN_FORMAT_VALIDATION_ERROR(
- "Recording in an error or already ended %s.", encoder));
- }
- return false;
- }
- return true;
- }
-
- template <typename EncodeFunction>
- inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
- if (!CheckCurrentEncoder(encoder)) {
- return false;
- }
- ASSERT(!mWasMovedToIterator);
- return !ConsumedError(encodeFunction(&mPendingCommands));
- }
-
- template <typename EncodeFunction, typename... Args>
- inline bool TryEncode(const ApiObjectBase* encoder,
- EncodeFunction&& encodeFunction,
- const char* formatStr,
- const Args&... args) {
- if (!CheckCurrentEncoder(encoder)) {
- return false;
- }
- ASSERT(!mWasMovedToIterator);
- return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
- }
-
- // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
- // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
- // failed validation before the BeginRenderPassCmd could be encoded.
- void WillBeginRenderPass();
-
- // Functions to set current encoder state
- void EnterPass(const ApiObjectBase* passEncoder);
- MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
- RenderPassResourceUsageTracker usageTracker,
- CommandEncoder* commandEncoder,
- IndirectDrawMetadata indirectDrawMetadata);
- void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
- MaybeError Finish();
-
- // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
- // mCurrentEncoder.
- void EnsurePassExited(const ApiObjectBase* passEncoder);
-
- const RenderPassUsages& GetRenderPassUsages() const;
- const ComputePassUsages& GetComputePassUsages() const;
- RenderPassUsages AcquireRenderPassUsages();
- ComputePassUsages AcquireComputePassUsages();
-
- void PushDebugGroupLabel(const char* groupLabel);
- void PopDebugGroupLabel();
-
- private:
- void CommitCommands(CommandAllocator allocator);
-
- bool IsFinished() const;
- void MoveToIterator();
-
- DeviceBase* mDevice;
-
- // There can only be two levels of encoders. Top-level and render/compute pass.
- // The top level encoder is the encoder the EncodingContext is created with.
- // It doubles as flag to check if encoding has been Finished.
- const ApiObjectBase* mTopLevelEncoder;
- // The current encoder must be the same as the encoder provided to TryEncode,
- // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
- // The current encoder changes with Enter/ExitPass which should be called by
- // CommandEncoder::Begin/EndPass.
- const ApiObjectBase* mCurrentEncoder;
-
- RenderPassUsages mRenderPassUsages;
- bool mWereRenderPassUsagesAcquired = false;
- ComputePassUsages mComputePassUsages;
- bool mWereComputePassUsagesAcquired = false;
-
- CommandAllocator mPendingCommands;
-
- std::vector<CommandAllocator> mAllocators;
- CommandIterator mIterator;
- bool mWasMovedToIterator = false;
- bool mWereCommandsAcquired = false;
- bool mDestroyed = false;
-
- std::unique_ptr<ErrorData> mError;
- std::vector<std::string> mDebugGroupLabels;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ENCODINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h b/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h
deleted file mode 100644
index addaab9ab1c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ENUMCLASSBITMASK_H_
-#define DAWNNATIVE_ENUMCLASSBITMASK_H_
-
-#include "dawn/EnumClassBitmasks.h"
-
-namespace dawn_native {
-
- // EnumClassBitmmasks is a helper in the dawn:: namespace.
- // Re-export it in the dawn_native namespace.
- DAWN_IMPORT_BITMASK_OPERATORS
-
- // Specify this for usage with EnumMaskIterator
- template <typename T>
- struct EnumBitmaskSize {
- static constexpr unsigned value = 0;
- };
-
- template <typename T>
- constexpr bool HasOneBit(T value) {
- return HasZeroOrOneBits(value) && value != T(0);
- }
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ENUMCLASSBITMASK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h b/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h
deleted file mode 100644
index 75d639da0d3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ENUMMASKITERATOR_H_
-#define DAWNNATIVE_ENUMMASKITERATOR_H_
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/EnumClassBitmasks.h"
-
-namespace dawn_native {
-
- template <typename T>
- class EnumMaskIterator final {
- static constexpr size_t N = EnumBitmaskSize<T>::value;
- static_assert(N > 0, "");
-
- using U = std::underlying_type_t<T>;
-
- public:
- EnumMaskIterator(const T& mask) : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
- // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
- ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
- }
-
- class Iterator final {
- public:
- Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
- }
-
- Iterator& operator++() {
- ++mIter;
- return *this;
- }
-
- bool operator==(const Iterator& other) const {
- return mIter == other.mIter;
- }
-
- bool operator!=(const Iterator& other) const {
- return mIter != other.mIter;
- }
-
- T operator*() const {
- U value = *mIter;
- return static_cast<T>(U(1) << value);
- }
-
- private:
- typename BitSetIterator<N, U>::Iterator mIter;
- };
-
- Iterator begin() const {
- return Iterator(mBitSetIterator.begin());
- }
-
- Iterator end() const {
- return Iterator(mBitSetIterator.end());
- }
-
- private:
- BitSetIterator<N, U> mBitSetIterator;
- };
-
- template <typename T>
- EnumMaskIterator<T> IterateEnumMask(const T& mask) {
- return EnumMaskIterator<T>(mask);
- }
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ENUMMASKITERATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.cpp b/chromium/third_party/dawn/src/dawn_native/Error.cpp
deleted file mode 100644
index 6dcc3f86c13..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Error.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Error.h"
-
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- void IgnoreErrors(MaybeError maybeError) {
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
- // During shutdown and destruction, device lost errors can be ignored.
- // We can also ignore other unexpected internal errors on shut down and treat it as
- // device lost so that we can continue with destruction.
- ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
- errorData->GetType() == InternalErrorType::Internal);
- }
- }
-
- wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
- switch (type) {
- case InternalErrorType::Validation:
- return wgpu::ErrorType::Validation;
- case InternalErrorType::OutOfMemory:
- return wgpu::ErrorType::OutOfMemory;
-
- // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
- // the device at the API level to be lost, so treat it like a DeviceLost error.
- case InternalErrorType::Internal:
- case InternalErrorType::DeviceLost:
- return wgpu::ErrorType::DeviceLost;
-
- default:
- return wgpu::ErrorType::Unknown;
- }
- }
-
- InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
- switch (type) {
- case wgpu::ErrorType::Validation:
- return InternalErrorType::Validation;
- case wgpu::ErrorType::OutOfMemory:
- return InternalErrorType::OutOfMemory;
- case wgpu::ErrorType::DeviceLost:
- return InternalErrorType::DeviceLost;
- default:
- return InternalErrorType::Internal;
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.h b/chromium/third_party/dawn/src/dawn_native/Error.h
deleted file mode 100644
index 89296a241ea..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Error.h
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ERROR_H_
-#define DAWNNATIVE_ERROR_H_
-
-#include "absl/strings/str_format.h"
-#include "common/Result.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/webgpu_absl_format_autogen.h"
-
-#include <string>
-
-namespace dawn_native {
-
- enum class InternalErrorType : uint32_t {
- Validation,
- DeviceLost,
- Internal,
- OutOfMemory
- };
-
- // MaybeError and ResultOrError are meant to be used as return value for function that are not
- // expected to, but might fail. The handling of error is potentially much slower than successes.
- using MaybeError = Result<void, ErrorData>;
-
- template <typename T>
- using ResultOrError = Result<T, ErrorData>;
-
- // Returning a success is done like so:
- // return {}; // for Error
- // return SomethingOfTypeT; // for ResultOrError<T>
- //
- // Returning an error is done via:
- // return DAWN_MAKE_ERROR(errorType, "My error message");
- //
- // but shorthand version for specific error types are preferred:
- // return DAWN_VALIDATION_ERROR("My error message");
- //
- // There are different types of errors that should be used for different purpose:
- //
- // - Validation: these are errors that show the user did something bad, which causes the
- // whole call to be a no-op. It's most commonly found in the frontend but there can be some
- // backend specific validation in non-conformant backends too.
- //
- // - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
- // This is similar to validation errors in that the call becomes a no-op and returns an
- // error object, but is reported separated from validation to the user.
- //
- // - Device loss: the backend driver reported that the GPU has been lost, which means all
- // previous commands magically disappeared and the only thing left to do is clean up.
- // Note: Device loss should be used rarely and in most case you want to use Internal
- // instead.
- //
- // - Internal: something happened that the backend didn't expect, and it doesn't know
- // how to recover from that situation. This causes the device to be lost, but is separate
- // from device loss, because the GPU execution is still happening so we need to clean up
- // more gracefully.
- //
- // - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
- // more clarity.
-
-#define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
- ::dawn_native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
-
-#define DAWN_VALIDATION_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Validation, MESSAGE)
-
-// TODO(dawn:563): Rename to DAWN_VALIDATION_ERROR once all message format strings have been
-// converted to constexpr.
-#define DAWN_FORMAT_VALIDATION_ERROR(...) \
- DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__))
-
-#define DAWN_INVALID_IF(EXPR, ...) \
- if (DAWN_UNLIKELY(EXPR)) { \
- return DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__)); \
- } \
- for (;;) \
- break
-
-// DAWN_DEVICE_LOST_ERROR means that there was a real unrecoverable native device lost error.
-// We can't even do a graceful shutdown because the Device is gone.
-#define DAWN_DEVICE_LOST_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::DeviceLost, MESSAGE)
-
-// DAWN_INTERNAL_ERROR means Dawn hit an unexpected error in the backend and should try to
-// gracefully shut down.
-#define DAWN_INTERNAL_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Internal, MESSAGE)
-
-#define DAWN_FORMAT_INTERNAL_ERROR(...) \
- DAWN_MAKE_ERROR(InternalErrorType::Internal, absl::StrFormat(__VA_ARGS__))
-
-#define DAWN_UNIMPLEMENTED_ERROR(MESSAGE) \
- DAWN_MAKE_ERROR(InternalErrorType::Internal, std::string("Unimplemented: ") + MESSAGE)
-
-// DAWN_OUT_OF_MEMORY_ERROR means we ran out of memory. It may be used as a signal internally in
-// Dawn to free up unused resources. Or, it may bubble up to the application to signal an allocation
-// was too large or they should free some existing resources.
-#define DAWN_OUT_OF_MEMORY_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::OutOfMemory, MESSAGE)
-
-#define DAWN_CONCAT1(x, y) x##y
-#define DAWN_CONCAT2(x, y) DAWN_CONCAT1(x, y)
-#define DAWN_LOCAL_VAR DAWN_CONCAT2(_localVar, __LINE__)
-
- // When Errors aren't handled explicitly, calls to functions returning errors should be
- // wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
- // the current function.
-#define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
-
-#define DAWN_TRY_CONTEXT(EXPR, ...) \
- DAWN_TRY_WITH_CLEANUP(EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
-
-#define DAWN_TRY_WITH_CLEANUP(EXPR, BODY) \
- { \
- auto DAWN_LOCAL_VAR = EXPR; \
- if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
- std::unique_ptr<::dawn_native::ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
- {BODY} /* comment to force the formatter to insert a newline */ \
- error->AppendBacktrace(__FILE__, __func__, __LINE__); \
- return {std::move(error)}; \
- } \
- } \
- for (;;) \
- break
-
- // DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
- // any, to VAR.
-#define DAWN_TRY_ASSIGN(VAR, EXPR) DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {})
-
- // Argument helpers are used to determine which macro implementations should be called when
- // overloading with different number of variables.
-#define DAWN_ERROR_UNIMPLEMENTED_MACRO_(...) UNREACHABLE()
-#define DAWN_ERROR_GET_5TH_ARG_HELPER_(_1, _2, _3, _4, NAME, ...) NAME
-#define DAWN_ERROR_GET_5TH_ARG_(args) DAWN_ERROR_GET_5TH_ARG_HELPER_ args
-
- // DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 version so that users can override the
- // return value of the macro when necessary. This is particularly useful if the function
- // calling the macro may want to return void instead of the error, i.e. in a test where we may
- // just want to assert and fail if the assign cannot go through. In both the cleanup and return
- // clauses, users can use the `error` variable to access the pointer to the acquired error.
- //
- // Example usages:
- // 3 Argument Case:
- // Result res;
- // DAWN_TRY_ASSIGN_WITH_CLEANUP(
- // res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
- // );
- //
- // 4 Argument Case:
- // bool FunctionThatReturnsBool() {
- // DAWN_TRY_ASSIGN_WITH_CLEANUP(
- // res, GetResultOrErrorFunction(),
- // { AddAdditionalErrorInformation(error.get()); },
- // false
- // );
- // }
-#define DAWN_TRY_ASSIGN_WITH_CLEANUP(...) \
- DAWN_ERROR_GET_5TH_ARG_((__VA_ARGS__, DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_, \
- DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_, \
- DAWN_ERROR_UNIMPLEMENTED_MACRO_)) \
- (__VA_ARGS__)
-
-#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_(VAR, EXPR, BODY) \
- DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, std::move(error))
-
-#define DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_(VAR, EXPR, BODY, RET) \
- { \
- auto DAWN_LOCAL_VAR = EXPR; \
- if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
- std::unique_ptr<ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
- {BODY} /* comment to force the formatter to insert a newline */ \
- error->AppendBacktrace(__FILE__, __func__, __LINE__); \
- return (RET); \
- } \
- VAR = DAWN_LOCAL_VAR.AcquireSuccess(); \
- } \
- for (;;) \
- break
-
- // Assert that errors are device loss so that we can continue with destruction
- void IgnoreErrors(MaybeError maybeError);
-
- wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
- InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
deleted file mode 100644
index 6e281619856..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ErrorData.h"
-
-#include "dawn_native/Error.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
- std::string message,
- const char* file,
- const char* function,
- int line) {
- std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
- error->AppendBacktrace(file, function, line);
- return error;
- }
-
- ErrorData::ErrorData(InternalErrorType type, std::string message)
- : mType(type), mMessage(std::move(message)) {
- }
-
- void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
- BacktraceRecord record;
- record.file = file;
- record.function = function;
- record.line = line;
-
- mBacktrace.push_back(std::move(record));
- }
-
- void ErrorData::AppendContext(std::string context) {
- mContexts.push_back(std::move(context));
- }
-
- void ErrorData::AppendDebugGroup(std::string label) {
- mDebugGroups.push_back(std::move(label));
- }
-
- InternalErrorType ErrorData::GetType() const {
- return mType;
- }
-
- const std::string& ErrorData::GetMessage() const {
- return mMessage;
- }
-
- const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
- return mBacktrace;
- }
-
- const std::vector<std::string>& ErrorData::GetContexts() const {
- return mContexts;
- }
-
- const std::vector<std::string>& ErrorData::GetDebugGroups() const {
- return mDebugGroups;
- }
-
- std::string ErrorData::GetFormattedMessage() const {
- std::ostringstream ss;
- ss << mMessage << "\n";
-
- if (!mContexts.empty()) {
- for (auto context : mContexts) {
- ss << " - While " << context << "\n";
- }
- }
-
- // For non-validation errors, or erros that lack a context include the
- // stack trace for debugging purposes.
- if (mContexts.empty() || mType != InternalErrorType::Validation) {
- for (const auto& callsite : mBacktrace) {
- ss << " at " << callsite.function << " (" << callsite.file << ":"
- << callsite.line << ")\n";
- }
- }
-
- if (!mDebugGroups.empty()) {
- ss << "\nDebug group stack:\n";
- for (auto label : mDebugGroups) {
- ss << " > \"" << label << "\"\n";
- }
- }
-
- return ss.str();
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.h b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
deleted file mode 100644
index 477a7e721b0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ERRORDATA_H_
-#define DAWNNATIVE_ERRORDATA_H_
-
-#include "common/Compiler.h"
-
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace wgpu {
- enum class ErrorType : uint32_t;
-}
-
-namespace dawn {
- using ErrorType = wgpu::ErrorType;
-}
-
-namespace dawn_native {
- enum class InternalErrorType : uint32_t;
-
- class DAWN_NO_DISCARD ErrorData {
- public:
- static DAWN_NO_DISCARD std::unique_ptr<ErrorData> Create(InternalErrorType type,
- std::string message,
- const char* file,
- const char* function,
- int line);
- ErrorData(InternalErrorType type, std::string message);
-
- struct BacktraceRecord {
- const char* file;
- const char* function;
- int line;
- };
- void AppendBacktrace(const char* file, const char* function, int line);
- void AppendContext(std::string context);
- void AppendDebugGroup(std::string label);
-
- InternalErrorType GetType() const;
- const std::string& GetMessage() const;
- const std::vector<BacktraceRecord>& GetBacktrace() const;
- const std::vector<std::string>& GetContexts() const;
- const std::vector<std::string>& GetDebugGroups() const;
-
- std::string GetFormattedMessage() const;
-
- private:
- InternalErrorType mType;
- std::string mMessage;
- std::vector<BacktraceRecord> mBacktrace;
- std::vector<std::string> mContexts;
- std::vector<std::string> mDebugGroups;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ERRORDATA_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp
deleted file mode 100644
index 836ef1ecab6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ErrorInjector.h"
-
-#include "common/Assert.h"
-#include "dawn_native/DawnNative.h"
-
-namespace dawn_native {
-
- namespace {
-
- bool sIsEnabled = false;
- uint64_t sNextIndex = 0;
- uint64_t sInjectedFailureIndex = 0;
- bool sHasPendingInjectedError = false;
-
- } // anonymous namespace
-
- void EnableErrorInjector() {
- sIsEnabled = true;
- }
-
- void DisableErrorInjector() {
- sIsEnabled = false;
- }
-
- void ClearErrorInjector() {
- sNextIndex = 0;
- sHasPendingInjectedError = false;
- }
-
- bool ErrorInjectorEnabled() {
- return sIsEnabled;
- }
-
- uint64_t AcquireErrorInjectorCallCount() {
- uint64_t count = sNextIndex;
- ClearErrorInjector();
- return count;
- }
-
- bool ShouldInjectError() {
- uint64_t index = sNextIndex++;
- if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
- sHasPendingInjectedError = false;
- return true;
- }
- return false;
- }
-
- void InjectErrorAt(uint64_t index) {
- // Only one error can be injected at a time.
- ASSERT(!sHasPendingInjectedError);
- sInjectedFailureIndex = index;
- sHasPendingInjectedError = true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h
deleted file mode 100644
index 4d7d2b8a2b6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ERRORINJECTOR_H_
-#define DAWNNATIVE_ERRORINJECTOR_H_
-
-#include <stdint.h>
-#include <type_traits>
-
-namespace dawn_native {
-
- template <typename ErrorType>
- struct InjectedErrorResult {
- ErrorType error;
- bool injected;
- };
-
- bool ErrorInjectorEnabled();
-
- bool ShouldInjectError();
-
- template <typename ErrorType>
- InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
- return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
- }
-
- template <typename ErrorType, typename... ErrorTypes>
- InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
- if (ShouldInjectError()) {
- return InjectedErrorResult<ErrorType>{errorType, true};
- }
- return MaybeInjectError(errorTypes...);
- }
-
-} // namespace dawn_native
-
-#if defined(DAWN_ENABLE_ERROR_INJECTION)
-
-# define INJECT_ERROR_OR_RUN(stmt, ...) \
- [&]() { \
- if (DAWN_UNLIKELY(::dawn_native::ErrorInjectorEnabled())) { \
- /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
- auto injectedError = ::dawn_native::MaybeInjectError(__VA_ARGS__); \
- if (injectedError.injected) { \
- return injectedError.error; \
- } \
- } \
- return (stmt); \
- }()
-
-#else
-
-# define INJECT_ERROR_OR_RUN(stmt, ...) stmt
-
-#endif
-
-#endif // DAWNNATIVE_ERRORINJECTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
deleted file mode 100644
index 4aebf1a8d79..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ErrorScope.h"
-
-#include "common/Assert.h"
-
-namespace dawn_native {
-
- namespace {
-
- wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
- switch (filter) {
- case wgpu::ErrorFilter::Validation:
- return wgpu::ErrorType::Validation;
- case wgpu::ErrorFilter::OutOfMemory:
- return wgpu::ErrorType::OutOfMemory;
- }
- UNREACHABLE();
- }
-
- } // namespace
-
- ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
- : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
- }
-
- wgpu::ErrorType ErrorScope::GetErrorType() const {
- return mCapturedError;
- }
-
- const char* ErrorScope::GetErrorMessage() const {
- return mErrorMessage.c_str();
- }
-
- void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
- mScopes.push_back(ErrorScope(filter));
- }
-
- ErrorScope ErrorScopeStack::Pop() {
- ASSERT(!mScopes.empty());
- ErrorScope scope = std::move(mScopes.back());
- mScopes.pop_back();
- return scope;
- }
-
- bool ErrorScopeStack::Empty() const {
- return mScopes.empty();
- }
-
- bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
- for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
- if (it->mMatchedErrorType != type) {
- // Error filter does not match. Move on to the next scope.
- continue;
- }
-
- // Filter matches.
- // Record the error if the scope doesn't have one yet.
- if (it->mCapturedError == wgpu::ErrorType::NoError) {
- it->mCapturedError = type;
- it->mErrorMessage = message;
- }
-
- if (type == wgpu::ErrorType::DeviceLost) {
- if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
- // DeviceLost overrides any other error that is not a DeviceLost.
- it->mCapturedError = type;
- it->mErrorMessage = message;
- }
- } else {
- // Errors that are not device lost are captured and stop propogating.
- return true;
- }
- }
-
- // The error was not captured.
- return false;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.h b/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
deleted file mode 100644
index 2d74456f717..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_ERRORSCOPE_H_
-#define DAWNNATIVE_ERRORSCOPE_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include <string>
-#include <vector>
-
-namespace dawn_native {
-
- class ErrorScope {
- public:
- wgpu::ErrorType GetErrorType() const;
- const char* GetErrorMessage() const;
-
- private:
- friend class ErrorScopeStack;
- explicit ErrorScope(wgpu::ErrorFilter errorFilter);
-
- wgpu::ErrorType mMatchedErrorType;
- wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
- std::string mErrorMessage = "";
- };
-
- class ErrorScopeStack {
- public:
- void Push(wgpu::ErrorFilter errorFilter);
- ErrorScope Pop();
-
- bool Empty() const;
-
- // Pass an error to the scopes in the stack. Returns true if one of the scopes
- // captured the error. Returns false if the error should be forwarded to the
- // uncaptured error callback.
- bool HandleError(wgpu::ErrorType type, const char* message);
-
- private:
- std::vector<ErrorScope> mScopes;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_ERRORSCOPE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
deleted file mode 100644
index 14a9e1dbbb5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ExternalTexture.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/Texture.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView,
- wgpu::TextureFormat format) {
- if (textureView->GetFormat().format != format) {
- return DAWN_VALIDATION_ERROR(
- "The external texture descriptor specifies a texture format that is different from "
- "at least one of the passed texture views.");
- }
-
- DAWN_INVALID_IF(
- (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
- "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
- textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
-
- DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
- "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
- textureView->GetDimension());
-
- DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
- "The external texture plane (%s) mip level count (%u) is not 1.",
- textureView, textureView->GetLevelCount());
-
- DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
- "The external texture plane (%s) sample count (%u) is not one.",
- textureView, textureView->GetTexture()->GetSampleCount());
-
- return {};
- }
-
- MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
- const ExternalTextureDescriptor* descriptor) {
- ASSERT(descriptor);
- ASSERT(descriptor->plane0);
-
- DAWN_TRY(device->ValidateObject(descriptor->plane0));
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- DAWN_UNUSED(format);
-
- switch (descriptor->format) {
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::RGBA16Float:
- DAWN_TRY_CONTEXT(
- ValidateExternalTexturePlane(descriptor->plane0, descriptor->format),
- "validating plane0 against the external texture format (%s)",
- descriptor->format);
- break;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Format (%s) is not a supported external texture format.", descriptor->format);
- }
-
- return {};
- }
-
- // static
- ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
- DeviceBase* device,
- const ExternalTextureDescriptor* descriptor) {
- Ref<ExternalTextureBase> externalTexture =
- AcquireRef(new ExternalTextureBase(device, descriptor));
- return std::move(externalTexture);
- }
-
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
- const ExternalTextureDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
- textureViews[0] = descriptor->plane0;
- TrackInDevice();
- }
-
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
- TrackInDevice();
- }
-
- ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
- ExternalTextureBase::GetTextureViews() const {
- return textureViews;
- }
-
- MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
- "Destroyed external texture %s is used in a submit.", this);
- return {};
- }
-
- void ExternalTextureBase::APIDestroy() {
- if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
- return;
- }
- Destroy();
- }
-
- void ExternalTextureBase::DestroyImpl() {
- mState = ExternalTextureState::Destroyed;
- }
-
- // static
- ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
- return new ExternalTextureBase(device, ObjectBase::kError);
- }
-
- ObjectType ExternalTextureBase::GetType() const {
- return ObjectType::ExternalTexture;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
deleted file mode 100644
index e8596fb8e67..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_EXTERNALTEXTURE_H_
-#define DAWNNATIVE_EXTERNALTEXTURE_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/Subresource.h"
-
-#include <array>
-
-namespace dawn_native {
-
- struct ExternalTextureDescriptor;
- class TextureViewBase;
-
- MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
- const ExternalTextureDescriptor* descriptor);
-
- class ExternalTextureBase : public ApiObjectBase {
- public:
- static ResultOrError<Ref<ExternalTextureBase>> Create(
- DeviceBase* device,
- const ExternalTextureDescriptor* descriptor);
-
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
-
- MaybeError ValidateCanUseInSubmitNow() const;
-
- static ExternalTextureBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- void APIDestroy();
-
- protected:
- // Constructor used only for mocking and testing.
- ExternalTextureBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- enum class ExternalTextureState { Alive, Destroyed };
- ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
- ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> textureViews;
- ExternalTextureState mState;
- };
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_EXTERNALTEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Features.cpp b/chromium/third_party/dawn/src/dawn_native/Features.cpp
deleted file mode 100644
index b09caf20b1d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Features.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <array>
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "dawn_native/Features.h"
-
-namespace dawn_native {
- namespace {
-
- struct FeatureEnumAndInfo {
- Feature feature;
- FeatureInfo info;
- bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
- };
-
- using FeatureEnumAndInfoList =
- std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
-
- static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
- {{Feature::TextureCompressionBC,
- {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
- &WGPUDeviceProperties::textureCompressionBC},
- {Feature::TextureCompressionETC2,
- {"texture-compression-etc2",
- "Support Ericsson Texture Compressed (ETC2/EAC) texture "
- "formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
- &WGPUDeviceProperties::textureCompressionETC2},
- {Feature::TextureCompressionASTC,
- {"texture-compression-astc",
- "Support Adaptable Scalable Texture Compressed (ASTC) "
- "texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
- &WGPUDeviceProperties::textureCompressionASTC},
- {Feature::ShaderFloat16,
- {"shader-float16",
- "Support 16bit float arithmetic and declarations in uniform and storage buffers",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
- &WGPUDeviceProperties::shaderFloat16},
- {Feature::PipelineStatisticsQuery,
- {"pipeline-statistics-query", "Support Pipeline Statistics Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::pipelineStatisticsQuery},
- {Feature::TimestampQuery,
- {"timestamp-query", "Support Timestamp Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::timestampQuery},
- {Feature::DepthClamping,
- {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
- &WGPUDeviceProperties::depthClamping},
- {Feature::Depth24UnormStencil8,
- {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
- &WGPUDeviceProperties::depth24UnormStencil8},
- {Feature::Depth32FloatStencil8,
- {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
- &WGPUDeviceProperties::depth32FloatStencil8},
- {Feature::DawnInternalUsages,
- {"dawn-internal-usages",
- "Add internal usages to resources to affect how the texture is allocated, but not "
- "frontend validation. Other internal commands may access this usage.",
- "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/features/"
- "dawn_internal_usages.md"},
- &WGPUDeviceProperties::dawnInternalUsages},
- {Feature::MultiPlanarFormats,
- {"multiplanar-formats",
- "Import and use multi-planar texture formats with per plane views",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
- &WGPUDeviceProperties::multiPlanarFormats}}};
-
- } // anonymous namespace
-
- void FeaturesSet::EnableFeature(Feature feature) {
- ASSERT(feature != Feature::InvalidEnum);
- const size_t featureIndex = static_cast<size_t>(feature);
- featuresBitSet.set(featureIndex);
- }
-
- bool FeaturesSet::IsEnabled(Feature feature) const {
- ASSERT(feature != Feature::InvalidEnum);
- const size_t featureIndex = static_cast<size_t>(feature);
- return featuresBitSet[featureIndex];
- }
-
- std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
- std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
-
- uint32_t index = 0;
- for (uint32_t i : IterateBitSet(featuresBitSet)) {
- const char* featureName = FeatureEnumToName(static_cast<Feature>(i));
- enabledFeatureNames[index] = featureName;
- ++index;
- }
- return enabledFeatureNames;
- }
-
- void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
- ASSERT(properties != nullptr);
-
- for (uint32_t i : IterateBitSet(featuresBitSet)) {
- properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
- }
- }
-
- const char* FeatureEnumToName(Feature feature) {
- ASSERT(feature != Feature::InvalidEnum);
-
- const FeatureEnumAndInfo& featureNameAndInfo =
- kFeatureNameAndInfoList[static_cast<size_t>(feature)];
- ASSERT(featureNameAndInfo.feature == feature);
- return featureNameAndInfo.info.name;
- }
-
- FeaturesInfo::FeaturesInfo() {
- for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
- const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
- ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
- mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
- }
- }
-
- const FeatureInfo* FeaturesInfo::GetFeatureInfo(const char* featureName) const {
- ASSERT(featureName);
-
- const auto& iter = mFeatureNameToEnumMap.find(featureName);
- if (iter != mFeatureNameToEnumMap.cend()) {
- return &kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].info;
- }
- return nullptr;
- }
-
- Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
- ASSERT(featureName);
-
- const auto& iter = mFeatureNameToEnumMap.find(featureName);
- if (iter != mFeatureNameToEnumMap.cend()) {
- return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
- }
-
- // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
- constexpr std::array<std::pair<const char*, const char*>, 6>
- kReplacementsForDeprecatedNames = {{
- {"texture_compression_bc", "texture-compression-bc"},
- {"depth_clamping", "depth-clamping"},
- {"pipeline_statistics_query", "pipeline-statistics-query"},
- {"shader_float16", "shader-float16"},
- {"timestamp_query", "timestamp-query"},
- {"multiplanar_formats", "multiplanar-formats"},
- }};
- for (const auto& replacement : kReplacementsForDeprecatedNames) {
- if (strcmp(featureName, replacement.first) == 0) {
- return FeatureNameToEnum(replacement.second);
- }
- }
-
- return Feature::InvalidEnum;
- }
-
- FeaturesSet FeaturesInfo::FeatureNamesToFeaturesSet(
- const std::vector<const char*>& requiredFeatures) const {
- FeaturesSet featuresSet;
-
- for (const char* featureName : requiredFeatures) {
- Feature featureEnum = FeatureNameToEnum(featureName);
- ASSERT(featureEnum != Feature::InvalidEnum);
- featuresSet.EnableFeature(featureEnum);
- }
- return featuresSet;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Features.h b/chromium/third_party/dawn/src/dawn_native/Features.h
deleted file mode 100644
index 699ddc794e7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Features.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_FEATURES_H_
-#define DAWNNATIVE_FEATURES_H_
-
-#include <bitset>
-#include <unordered_map>
-#include <vector>
-
-#include "dawn_native/DawnNative.h"
-
-namespace dawn_native {
-
- enum class Feature {
- TextureCompressionBC,
- TextureCompressionETC2,
- TextureCompressionASTC,
- ShaderFloat16,
- PipelineStatisticsQuery,
- TimestampQuery,
- DepthClamping,
- Depth24UnormStencil8,
- Depth32FloatStencil8,
-
- // Dawn-specific
- DawnInternalUsages,
- MultiPlanarFormats,
-
- EnumCount,
- InvalidEnum = EnumCount,
- FeatureMin = TextureCompressionBC,
- };
-
- // A wrapper of the bitset to store if an feature is enabled or not. This wrapper provides the
- // convenience to convert the enums of enum class Feature to the indices of a bitset.
- struct FeaturesSet {
- std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
-
- void EnableFeature(Feature feature);
- bool IsEnabled(Feature feature) const;
- std::vector<const char*> GetEnabledFeatureNames() const;
- void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
- };
-
- const char* FeatureEnumToName(Feature feature);
-
- class FeaturesInfo {
- public:
- FeaturesInfo();
-
- // Used to query the details of an feature. Return nullptr if featureName is not a valid
- // name of an feature supported in Dawn
- const FeatureInfo* GetFeatureInfo(const char* featureName) const;
- Feature FeatureNameToEnum(const char* featureName) const;
- FeaturesSet FeatureNamesToFeaturesSet(
- const std::vector<const char*>& requiredFeatures) const;
-
- private:
- std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_FEATURES_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
deleted file mode 100644
index c8d05883692..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Format.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Features.h"
-#include "dawn_native/Texture.h"
-
-#include <bitset>
-
-namespace dawn_native {
-
- // Format
-
- // TODO(dawn:527): Remove when unused.
- SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
- switch (type) {
- case wgpu::TextureComponentType::Float:
- return SampleTypeBit::Float;
- case wgpu::TextureComponentType::Sint:
- return SampleTypeBit::Sint;
- case wgpu::TextureComponentType::Uint:
- return SampleTypeBit::Uint;
- case wgpu::TextureComponentType::DepthComparison:
- return SampleTypeBit::Depth;
- }
- UNREACHABLE();
- }
-
- SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
- switch (sampleType) {
- case wgpu::TextureSampleType::Float:
- case wgpu::TextureSampleType::UnfilterableFloat:
- case wgpu::TextureSampleType::Sint:
- case wgpu::TextureSampleType::Uint:
- case wgpu::TextureSampleType::Depth:
- case wgpu::TextureSampleType::Undefined:
- // When the compiler complains that you need to add a case statement here, please
- // also add a corresponding static assert below!
- break;
- }
-
- static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0, "");
- if (sampleType == wgpu::TextureSampleType::Undefined) {
- return SampleTypeBit::None;
- }
-
- // Check that SampleTypeBit bits are in the same position / order as the respective
- // wgpu::TextureSampleType value.
- static_assert(SampleTypeBit::Float ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)),
- "");
- static_assert(
- SampleTypeBit::UnfilterableFloat ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)),
- "");
- static_assert(SampleTypeBit::Uint ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)),
- "");
- static_assert(SampleTypeBit::Sint ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)),
- "");
- static_assert(SampleTypeBit::Depth ==
- static_cast<SampleTypeBit>(
- 1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)),
- "");
- return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
- }
-
- bool Format::IsColor() const {
- return aspects == Aspect::Color;
- }
-
- bool Format::HasDepth() const {
- return (aspects & Aspect::Depth) != 0;
- }
-
- bool Format::HasStencil() const {
- return (aspects & Aspect::Stencil) != 0;
- }
-
- bool Format::HasDepthOrStencil() const {
- return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
- }
-
- bool Format::IsMultiPlanar() const {
- return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
- }
-
- const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
- return GetAspectInfo(SelectFormatAspects(*this, aspect));
- }
-
- const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
- ASSERT(HasOneBit(aspect));
- ASSERT(aspects & aspect);
- const size_t aspectIndex = GetAspectIndex(aspect);
- ASSERT(aspectIndex < GetAspectCount(aspects));
- return aspectInfo[aspectIndex];
- }
-
- size_t Format::GetIndex() const {
- return ComputeFormatIndex(format);
- }
-
- // Implementation details of the format table of the DeviceBase
-
- // For the enum for formats are packed but this might change when we have a broader feature
- // mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
- size_t ComputeFormatIndex(wgpu::TextureFormat format) {
- // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
- // of the range of the FormatTable.
- static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount,
- "");
- return static_cast<size_t>(static_cast<uint32_t>(format) - 1);
- }
-
- FormatTable BuildFormatTable(const DeviceBase* device) {
- FormatTable table;
- std::bitset<kKnownFormatCount> formatsSet;
-
- static constexpr SampleTypeBit kAnyFloat =
- SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
-
- auto AddFormat = [&table, &formatsSet](Format format) {
- size_t index = ComputeFormatIndex(format.format);
- ASSERT(index < table.size());
-
- // This checks that each format is set at most once, the first part of checking that all
- // formats are set exactly once.
- ASSERT(!formatsSet[index]);
-
- // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
- // ASSERT isn't true, then additional validation on bytesPerRow must be added.
- const bool hasMultipleAspects = !HasOneBit(format.aspects);
- ASSERT(hasMultipleAspects ||
- (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
-
- table[index] = format;
- formatsSet.set(index);
- };
-
- auto AddColorFormat = [&AddFormat](wgpu::TextureFormat format, bool renderable,
- bool supportsStorageUsage, uint32_t byteSize,
- SampleTypeBit sampleTypes, uint8_t componentCount) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = renderable;
- internalFormat.isCompressed = false;
- internalFormat.isSupported = true;
- internalFormat.supportsStorageUsage = supportsStorageUsage;
- internalFormat.aspects = Aspect::Color;
- internalFormat.componentCount = componentCount;
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = byteSize;
- firstAspect->block.width = 1;
- firstAspect->block.height = 1;
- if (HasOneBit(sampleTypes)) {
- switch (sampleTypes) {
- case SampleTypeBit::Float:
- case SampleTypeBit::UnfilterableFloat:
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- break;
- case SampleTypeBit::Sint:
- firstAspect->baseType = wgpu::TextureComponentType::Sint;
- break;
- case SampleTypeBit::Uint:
- firstAspect->baseType = wgpu::TextureComponentType::Uint;
- break;
- default:
- UNREACHABLE();
- }
- } else {
- ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- }
- firstAspect->supportedSampleTypes = sampleTypes;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
- bool isSupported) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = true;
- internalFormat.isCompressed = false;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.aspects = Aspect::Depth;
- internalFormat.componentCount = 1;
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = byteSize;
- firstAspect->block.width = 1;
- firstAspect->block.height = 1;
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = true;
- internalFormat.isCompressed = false;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.aspects = Aspect::Stencil;
- internalFormat.componentCount = 1;
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = 1;
- firstAspect->block.width = 1;
- firstAspect->block.height = 1;
- firstAspect->baseType = wgpu::TextureComponentType::Uint;
- firstAspect->supportedSampleTypes = SampleTypeBit::Uint;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddCompressedFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
- uint32_t width, uint32_t height, bool isSupported,
- uint8_t componentCount) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = false;
- internalFormat.isCompressed = true;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.aspects = Aspect::Color;
- internalFormat.componentCount = componentCount;
- AspectInfo* firstAspect = internalFormat.aspectInfo.data();
- firstAspect->block.byteSize = byteSize;
- firstAspect->block.width = width;
- firstAspect->block.height = height;
- firstAspect->baseType = wgpu::TextureComponentType::Float;
- firstAspect->supportedSampleTypes = kAnyFloat;
- firstAspect->format = format;
- AddFormat(internalFormat);
- };
-
- auto AddMultiAspectFormat =
- [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
- wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
- bool isRenderable, bool isSupported, uint8_t componentCount) {
- Format internalFormat;
- internalFormat.format = format;
- internalFormat.isRenderable = isRenderable;
- internalFormat.isCompressed = false;
- internalFormat.isSupported = isSupported;
- internalFormat.supportsStorageUsage = false;
- internalFormat.aspects = aspects;
- internalFormat.componentCount = componentCount;
- const size_t firstFormatIndex = ComputeFormatIndex(firstFormat);
- const size_t secondFormatIndex = ComputeFormatIndex(secondFormat);
-
- internalFormat.aspectInfo[0] = table[firstFormatIndex].aspectInfo[0];
- internalFormat.aspectInfo[1] = table[secondFormatIndex].aspectInfo[0];
-
- AddFormat(internalFormat);
- };
-
- // clang-format off
- // 1 byte color formats
- AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, 1, kAnyFloat, 1);
- AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, 1, kAnyFloat, 1);
- AddColorFormat(wgpu::TextureFormat::R8Uint, true, false, 1, SampleTypeBit::Uint, 1);
- AddColorFormat(wgpu::TextureFormat::R8Sint, true, false, 1, SampleTypeBit::Sint, 1);
-
- // 2 bytes color formats
- AddColorFormat(wgpu::TextureFormat::R16Uint, true, false, 2, SampleTypeBit::Uint, 1);
- AddColorFormat(wgpu::TextureFormat::R16Sint, true, false, 2, SampleTypeBit::Sint, 1);
- AddColorFormat(wgpu::TextureFormat::R16Float, true, false, 2, kAnyFloat, 1);
- AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, false, 2, kAnyFloat, 2);
- AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, false, 2, kAnyFloat, 2);
- AddColorFormat(wgpu::TextureFormat::RG8Uint, true, false, 2, SampleTypeBit::Uint, 2);
- AddColorFormat(wgpu::TextureFormat::RG8Sint, true, false, 2, SampleTypeBit::Sint, 2);
-
- // 4 bytes color formats
- AddColorFormat(wgpu::TextureFormat::R32Uint, true, true, 4, SampleTypeBit::Uint, 1);
- AddColorFormat(wgpu::TextureFormat::R32Sint, true, true, 4, SampleTypeBit::Sint, 1);
- AddColorFormat(wgpu::TextureFormat::R32Float, true, true, 4, SampleTypeBit::UnfilterableFloat, 1);
- AddColorFormat(wgpu::TextureFormat::RG16Uint, true, false, 4, SampleTypeBit::Uint, 2);
- AddColorFormat(wgpu::TextureFormat::RG16Sint, true, false, 4, SampleTypeBit::Sint, 2);
- AddColorFormat(wgpu::TextureFormat::RG16Float, true, false, 4, kAnyFloat, 2);
- AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, true, 4, kAnyFloat, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, false, 4, kAnyFloat, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, true, 4, kAnyFloat, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, true, 4, SampleTypeBit::Uint, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, true, 4, SampleTypeBit::Sint, 4);
- AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, false, 4, kAnyFloat, 4);
- AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, false, 4, kAnyFloat, 4);
- AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, false, 4, kAnyFloat, 4);
-
- AddColorFormat(wgpu::TextureFormat::RG11B10Ufloat, false, false, 4, kAnyFloat, 3);
- AddColorFormat(wgpu::TextureFormat::RGB9E5Ufloat, false, false, 4, kAnyFloat, 3);
-
- // 8 bytes color formats
- AddColorFormat(wgpu::TextureFormat::RG32Uint, true, true, 8, SampleTypeBit::Uint, 2);
- AddColorFormat(wgpu::TextureFormat::RG32Sint, true, true, 8, SampleTypeBit::Sint, 2);
- AddColorFormat(wgpu::TextureFormat::RG32Float, true, true, 8, SampleTypeBit::UnfilterableFloat, 2);
- AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, true, 8, SampleTypeBit::Uint, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, true, 8, SampleTypeBit::Sint, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, true, 8, kAnyFloat, 4);
-
- // 16 bytes color formats
- AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, true, 16, SampleTypeBit::Uint, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, true, 16, SampleTypeBit::Sint, 4);
- AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, true, 16, SampleTypeBit::UnfilterableFloat, 4);
-
- // Depth-stencil formats
- // TODO(dawn:666): Implement the stencil8 format
- AddStencilFormat(wgpu::TextureFormat::Stencil8, false);
- AddDepthFormat(wgpu::TextureFormat::Depth16Unorm, 2, true);
- // TODO(crbug.com/dawn/843): This is 4 because we read this to perform zero initialization,
- // and textures are always use depth32float. We should improve this to be more robust. Perhaps,
- // using 0 here to mean "unsized" and adding a backend-specific query for the block size.
- AddDepthFormat(wgpu::TextureFormat::Depth24Plus, 4, true);
- AddMultiAspectFormat(wgpu::TextureFormat::Depth24PlusStencil8,
- Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, true, 2);
- bool isD24S8Supported = device->IsFeatureEnabled(Feature::Depth24UnormStencil8);
- AddMultiAspectFormat(wgpu::TextureFormat::Depth24UnormStencil8,
- Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, isD24S8Supported, 2);
- AddDepthFormat(wgpu::TextureFormat::Depth32Float, 4, true);
- bool isD32S8Supported = device->IsFeatureEnabled(Feature::Depth32FloatStencil8);
- AddMultiAspectFormat(wgpu::TextureFormat::Depth32FloatStencil8,
- Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Stencil8, true, isD32S8Supported, 2);
-
- // BC compressed formats
- bool isBCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionBC);
- AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported, 1);
- AddCompressedFormat(wgpu::TextureFormat::BC4RUnorm, 8, 4, 4, isBCFormatSupported, 1);
- AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC5RGSnorm, 16, 4, 4, isBCFormatSupported, 2);
- AddCompressedFormat(wgpu::TextureFormat::BC5RGUnorm, 16, 4, 4, isBCFormatSupported, 2);
- AddCompressedFormat(wgpu::TextureFormat::BC6HRGBFloat, 16, 4, 4, isBCFormatSupported, 3);
- AddCompressedFormat(wgpu::TextureFormat::BC6HRGBUfloat, 16, 4, 4, isBCFormatSupported, 3);
- AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4);
-
- // ETC2/EAC compressed formats
- bool isETC2FormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionETC2);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8Unorm, 8, 4, 4, isETC2FormatSupported, 3);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8UnormSrgb, 8, 4, 4, isETC2FormatSupported, 3);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1Unorm, 8, 4, 4, isETC2FormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1UnormSrgb, 8, 4, 4, isETC2FormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8Unorm, 16, 4, 4, isETC2FormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8UnormSrgb, 16, 4, 4, isETC2FormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::EACR11Unorm, 8, 4, 4, isETC2FormatSupported, 1);
- AddCompressedFormat(wgpu::TextureFormat::EACR11Snorm, 8, 4, 4, isETC2FormatSupported, 1);
- AddCompressedFormat(wgpu::TextureFormat::EACRG11Unorm, 16, 4, 4, isETC2FormatSupported, 2);
- AddCompressedFormat(wgpu::TextureFormat::EACRG11Snorm, 16, 4, 4, isETC2FormatSupported, 2);
-
- // ASTC compressed formats
- bool isASTCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionASTC);
- AddCompressedFormat(wgpu::TextureFormat::ASTC4x4Unorm, 16, 4, 4, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC4x4UnormSrgb, 16, 4, 4, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC5x4Unorm, 16, 5, 4, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC5x4UnormSrgb, 16, 5, 4, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC5x5Unorm, 16, 5, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC5x5UnormSrgb, 16, 5, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC6x5Unorm, 16, 6, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC6x5UnormSrgb, 16, 6, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC6x6Unorm, 16, 6, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC6x6UnormSrgb, 16, 6, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x5Unorm, 16, 8, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x5UnormSrgb, 16, 8, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x6Unorm, 16, 8, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x6UnormSrgb, 16, 8, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x8Unorm, 16, 8, 8, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC8x8UnormSrgb, 16, 8, 8, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x5Unorm, 16, 10, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x5UnormSrgb, 16, 10, 5, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x6Unorm, 16, 10, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x6UnormSrgb, 16, 10, 6, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x8Unorm, 16, 10, 8, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x8UnormSrgb, 16, 10, 8, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x10Unorm, 16, 10, 10, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC10x10UnormSrgb, 16, 10, 10, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC12x10Unorm, 16, 12, 10, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC12x10UnormSrgb, 16, 12, 10, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC12x12Unorm, 16, 12, 12, isASTCFormatSupported, 4);
- AddCompressedFormat(wgpu::TextureFormat::ASTC12x12UnormSrgb, 16, 12, 12, isASTCFormatSupported, 4);
-
- // multi-planar formats
- const bool isMultiPlanarFormatSupported = device->IsFeatureEnabled(Feature::MultiPlanarFormats);
- AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
- wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, 3);
-
- // clang-format on
-
- // This checks that each format is set at least once, the second part of checking that all
- // formats are checked exactly once.
- ASSERT(formatsSet.all());
-
- return table;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
deleted file mode 100644
index 2f604d366d6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_FORMAT_H_
-#define DAWNNATIVE_FORMAT_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "common/ityp_bitset.h"
-#include "dawn_native/EnumClassBitmasks.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Subresource.h"
-
-#include <array>
-
-// About multi-planar formats.
-//
-// Dawn supports additional multi-planar formats when the multiplanar-formats extension is enabled.
-// When enabled, Dawn treats planar data as sub-resources (ie. 1 sub-resource == 1 view == 1 plane).
-// A multi-planar format name encodes the channel mapping and order of planes. For example,
-// R8BG8Biplanar420Unorm is YUV 4:2:0 where Plane 0 = R8, and Plane 1 = BG8.
-//
-// Requirements:
-// * Plane aspects cannot be combined with color, depth, or stencil aspects.
-// * Only compatible multi-planar formats of planes can be used with multi-planar texture
-// formats.
-// * Can't access multiple planes without creating per plane views (no color conversion).
-// * Multi-planar format cannot be written or read without a per plane view.
-//
-// TODO(dawn:551): Consider moving this comment.
-
-namespace dawn_native {
-
- enum class Aspect : uint8_t;
- class DeviceBase;
-
- // This mirrors wgpu::TextureSampleType as a bitmask instead.
- enum class SampleTypeBit : uint8_t {
- None = 0x0,
- Float = 0x1,
- UnfilterableFloat = 0x2,
- Depth = 0x4,
- Sint = 0x8,
- Uint = 0x10,
- };
-
- // Converts an wgpu::TextureComponentType to its bitmask representation.
- SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
- // Converts an wgpu::TextureSampleType to its bitmask representation.
- SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
-
- struct TexelBlockInfo {
- uint32_t byteSize;
- uint32_t width;
- uint32_t height;
- };
-
- struct AspectInfo {
- TexelBlockInfo block;
- // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
- // an internal Dawn enum.
- wgpu::TextureComponentType baseType;
- SampleTypeBit supportedSampleTypes;
- wgpu::TextureFormat format;
- };
-
- // The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
- // exact number of known format.
- static constexpr size_t kKnownFormatCount = 96;
-
- struct Format;
- using FormatTable = std::array<Format, kKnownFormatCount>;
-
- // A wgpu::TextureFormat along with all the information about it necessary for validation.
- struct Format {
- wgpu::TextureFormat format;
- bool isRenderable;
- bool isCompressed;
- // A format can be known but not supported because it is part of a disabled extension.
- bool isSupported;
- bool supportsStorageUsage;
- Aspect aspects;
- // Only used for renderable color formats, number of color channels.
- uint8_t componentCount;
-
- bool IsColor() const;
- bool HasDepth() const;
- bool HasStencil() const;
- bool HasDepthOrStencil() const;
-
- // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
- // allowed by multi-planar formats (ex. NV12).
- bool IsMultiPlanar() const;
-
- const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
- const AspectInfo& GetAspectInfo(Aspect aspect) const;
-
- // The index of the format in the list of all known formats: a unique number for each format
- // in [0, kKnownFormatCount)
- size_t GetIndex() const;
-
- private:
- // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
- // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
- // info is depth and the second aspect info is stencil. For multi-planar formats,
- // aspectInfo[i] is the ith plane.
- std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
-
- friend FormatTable BuildFormatTable(const DeviceBase* device);
- };
-
- // Implementation details of the format table in the device.
-
- // Returns the index of a format in the FormatTable.
- size_t ComputeFormatIndex(wgpu::TextureFormat format);
- // Builds the format table with the extensions enabled on the device.
- FormatTable BuildFormatTable(const DeviceBase* device);
-
-} // namespace dawn_native
-
-namespace dawn {
-
- template <>
- struct IsDawnBitmask<dawn_native::SampleTypeBit> {
- static constexpr bool enable = true;
- };
-
-} // namespace dawn
-
-#endif // DAWNNATIVE_FORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Forward.h b/chromium/third_party/dawn/src/dawn_native/Forward.h
deleted file mode 100644
index 2716f63c14d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Forward.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_FORWARD_H_
-#define DAWNNATIVE_FORWARD_H_
-
-#include <cstdint>
-
-template <typename T>
-class Ref;
-
-namespace dawn_native {
-
- enum class ObjectType : uint32_t;
-
- class AdapterBase;
- class BindGroupBase;
- class BindGroupLayoutBase;
- class BufferBase;
- class ComputePipelineBase;
- class CommandBufferBase;
- class CommandEncoder;
- class ComputePassEncoder;
- class ExternalTextureBase;
- class InstanceBase;
- class PipelineBase;
- class PipelineLayoutBase;
- class QuerySetBase;
- class QueueBase;
- class RenderBundleBase;
- class RenderBundleEncoder;
- class RenderPassEncoder;
- class RenderPipelineBase;
- class ResourceHeapBase;
- class SamplerBase;
- class Surface;
- class ShaderModuleBase;
- class StagingBufferBase;
- class SwapChainBase;
- class NewSwapChainBase;
- class TextureBase;
- class TextureViewBase;
-
- class DeviceBase;
-
- template <typename T>
- class PerStage;
-
- struct Format;
-
- // Aliases for frontend-only types.
- using CommandEncoderBase = CommandEncoder;
- using ComputePassEncoderBase = ComputePassEncoder;
- using RenderBundleEncoderBase = RenderBundleEncoder;
- using RenderPassEncoderBase = RenderPassEncoder;
- using SurfaceBase = Surface;
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp
deleted file mode 100644
index 9d514f33df2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/IndirectDrawMetadata.h"
-
-#include "common/Constants.h"
-#include "common/RefCounted.h"
-#include "dawn_native/IndirectDrawValidationEncoder.h"
-#include "dawn_native/Limits.h"
-#include "dawn_native/RenderBundle.h"
-
-#include <algorithm>
-#include <utility>
-
-namespace dawn_native {
-
- uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
- return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
- kDrawIndexedIndirectSize;
- }
-
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
- BufferBase* indirectBuffer)
- : mIndirectBuffer(indirectBuffer) {
- }
-
- void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
- uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- IndexedIndirectDraw draw) {
- const uint64_t newOffset = draw.clientBufferOffset;
- auto it = mBatches.begin();
- while (it != mBatches.end()) {
- IndexedIndirectValidationBatch& batch = *it;
- if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
- // This batch is full. If its minOffset is to the right of the new offset, we can
- // just insert a new batch here.
- if (newOffset < batch.minOffset) {
- break;
- }
-
- // Otherwise keep looking.
- ++it;
- continue;
- }
-
- if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
- batch.draws.push_back(std::move(draw));
- return;
- }
-
- if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
- // We can extend this batch to the left in order to fit the new offset.
- batch.minOffset = newOffset;
- batch.draws.push_back(std::move(draw));
- return;
- }
-
- if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
- // We can extend this batch to the right in order to fit the new offset.
- batch.maxOffset = newOffset;
- batch.draws.push_back(std::move(draw));
- return;
- }
-
- if (newOffset < batch.minOffset) {
- // We want to insert a new batch just before this one.
- break;
- }
-
- ++it;
- }
-
- IndexedIndirectValidationBatch newBatch;
- newBatch.minOffset = newOffset;
- newBatch.maxOffset = newOffset;
- newBatch.draws.push_back(std::move(draw));
-
- mBatches.insert(it, std::move(newBatch));
- }
-
- void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
- uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- const IndexedIndirectValidationBatch& newBatch) {
- auto it = mBatches.begin();
- while (it != mBatches.end()) {
- IndexedIndirectValidationBatch& batch = *it;
- uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
- uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
- if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
- maxDrawCallsPerIndirectValidationBatch) {
- // This batch fits within the limits of an existing batch. Merge it.
- batch.minOffset = min;
- batch.maxOffset = max;
- batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
- return;
- }
-
- if (newBatch.minOffset < batch.minOffset) {
- break;
- }
-
- ++it;
- }
- mBatches.push_back(newBatch);
- }
-
- const std::vector<IndirectDrawMetadata::IndexedIndirectValidationBatch>&
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
- return mBatches;
- }
-
- IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
- : mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)),
- mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)) {
- }
-
- IndirectDrawMetadata::~IndirectDrawMetadata() = default;
-
- IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
-
- IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
-
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
- IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
- return &mIndexedIndirectBufferValidationInfo;
- }
-
- void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
- auto result = mAddedBundles.insert(bundle);
- if (!result.second) {
- return;
- }
-
- for (const auto& entry :
- bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
- const IndexedIndirectConfig& config = entry.first;
- auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
- if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
- // We already have batches for the same config. Merge the new ones in.
- for (const IndexedIndirectValidationBatch& batch : entry.second.GetBatches()) {
- it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
- }
- } else {
- mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, entry.second);
- }
- }
- }
-
- void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
- uint64_t indexBufferSize,
- BufferBase* indirectBuffer,
- uint64_t indirectOffset,
- DrawIndexedIndirectCmd* cmd) {
- uint64_t numIndexBufferElements;
- switch (indexFormat) {
- case wgpu::IndexFormat::Uint16:
- numIndexBufferElements = indexBufferSize / 2;
- break;
- case wgpu::IndexFormat::Uint32:
- numIndexBufferElements = indexBufferSize / 4;
- break;
- case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
- }
-
- const IndexedIndirectConfig config(indirectBuffer, numIndexBufferElements);
- auto it = mIndexedIndirectBufferValidationInfo.find(config);
- if (it == mIndexedIndirectBufferValidationInfo.end()) {
- auto result = mIndexedIndirectBufferValidationInfo.emplace(
- config, IndexedIndirectBufferValidationInfo(indirectBuffer));
- it = result.first;
- }
-
- IndexedIndirectDraw draw;
- draw.clientBufferOffset = indirectOffset;
- draw.cmd = cmd;
- it->second.AddIndexedIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange,
- std::move(draw));
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h
deleted file mode 100644
index e6527502b3a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_INDIRECTDRAWMETADATA_H_
-#define DAWNNATIVE_INDIRECTDRAWMETADATA_H_
-
-#include "common/NonCopyable.h"
-#include "common/RefCounted.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBufferStateTracker.h"
-#include "dawn_native/Commands.h"
-
-#include <cstdint>
-#include <map>
-#include <set>
-#include <utility>
-#include <vector>
-
-namespace dawn_native {
-
- class RenderBundleBase;
- struct CombinedLimits;
-
- // In the unlikely scenario that indirect offsets used over a single buffer span more than
- // this length of the buffer, we split the validation work into multiple batches.
- uint32_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
-
- // Metadata corresponding to the validation requirements of a single render pass. This metadata
- // is accumulated while its corresponding render pass is encoded, and is later used to encode
- // validation commands to be inserted into the command buffer just before the render pass's own
- // commands.
- class IndirectDrawMetadata : public NonCopyable {
- public:
- struct IndexedIndirectDraw {
- uint64_t clientBufferOffset;
- // This is a pointer to the command that should be populated with the validated
- // indirect scratch buffer. It is only valid up until the encoded command buffer
- // is submitted.
- DrawIndexedIndirectCmd* cmd;
- };
-
- struct IndexedIndirectValidationBatch {
- uint64_t minOffset;
- uint64_t maxOffset;
- std::vector<IndexedIndirectDraw> draws;
- };
-
- // Tracks information about every draw call in this render pass which uses the same indirect
- // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
- // that validation work can be chunked efficiently if necessary.
- class IndexedIndirectBufferValidationInfo {
- public:
- explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
-
- // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
- // assigned (and deferred) buffer ref and relative offset before returning.
- void AddIndexedIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- IndexedIndirectDraw draw);
-
- // Adds draw calls from an already-computed batch, e.g. from a previously encoded
- // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
- // it's added to mBatch.
- void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
- uint32_t maxBatchOffsetRange,
- const IndexedIndirectValidationBatch& batch);
-
- const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
-
- private:
- Ref<BufferBase> mIndirectBuffer;
-
- // A list of information about validation batches that will need to be executed for the
- // corresponding indirect buffer prior to a single render pass. These are kept sorted by
- // minOffset and may overlap iff the number of offsets in one batch would otherwise
- // exceed some large upper bound (roughly ~33M draw calls).
- //
- // Since the most common expected cases will overwhelmingly require only a single
- // validation pass per render pass, this is optimized for efficient updates to a single
- // batch rather than for efficient manipulation of a large number of batches.
- std::vector<IndexedIndirectValidationBatch> mBatches;
- };
-
- // Combination of an indirect buffer reference, and the number of addressable index buffer
- // elements at the time of a draw call.
- using IndexedIndirectConfig = std::pair<BufferBase*, uint64_t>;
- using IndexedIndirectBufferValidationInfoMap =
- std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
-
- explicit IndirectDrawMetadata(const CombinedLimits& limits);
- ~IndirectDrawMetadata();
-
- IndirectDrawMetadata(IndirectDrawMetadata&&);
- IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
-
- IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
-
- void AddBundle(RenderBundleBase* bundle);
- void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
- uint64_t indexBufferSize,
- BufferBase* indirectBuffer,
- uint64_t indirectOffset,
- DrawIndexedIndirectCmd* cmd);
-
- private:
- IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
- std::set<RenderBundleBase*> mAddedBundles;
-
- uint32_t mMaxDrawCallsPerBatch;
- uint32_t mMaxBatchOffsetRange;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_INDIRECTDRAWMETADATA_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp
deleted file mode 100644
index a58f9b03cbe..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/IndirectDrawValidationEncoder.h"
-
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/ComputePassEncoder.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/utils/WGPUHelpers.h"
-
-#include <cstdlib>
-#include <limits>
-
-namespace dawn_native {
-
- namespace {
- // NOTE: This must match the workgroup_size attribute on the compute entry point below.
- constexpr uint64_t kWorkgroupSize = 64;
-
- // Equivalent to the BatchInfo struct defined in the shader below.
- struct BatchInfo {
- uint64_t numIndexBufferElements;
- uint32_t numDraws;
- uint32_t padding;
- };
-
- // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
- // various failure modes.
- static const char sRenderValidationShaderSource[] = R"(
- let kNumIndirectParamsPerDrawCall = 5u;
-
- let kIndexCountEntry = 0u;
- let kInstanceCountEntry = 1u;
- let kFirstIndexEntry = 2u;
- let kBaseVertexEntry = 3u;
- let kFirstInstanceEntry = 4u;
-
- [[block]] struct BatchInfo {
- numIndexBufferElementsLow: u32;
- numIndexBufferElementsHigh: u32;
- numDraws: u32;
- padding: u32;
- indirectOffsets: array<u32>;
- };
-
- [[block]] struct IndirectParams {
- data: array<u32>;
- };
-
- [[group(0), binding(0)]] var<storage, read> batch: BatchInfo;
- [[group(0), binding(1)]] var<storage, read_write> clientParams: IndirectParams;
- [[group(0), binding(2)]] var<storage, write> validatedParams: IndirectParams;
-
- fn fail(drawIndex: u32) {
- let index = drawIndex * kNumIndirectParamsPerDrawCall;
- validatedParams.data[index + kIndexCountEntry] = 0u;
- validatedParams.data[index + kInstanceCountEntry] = 0u;
- validatedParams.data[index + kFirstIndexEntry] = 0u;
- validatedParams.data[index + kBaseVertexEntry] = 0u;
- validatedParams.data[index + kFirstInstanceEntry] = 0u;
- }
-
- fn pass(drawIndex: u32) {
- let vIndex = drawIndex * kNumIndirectParamsPerDrawCall;
- let cIndex = batch.indirectOffsets[drawIndex];
- validatedParams.data[vIndex + kIndexCountEntry] =
- clientParams.data[cIndex + kIndexCountEntry];
- validatedParams.data[vIndex + kInstanceCountEntry] =
- clientParams.data[cIndex + kInstanceCountEntry];
- validatedParams.data[vIndex + kFirstIndexEntry] =
- clientParams.data[cIndex + kFirstIndexEntry];
- validatedParams.data[vIndex + kBaseVertexEntry] =
- clientParams.data[cIndex + kBaseVertexEntry];
- validatedParams.data[vIndex + kFirstInstanceEntry] =
- clientParams.data[cIndex + kFirstInstanceEntry];
- }
-
- [[stage(compute), workgroup_size(64, 1, 1)]]
- fn main([[builtin(global_invocation_id)]] id : vec3<u32>) {
- if (id.x >= batch.numDraws) {
- return;
- }
-
- let clientIndex = batch.indirectOffsets[id.x];
- let firstInstance = clientParams.data[clientIndex + kFirstInstanceEntry];
- if (firstInstance != 0u) {
- fail(id.x);
- return;
- }
-
- if (batch.numIndexBufferElementsHigh >= 2u) {
- // firstIndex and indexCount are both u32. The maximum possible sum of these
- // values is 0x1fffffffe, which is less than 0x200000000. Nothing to validate.
- pass(id.x);
- return;
- }
-
- let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
- if (batch.numIndexBufferElementsHigh == 0u &&
- batch.numIndexBufferElementsLow < firstIndex) {
- fail(id.x);
- return;
- }
-
- // Note that this subtraction may underflow, but only when
- // numIndexBufferElementsHigh is 1u. The result is still correct in that case.
- let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
- let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
- if (indexCount > maxIndexCount) {
- fail(id.x);
- return;
- }
- pass(id.x);
- }
- )";
-
- ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (store->renderValidationPipeline == nullptr) {
- // Create compute shader module if not cached before.
- if (store->renderValidationShader == nullptr) {
- DAWN_TRY_ASSIGN(
- store->renderValidationShader,
- utils::CreateShaderModule(device, sRenderValidationShaderSource));
- }
-
- Ref<BindGroupLayoutBase> bindGroupLayout;
- DAWN_TRY_ASSIGN(
- bindGroupLayout,
- utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute,
- wgpu::BufferBindingType::ReadOnlyStorage},
- {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
- },
- /* allowInternalBinding */ true));
-
- Ref<PipelineLayoutBase> pipelineLayout;
- DAWN_TRY_ASSIGN(pipelineLayout,
- utils::MakeBasicPipelineLayout(device, bindGroupLayout));
-
- ComputePipelineDescriptor computePipelineDescriptor = {};
- computePipelineDescriptor.layout = pipelineLayout.Get();
- computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
- computePipelineDescriptor.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->renderValidationPipeline,
- device->CreateComputePipeline(&computePipelineDescriptor));
- }
-
- return store->renderValidationPipeline.Get();
- }
-
- size_t GetBatchDataSize(uint32_t numDraws) {
- return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
- }
-
- } // namespace
-
- uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
- const uint64_t batchDrawCallLimitByDispatchSize =
- static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
- const uint64_t batchDrawCallLimitByStorageBindingSize =
- (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
- return static_cast<uint32_t>(
- std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
- uint64_t(std::numeric_limits<uint32_t>::max())}));
- }
-
- MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
- CommandEncoder* commandEncoder,
- RenderPassResourceUsageTracker* usageTracker,
- IndirectDrawMetadata* indirectDrawMetadata) {
- struct Batch {
- const IndirectDrawMetadata::IndexedIndirectValidationBatch* metadata;
- uint64_t numIndexBufferElements;
- uint64_t dataBufferOffset;
- uint64_t dataSize;
- uint64_t clientIndirectOffset;
- uint64_t clientIndirectSize;
- uint64_t validatedParamsOffset;
- uint64_t validatedParamsSize;
- BatchInfo* batchInfo;
- };
-
- struct Pass {
- BufferBase* clientIndirectBuffer;
- uint64_t validatedParamsSize = 0;
- uint64_t batchDataSize = 0;
- std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
- std::vector<Batch> batches;
- };
-
- // First stage is grouping all batches into passes. We try to pack as many batches into a
- // single pass as possible. Batches can be grouped together as long as they're validating
- // data from the same indirect buffer, but they may still be split into multiple passes if
- // the number of draw calls in a pass would exceed some (very high) upper bound.
- size_t validatedParamsSize = 0;
- std::vector<Pass> passes;
- IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
- *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
- if (bufferInfoMap.empty()) {
- return {};
- }
-
- const uint32_t maxStorageBufferBindingSize =
- device->GetLimits().v1.maxStorageBufferBindingSize;
- const uint32_t minStorageBufferOffsetAlignment =
- device->GetLimits().v1.minStorageBufferOffsetAlignment;
-
- for (auto& entry : bufferInfoMap) {
- const IndirectDrawMetadata::IndexedIndirectConfig& config = entry.first;
- BufferBase* clientIndirectBuffer = config.first;
- for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
- entry.second.GetBatches()) {
- const uint64_t minOffsetFromAlignedBoundary =
- batch.minOffset % minStorageBufferOffsetAlignment;
- const uint64_t minOffsetAlignedDown =
- batch.minOffset - minOffsetFromAlignedBoundary;
-
- Batch newBatch;
- newBatch.metadata = &batch;
- newBatch.numIndexBufferElements = config.second;
- newBatch.dataSize = GetBatchDataSize(batch.draws.size());
- newBatch.clientIndirectOffset = minOffsetAlignedDown;
- newBatch.clientIndirectSize =
- batch.maxOffset + kDrawIndexedIndirectSize - minOffsetAlignedDown;
-
- newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
- newBatch.validatedParamsOffset =
- Align(validatedParamsSize, minStorageBufferOffsetAlignment);
- validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
- if (validatedParamsSize > maxStorageBufferBindingSize) {
- return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
- }
-
- Pass* currentPass = passes.empty() ? nullptr : &passes.back();
- if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
- uint64_t nextBatchDataOffset =
- Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
- uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
- if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
- // We can fit this batch in the current pass.
- newBatch.dataBufferOffset = nextBatchDataOffset;
- currentPass->batchDataSize = newPassBatchDataSize;
- currentPass->batches.push_back(newBatch);
- continue;
- }
- }
-
- // We need to start a new pass for this batch.
- newBatch.dataBufferOffset = 0;
-
- Pass newPass;
- newPass.clientIndirectBuffer = clientIndirectBuffer;
- newPass.batchDataSize = newBatch.dataSize;
- newPass.batches.push_back(newBatch);
- passes.push_back(std::move(newPass));
- }
- }
-
- auto* const store = device->GetInternalPipelineStore();
- ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
- ScratchBuffer& batchDataBuffer = store->scratchStorage;
-
- uint64_t requiredBatchDataBufferSize = 0;
- for (const Pass& pass : passes) {
- requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
- }
- DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
- usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
-
- DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
- usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
-
- // Now we allocate and populate host-side batch data to be copied to the GPU.
- for (Pass& pass : passes) {
- // We use std::malloc here because it guarantees maximal scalar alignment.
- pass.batchData = {std::malloc(pass.batchDataSize), std::free};
- memset(pass.batchData.get(), 0, pass.batchDataSize);
- uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
- for (Batch& batch : pass.batches) {
- batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
- batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
- batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
-
- uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
- uint64_t validatedParamsOffset = batch.validatedParamsOffset;
- for (auto& draw : batch.metadata->draws) {
- // The shader uses this to index an array of u32, hence the division by 4 bytes.
- *indirectOffsets++ = static_cast<uint32_t>(
- (draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
-
- draw.cmd->indirectBuffer = validatedParamsBuffer.GetBuffer();
- draw.cmd->indirectOffset = validatedParamsOffset;
-
- validatedParamsOffset += kDrawIndexedIndirectSize;
- }
- }
- }
-
- ComputePipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
-
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- BindGroupEntry bindings[3];
- BindGroupEntry& bufferDataBinding = bindings[0];
- bufferDataBinding.binding = 0;
- bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
-
- BindGroupEntry& clientIndirectBinding = bindings[1];
- clientIndirectBinding.binding = 1;
-
- BindGroupEntry& validatedParamsBinding = bindings[2];
- validatedParamsBinding.binding = 2;
- validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
-
- BindGroupDescriptor bindGroupDescriptor = {};
- bindGroupDescriptor.layout = layout.Get();
- bindGroupDescriptor.entryCount = 3;
- bindGroupDescriptor.entries = bindings;
-
- // Finally, we can now encode our validation passes. Each pass first does a single
- // WriteBuffer to get batch data over to the GPU, followed by a single compute pass. The
- // compute pass encodes a separate SetBindGroup and Dispatch command for each batch.
- for (const Pass& pass : passes) {
- commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
- static_cast<const uint8_t*>(pass.batchData.get()),
- pass.batchDataSize);
-
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- ComputePassDescriptor descriptor = {};
- Ref<ComputePassEncoder> passEncoder =
- AcquireRef(commandEncoder->APIBeginComputePass(&descriptor));
- passEncoder->APISetPipeline(pipeline);
-
- clientIndirectBinding.buffer = pass.clientIndirectBuffer;
-
- for (const Batch& batch : pass.batches) {
- bufferDataBinding.offset = batch.dataBufferOffset;
- bufferDataBinding.size = batch.dataSize;
- clientIndirectBinding.offset = batch.clientIndirectOffset;
- clientIndirectBinding.size = batch.clientIndirectSize;
- validatedParamsBinding.offset = batch.validatedParamsOffset;
- validatedParamsBinding.size = batch.validatedParamsSize;
-
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
-
- const uint32_t numDrawsRoundedUp =
- (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
- passEncoder->APISetBindGroup(0, bindGroup.Get());
- passEncoder->APIDispatch(numDrawsRoundedUp);
- }
-
- passEncoder->APIEndPass();
- }
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h
deleted file mode 100644
index aa56b099754..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
-#define DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/IndirectDrawMetadata.h"
-
-namespace dawn_native {
-
- class CommandEncoder;
- struct CombinedLimits;
- class DeviceBase;
- class RenderPassResourceUsageTracker;
-
- // The maximum number of draws call we can fit into a single validation batch. This is
- // essentially limited by the number of indirect parameter blocks that can fit into the maximum
- // allowed storage binding size (with the base limits, it is about 6.7M).
- uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
-
- MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
- CommandEncoder* commandEncoder,
- RenderPassResourceUsageTracker* usageTracker,
- IndirectDrawMetadata* indirectDrawMetadata);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
deleted file mode 100644
index ce0d85bc420..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Instance.h"
-
-#include "common/Assert.h"
-#include "common/Log.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/Surface.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_platform/DawnPlatform.h"
-
-#if defined(DAWN_USE_X11)
-# include "dawn_native/XlibXcbFunctions.h"
-#endif // defined(DAWN_USE_X11)
-
-namespace dawn_native {
-
- // Forward definitions of each backend's "Connect" function that creates new BackendConnection.
- // Conditionally compiled declarations are used to avoid using static constructors instead.
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
- namespace d3d12 {
- BackendConnection* Connect(InstanceBase* instance);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- namespace metal {
- BackendConnection* Connect(InstanceBase* instance);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_ENABLE_BACKEND_NULL)
- namespace null {
- BackendConnection* Connect(InstanceBase* instance);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_NULL)
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- namespace opengl {
- BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- namespace vulkan {
- BackendConnection* Connect(InstanceBase* instance);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
-
- namespace {
-
- BackendsBitset GetEnabledBackends() {
- BackendsBitset enabledBackends;
-#if defined(DAWN_ENABLE_BACKEND_NULL)
- enabledBackends.set(wgpu::BackendType::Null);
-#endif // defined(DAWN_ENABLE_BACKEND_NULL)
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
- enabledBackends.set(wgpu::BackendType::D3D12);
-#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- enabledBackends.set(wgpu::BackendType::Metal);
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- enabledBackends.set(wgpu::BackendType::Vulkan);
-#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- enabledBackends.set(wgpu::BackendType::OpenGL);
-#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- enabledBackends.set(wgpu::BackendType::OpenGLES);
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
-
- return enabledBackends;
- }
-
- } // anonymous namespace
-
- // InstanceBase
-
- // static
- InstanceBase* InstanceBase::Create(const InstanceDescriptor* descriptor) {
- Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
- if (!instance->Initialize(descriptor)) {
- return nullptr;
- }
- return instance.Detach();
- }
-
- // TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
- bool InstanceBase::Initialize(const InstanceDescriptor*) {
- return true;
- }
-
- void InstanceBase::DiscoverDefaultAdapters() {
- for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
- EnsureBackendConnection(b);
- }
-
- if (mDiscoveredDefaultAdapters) {
- return;
- }
-
- // Query and merge all default adapters for all backends
- for (std::unique_ptr<BackendConnection>& backend : mBackends) {
- std::vector<std::unique_ptr<AdapterBase>> backendAdapters =
- backend->DiscoverDefaultAdapters();
-
- for (std::unique_ptr<AdapterBase>& adapter : backendAdapters) {
- ASSERT(adapter->GetBackendType() == backend->GetType());
- ASSERT(adapter->GetInstance() == this);
- mAdapters.push_back(std::move(adapter));
- }
- }
-
- mDiscoveredDefaultAdapters = true;
- }
-
- // This is just a wrapper around the real logic that uses Error.h error handling.
- bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
- return !ConsumedError(DiscoverAdaptersInternal(options));
- }
-
- const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
- return mTogglesInfo.GetToggleInfo(toggleName);
- }
-
- Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
- return mTogglesInfo.ToggleNameToEnum(toggleName);
- }
-
- const FeatureInfo* InstanceBase::GetFeatureInfo(const char* featureName) {
- return mFeaturesInfo.GetFeatureInfo(featureName);
- }
-
- Feature InstanceBase::FeatureNameToEnum(const char* featureName) {
- return mFeaturesInfo.FeatureNameToEnum(featureName);
- }
-
- FeaturesSet InstanceBase::FeatureNamesToFeaturesSet(
- const std::vector<const char*>& requiredFeatures) {
- return mFeaturesInfo.FeatureNamesToFeaturesSet(requiredFeatures);
- }
-
- const std::vector<std::unique_ptr<AdapterBase>>& InstanceBase::GetAdapters() const {
- return mAdapters;
- }
-
- void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
- if (mBackendsConnected[backendType]) {
- return;
- }
-
- auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
- if (connection != nullptr) {
- ASSERT(connection->GetType() == expectedType);
- ASSERT(connection->GetInstance() == this);
- mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
- }
- };
-
- switch (backendType) {
-#if defined(DAWN_ENABLE_BACKEND_NULL)
- case wgpu::BackendType::Null:
- Register(null::Connect(this), wgpu::BackendType::Null);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_NULL)
-
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
- case wgpu::BackendType::D3D12:
- Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- case wgpu::BackendType::Metal:
- Register(metal::Connect(this), wgpu::BackendType::Metal);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- case wgpu::BackendType::Vulkan:
- Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
-
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- case wgpu::BackendType::OpenGL:
- Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
- wgpu::BackendType::OpenGL);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- case wgpu::BackendType::OpenGLES:
- Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
- wgpu::BackendType::OpenGLES);
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGLES)
-
- default:
- UNREACHABLE();
- }
-
- mBackendsConnected.set(backendType);
- }
-
- MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
- wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
- DAWN_TRY(ValidateBackendType(backendType));
-
- if (!GetEnabledBackends()[backendType]) {
- return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
- }
-
- EnsureBackendConnection(backendType);
-
- bool foundBackend = false;
- for (std::unique_ptr<BackendConnection>& backend : mBackends) {
- if (backend->GetType() != backendType) {
- continue;
- }
- foundBackend = true;
-
- std::vector<std::unique_ptr<AdapterBase>> newAdapters;
- DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
-
- for (std::unique_ptr<AdapterBase>& adapter : newAdapters) {
- ASSERT(adapter->GetBackendType() == backend->GetType());
- ASSERT(adapter->GetInstance() == this);
- mAdapters.push_back(std::move(adapter));
- }
- }
-
- DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
- return {};
- }
-
- bool InstanceBase::ConsumedError(MaybeError maybeError) {
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-
- ASSERT(error != nullptr);
- dawn::InfoLog() << error->GetFormattedMessage();
-
- return true;
- }
- return false;
- }
-
- bool InstanceBase::IsBackendValidationEnabled() const {
- return mBackendValidationLevel != BackendValidationLevel::Disabled;
- }
-
- void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
- mBackendValidationLevel = level;
- }
-
- BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
- return mBackendValidationLevel;
- }
-
- void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
- mBeginCaptureOnStartup = beginCaptureOnStartup;
- }
-
- bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
- return mBeginCaptureOnStartup;
- }
-
- void InstanceBase::SetPlatform(dawn_platform::Platform* platform) {
- mPlatform = platform;
- }
-
- dawn_platform::Platform* InstanceBase::GetPlatform() {
- if (mPlatform != nullptr) {
- return mPlatform;
- }
-
- if (mDefaultPlatform == nullptr) {
- mDefaultPlatform = std::make_unique<dawn_platform::Platform>();
- }
- return mDefaultPlatform.get();
- }
-
- const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
-#if defined(DAWN_USE_X11)
- if (mXlibXcbFunctions == nullptr) {
- mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
- }
- return mXlibXcbFunctions.get();
-#else
- UNREACHABLE();
-#endif // defined(DAWN_USE_X11)
- }
-
- Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
- if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
- return nullptr;
- }
-
- return new Surface(this, descriptor);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.h b/chromium/third_party/dawn/src/dawn_native/Instance.h
deleted file mode 100644
index a636875b25e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Instance.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_INSTANCE_H_
-#define DAWNNATIVE_INSTANCE_H_
-
-#include "common/RefCounted.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/Adapter.h"
-#include "dawn_native/BackendConnection.h"
-#include "dawn_native/Features.h"
-#include "dawn_native/Toggles.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
-namespace dawn_platform {
- class Platform;
-} // namespace dawn_platform
-
-namespace dawn_native {
-
- class Surface;
- class XlibXcbFunctions;
-
- using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
-
- // This is called InstanceBase for consistency across the frontend, even if the backends don't
- // specialize this class.
- class InstanceBase final : public RefCounted {
- public:
- static InstanceBase* Create(const InstanceDescriptor* descriptor = nullptr);
-
- void DiscoverDefaultAdapters();
- bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
-
- const std::vector<std::unique_ptr<AdapterBase>>& GetAdapters() const;
-
- // Used to handle error that happen up to device creation.
- bool ConsumedError(MaybeError maybeError);
-
- // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
- // of a toggle supported in Dawn.
- const ToggleInfo* GetToggleInfo(const char* toggleName);
- Toggle ToggleNameToEnum(const char* toggleName);
-
- // Used to query the details of an feature. Return nullptr if featureName is not a valid
- // name of an feature supported in Dawn.
- const FeatureInfo* GetFeatureInfo(const char* featureName);
- Feature FeatureNameToEnum(const char* featureName);
- FeaturesSet FeatureNamesToFeaturesSet(const std::vector<const char*>& requiredFeatures);
-
- bool IsBackendValidationEnabled() const;
- void SetBackendValidationLevel(BackendValidationLevel level);
- BackendValidationLevel GetBackendValidationLevel() const;
-
- void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
- bool IsBeginCaptureOnStartupEnabled() const;
-
- void SetPlatform(dawn_platform::Platform* platform);
- dawn_platform::Platform* GetPlatform();
-
- // Get backend-independent libraries that need to be loaded dynamically.
- const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
-
- // Dawn API
- Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
-
- private:
- InstanceBase() = default;
- ~InstanceBase() = default;
-
- InstanceBase(const InstanceBase& other) = delete;
- InstanceBase& operator=(const InstanceBase& other) = delete;
-
- bool Initialize(const InstanceDescriptor* descriptor);
-
- // Lazily creates connections to all backends that have been compiled.
- void EnsureBackendConnection(wgpu::BackendType backendType);
-
- MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
-
- BackendsBitset mBackendsConnected;
-
- bool mDiscoveredDefaultAdapters = false;
-
- bool mBeginCaptureOnStartup = false;
- BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
-
- dawn_platform::Platform* mPlatform = nullptr;
- std::unique_ptr<dawn_platform::Platform> mDefaultPlatform;
-
- std::vector<std::unique_ptr<BackendConnection>> mBackends;
- std::vector<std::unique_ptr<AdapterBase>> mAdapters;
-
- FeaturesInfo mFeaturesInfo;
- TogglesInfo mTogglesInfo;
-
-#if defined(DAWN_USE_X11)
- std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
-#endif // defined(DAWN_USE_X11)
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_INSTANCE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h b/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h
deleted file mode 100644
index fbbaf4ed1af..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_INTEGERTYPES_H_
-#define DAWNNATIVE_INTEGERTYPES_H_
-
-#include "common/Constants.h"
-#include "common/TypedInteger.h"
-
-#include <cstdint>
-
-namespace dawn_native {
- // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
- using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
-
- // Binding numbers get mapped to a packed range of indices
- using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
-
- using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
-
- static constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
-
- using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
-
- constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
- ColorAttachmentIndex(kMaxColorAttachments);
-
- using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
- using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
-
- constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
- constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
- VertexAttributeLocation(kMaxVertexAttributes);
-
- // Serials are 64bit integers that are incremented by one each time to produce unique values.
- // Some serials (like queue serials) are compared numerically to know which one is before
- // another, while some serials are only checked for equality. We call serials only checked
- // for equality IDs.
-
- // Buffer mapping requests are stored outside of the buffer while they are being processed and
- // cannot be invalidated. Instead they are associated with an ID, and when a map request is
- // finished, the mapping callback is fired only if its ID matches the ID if the last request
- // that was sent.
- using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
-
- // The type for the WebGPU API fence serial values.
- using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
-
- // A serial used to watch the progression of GPU execution on a queue, each time operations
- // that need to be followed individually are scheduled for execution on a queue, the serial
- // is incremented by one. This way to know if something is done executing, we just need to
- // compare its serial with the currently completed serial.
- using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
- constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
-
- // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
- // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
- // token, which prevents them (and any BindGroups created with them) from being used with any
- // other pipelines.
- using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_INTEGERTYPES_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp
deleted file mode 100644
index edfd115f5ea..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/InternalPipelineStore.h"
-
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/ShaderModule.h"
-
-#include <unordered_map>
-
-namespace dawn_native {
-
- class RenderPipelineBase;
- class ShaderModuleBase;
-
- InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
- : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
- scratchIndirectStorage(device,
- wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
- wgpu::BufferUsage::Storage) {
- }
-
- InternalPipelineStore::~InternalPipelineStore() = default;
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
deleted file mode 100644
index 803e0dfd38e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_INTERNALPIPELINESTORE_H_
-#define DAWNNATIVE_INTERNALPIPELINESTORE_H_
-
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/ScratchBuffer.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <unordered_map>
-
-namespace dawn_native {
-
- class DeviceBase;
- class RenderPipelineBase;
- class ShaderModuleBase;
-
- // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
- // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
- struct InternalPipelineStore {
- explicit InternalPipelineStore(DeviceBase* device);
- ~InternalPipelineStore();
-
- std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
- copyTextureForBrowserPipelines;
-
- Ref<ShaderModuleBase> copyTextureForBrowser;
-
- Ref<ComputePipelineBase> timestampComputePipeline;
- Ref<ShaderModuleBase> timestampCS;
-
- Ref<ShaderModuleBase> dummyFragmentShader;
-
- // A scratch buffer suitable for use as a copy destination and storage binding.
- ScratchBuffer scratchStorage;
-
- // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
- // buffer for indirect dispatch or draw calls.
- ScratchBuffer scratchIndirectStorage;
-
- Ref<ComputePipelineBase> renderValidationPipeline;
- Ref<ShaderModuleBase> renderValidationShader;
- Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_INTERNALPIPELINESTORE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Limits.cpp b/chromium/third_party/dawn/src/dawn_native/Limits.cpp
deleted file mode 100644
index 949ce8331c0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Limits.cpp
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Limits.h"
-
-#include "common/Assert.h"
-
-#include <array>
-
-// clang-format off
-// TODO(crbug.com/dawn/685):
-// For now, only expose these tiers until metrics can determine better ones.
-#define LIMITS_WORKGROUP_STORAGE_SIZE(X) \
- X(Higher, maxComputeWorkgroupStorageSize, 16352, 32768, 49152, 65536)
-
-#define LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
- X(Higher, maxStorageBufferBindingSize, 134217728, 1073741824, 2147483647, 4294967295)
-
-// TODO(crbug.com/dawn/685):
-// These limits don't have tiers yet. Define two tiers with the same values since the macros
-// in this file expect more than one tier.
-#define LIMITS_OTHER(X) \
- X(Higher, maxTextureDimension1D, 8192, 8192) \
- X(Higher, maxTextureDimension2D, 8192, 8192) \
- X(Higher, maxTextureDimension3D, 2048, 2048) \
- X(Higher, maxTextureArrayLayers, 256, 256) \
- X(Higher, maxBindGroups, 4, 4) \
- X(Higher, maxDynamicUniformBuffersPerPipelineLayout, 8, 8) \
- X(Higher, maxDynamicStorageBuffersPerPipelineLayout, 4, 4) \
- X(Higher, maxSampledTexturesPerShaderStage, 16, 16) \
- X(Higher, maxSamplersPerShaderStage, 16, 16) \
- X(Higher, maxStorageBuffersPerShaderStage, 8, 8) \
- X(Higher, maxStorageTexturesPerShaderStage, 4, 4) \
- X(Higher, maxUniformBuffersPerShaderStage, 12, 12) \
- X(Higher, maxUniformBufferBindingSize, 16384, 16384) \
- X( Lower, minUniformBufferOffsetAlignment, 256, 256) \
- X( Lower, minStorageBufferOffsetAlignment, 256, 256) \
- X(Higher, maxVertexBuffers, 8, 8) \
- X(Higher, maxVertexAttributes, 16, 16) \
- X(Higher, maxVertexBufferArrayStride, 2048, 2048) \
- X(Higher, maxInterStageShaderComponents, 60, 60) \
- X(Higher, maxComputeInvocationsPerWorkgroup, 256, 256) \
- X(Higher, maxComputeWorkgroupSizeX, 256, 256) \
- X(Higher, maxComputeWorkgroupSizeY, 256, 256) \
- X(Higher, maxComputeWorkgroupSizeZ, 64, 64) \
- X(Higher, maxComputeWorkgroupsPerDimension, 65535, 65535)
-// clang-format on
-
-#define LIMITS_EACH_GROUP(X) \
- X(LIMITS_WORKGROUP_STORAGE_SIZE) \
- X(LIMITS_STORAGE_BUFFER_BINDING_SIZE) \
- X(LIMITS_OTHER)
-
-#define LIMITS(X) \
- LIMITS_WORKGROUP_STORAGE_SIZE(X) \
- LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
- LIMITS_OTHER(X)
-
-namespace dawn_native {
- namespace {
- template <uint32_t A, uint32_t B>
- constexpr void StaticAssertSame() {
- static_assert(A == B, "Mismatching tier count in limit group.");
- }
-
- template <uint32_t I, uint32_t... Is>
- constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
- int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
- DAWN_UNUSED(unused);
- return I;
- }
-
- enum class LimitBetterDirection {
- Lower,
- Higher,
- };
-
- template <LimitBetterDirection Better>
- struct CheckLimit;
-
- template <>
- struct CheckLimit<LimitBetterDirection::Lower> {
- template <typename T>
- static bool IsBetter(T lhs, T rhs) {
- return lhs < rhs;
- }
-
- template <typename T>
- static MaybeError Validate(T supported, T required) {
- DAWN_INVALID_IF(IsBetter(required, supported),
- "Required limit (%u) is lower than the supported limit (%u).",
- required, supported);
- return {};
- }
- };
-
- template <>
- struct CheckLimit<LimitBetterDirection::Higher> {
- template <typename T>
- static bool IsBetter(T lhs, T rhs) {
- return lhs > rhs;
- }
-
- template <typename T>
- static MaybeError Validate(T supported, T required) {
- DAWN_INVALID_IF(IsBetter(required, supported),
- "Required limit (%u) is greater than the supported limit (%u).",
- required, supported);
- return {};
- }
- };
-
- template <typename T>
- bool IsLimitUndefined(T value) {
- static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
- return false;
- }
-
- template <>
- bool IsLimitUndefined<uint32_t>(uint32_t value) {
- return value == wgpu::kLimitU32Undefined;
- }
-
- template <>
- bool IsLimitUndefined<uint64_t>(uint64_t value) {
- return value == wgpu::kLimitU64Undefined;
- }
-
- } // namespace
-
- void GetDefaultLimits(Limits* limits) {
- ASSERT(limits != nullptr);
-#define X(Better, limitName, base, ...) limits->limitName = base;
- LIMITS(X)
-#undef X
- }
-
- Limits ReifyDefaultLimits(const Limits& limits) {
- Limits out;
-#define X(Better, limitName, base, ...) \
- if (IsLimitUndefined(limits.limitName) || \
- CheckLimit<LimitBetterDirection::Better>::IsBetter( \
- static_cast<decltype(limits.limitName)>(base), limits.limitName)) { \
- /* If the limit is undefined or the default is better, use the default */ \
- out.limitName = base; \
- } else { \
- out.limitName = limits.limitName; \
- }
- LIMITS(X)
-#undef X
- return out;
- }
-
- MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
-#define X(Better, limitName, ...) \
- if (!IsLimitUndefined(requiredLimits.limitName)) { \
- DAWN_TRY_CONTEXT(CheckLimit<LimitBetterDirection::Better>::Validate( \
- supportedLimits.limitName, requiredLimits.limitName), \
- "validating " #limitName); \
- }
- LIMITS(X)
-#undef X
- return {};
- }
-
- Limits ApplyLimitTiers(Limits limits) {
-#define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
-#define GET_TIER_COUNT(LIMIT_GROUP) \
- ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
-
-#define X_EACH_GROUP(LIMIT_GROUP) \
- { \
- constexpr uint32_t kTierCount = GET_TIER_COUNT(LIMIT_GROUP); \
- for (uint32_t i = kTierCount; i != 0; --i) { \
- LIMIT_GROUP(X_CHECK_BETTER_AND_CLAMP) \
- /* Limits fit in tier and have been clamped. Break. */ \
- break; \
- } \
- }
-
-#define X_CHECK_BETTER_AND_CLAMP(Better, limitName, ...) \
- { \
- constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__}; \
- decltype(Limits::limitName) tierValue = tiers[i - 1]; \
- if (CheckLimit<LimitBetterDirection::Better>::IsBetter(tierValue, limits.limitName)) { \
- /* The tier is better. Go to the next tier. */ \
- continue; \
- } else if (tierValue != limits.limitName) { \
- /* Better than the tier. Degrade |limits| to the tier. */ \
- limits.limitName = tiers[i - 1]; \
- } \
- }
-
- LIMITS_EACH_GROUP(X_EACH_GROUP)
-#undef X_CHECK_BETTER
-#undef X_EACH_GROUP
-#undef GET_TIER_COUNT
-#undef X_TIER_COUNT
- return limits;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Limits.h b/chromium/third_party/dawn/src/dawn_native/Limits.h
deleted file mode 100644
index 4beed2e780e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Limits.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_LIMITS_H_
-#define DAWNNATIVE_LIMITS_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- struct CombinedLimits {
- Limits v1;
- };
-
- // Populate |limits| with the default limits.
- void GetDefaultLimits(Limits* limits);
-
- // Returns a copy of |limits| where all undefined values are replaced
- // with their defaults. Also clamps to the defaults if the provided limits
- // are worse.
- Limits ReifyDefaultLimits(const Limits& limits);
-
- // Validate that |requiredLimits| are no better than |supportedLimits|.
- MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
-
- // Returns a copy of |limits| where limit tiers are applied.
- Limits ApplyLimitTiers(Limits limits);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_LIMITS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp b/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
deleted file mode 100644
index c6f091a9740..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/Device.h"
-
-#include <mutex>
-
-namespace dawn_native {
-
- static constexpr uint64_t kErrorPayload = 0;
- static constexpr uint64_t kNotErrorPayload = 1;
-
- ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
- }
-
- ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
- : RefCounted(kErrorPayload), mDevice(device) {
- }
-
- DeviceBase* ObjectBase::GetDevice() const {
- return mDevice;
- }
-
- bool ObjectBase::IsError() const {
- return GetRefCountPayload() == kErrorPayload;
- }
-
- ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
- if (label) {
- mLabel = label;
- }
- }
-
- ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
- }
-
- ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
- : ObjectBase(device) {
- }
-
- ApiObjectBase::~ApiObjectBase() {
- ASSERT(!IsAlive());
- }
-
- void ApiObjectBase::APISetLabel(const char* label) {
- mLabel = label;
- SetLabelImpl();
- }
-
- const std::string& ApiObjectBase::GetLabel() const {
- return mLabel;
- }
-
- void ApiObjectBase::SetLabelImpl() {
- }
-
- bool ApiObjectBase::IsAlive() const {
- return IsInList();
- }
-
- void ApiObjectBase::DeleteThis() {
- Destroy();
- RefCounted::DeleteThis();
- }
-
- void ApiObjectBase::TrackInDevice() {
- ASSERT(GetDevice() != nullptr);
- GetDevice()->TrackObject(this);
- }
-
- void ApiObjectBase::Destroy() {
- const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
- if (RemoveFromList()) {
- DestroyImpl();
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h b/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
deleted file mode 100644
index 5c916287485..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OBJECTBASE_H_
-#define DAWNNATIVE_OBJECTBASE_H_
-
-#include "common/LinkedList.h"
-#include "common/RefCounted.h"
-#include "dawn_native/Forward.h"
-
-#include <string>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- class ObjectBase : public RefCounted {
- public:
- struct ErrorTag {};
- static constexpr ErrorTag kError = {};
-
- explicit ObjectBase(DeviceBase* device);
- ObjectBase(DeviceBase* device, ErrorTag tag);
-
- DeviceBase* GetDevice() const;
- bool IsError() const;
-
- private:
- // Pointer to owning device.
- DeviceBase* mDevice;
- };
-
- class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
- public:
- struct LabelNotImplementedTag {};
- static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
- struct UntrackedByDeviceTag {};
- static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
-
- ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
- ApiObjectBase(DeviceBase* device, const char* label);
- ApiObjectBase(DeviceBase* device, ErrorTag tag);
- ~ApiObjectBase() override;
-
- virtual ObjectType GetType() const = 0;
- const std::string& GetLabel() const;
-
- // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
- // by the owning device.
- bool IsAlive() const;
-
- // This needs to be public because it can be called from the device owning the object.
- void Destroy();
-
- // Dawn API
- void APISetLabel(const char* label);
-
- protected:
- // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
- // always call their derived class implementation of Destroy prior to the derived
- // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
- // then the underlying backend's Destroy calls are executed. We cannot naively put the call
- // to Destroy in the destructor of this class because it calls DestroyImpl
- // which is a virtual function often implemented in the Derived class which would already
- // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
- // order. Note that some classes like BindGroup may override the DeleteThis function again,
- // and they should ensure that their overriding versions call this underlying version
- // somewhere.
- void DeleteThis() override;
- void TrackInDevice();
-
- // Sub-classes may override this function multiple times. Whenever overriding this function,
- // however, users should be sure to call their parent's version in the new override to make
- // sure that all destroy functionality is kept. This function is guaranteed to only be
- // called once through the exposed Destroy function.
- virtual void DestroyImpl() = 0;
-
- private:
- virtual void SetLabelImpl();
-
- std::string mLabel;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_OBJECTBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.cpp b/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.cpp
deleted file mode 100644
index 901e4cc7027..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ObjectContentHasher.h"
-
-namespace dawn_native {
-
- size_t ObjectContentHasher::GetContentHash() const {
- return mContentHash;
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.h b/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.h
deleted file mode 100644
index 16509bcb9c6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ObjectContentHasher.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
-#define DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
-
-#include "common/HashUtils.h"
-
-#include <string>
-#include <vector>
-
-namespace dawn_native {
-
- // ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
- // cache.
- class ObjectContentHasher {
- public:
- // Record calls the appropriate record function based on the type.
- template <typename T, typename... Args>
- void Record(const T& value, const Args&... args) {
- RecordImpl<T, Args...>::Call(this, value, args...);
- }
-
- size_t GetContentHash() const;
-
- private:
- template <typename T, typename... Args>
- struct RecordImpl {
- static constexpr void Call(ObjectContentHasher* recorder,
- const T& value,
- const Args&... args) {
- HashCombine(&recorder->mContentHash, value, args...);
- }
- };
-
- template <typename T>
- struct RecordImpl<T*> {
- static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
- // Calling Record(objPtr) is not allowed. This check exists to only prevent such
- // mistakes.
- static_assert(obj == nullptr, "");
- }
- };
-
- template <typename T>
- struct RecordImpl<std::vector<T>> {
- static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
- recorder->RecordIterable<std::vector<T>>(vec);
- }
- };
-
- template <typename IteratorT>
- constexpr void RecordIterable(const IteratorT& iterable) {
- for (auto it = iterable.begin(); it != iterable.end(); ++it) {
- Record(*it);
- }
- }
-
- size_t mContentHash = 0;
- };
-
- template <>
- struct ObjectContentHasher::RecordImpl<std::string> {
- static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
- recorder->RecordIterable<std::string>(str);
- }
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_OBJECT_CONTENT_HASHER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
deleted file mode 100644
index 555eb0fc87a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PASSRESOURCEUSAGE_H
-#define DAWNNATIVE_PASSRESOURCEUSAGE_H
-
-#include "dawn_native/SubresourceStorage.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <set>
-#include <vector>
-
-namespace dawn_native {
-
- // This file declares various "ResourceUsage" structures. They are produced by the frontend
- // while recording commands to be used for later validation and also some operations in the
- // backends. The are produced by the "Encoder" objects that finalize them on "EndPass" or
- // "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
-
- class BufferBase;
- class QuerySetBase;
- class TextureBase;
-
- // The texture usage inside passes must be tracked per-subresource.
- using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
-
- // Which resources are used by a synchronization scope and how they are used. The command
- // buffer validation pre-computes this information so that backends with explicit barriers
- // don't have to re-compute it.
- struct SyncScopeResourceUsage {
- std::vector<BufferBase*> buffers;
- std::vector<wgpu::BufferUsage> bufferUsages;
-
- std::vector<TextureBase*> textures;
- std::vector<TextureSubresourceUsage> textureUsages;
-
- std::vector<ExternalTextureBase*> externalTextures;
- };
-
- // Contains all the resource usage data for a compute pass.
- //
- // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
- // specification. ComputePassResourceUsage also stores nline the set of all buffers and
- // textures used, because some unused BindGroups may not be used at all in synchronization
- // scope but their resources still need to be validated on Queue::Submit.
- struct ComputePassResourceUsage {
- // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
- // use the copy constructor (that's deleted) when doing operations on a
- // vector<ComputePassResourceUsage>
- ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
- ComputePassResourceUsage() = default;
-
- std::vector<SyncScopeResourceUsage> dispatchUsages;
-
- // All the resources referenced by this compute pass for validation in Queue::Submit.
- std::set<BufferBase*> referencedBuffers;
- std::set<TextureBase*> referencedTextures;
- std::set<ExternalTextureBase*> referencedExternalTextures;
- };
-
- // Contains all the resource usage data for a render pass.
- //
- // In the WebGPU specification render passes are synchronization scopes but we also need to
- // track additional data. It is stored for render passes used by a CommandBuffer, but also in
- // RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
- struct RenderPassResourceUsage : public SyncScopeResourceUsage {
- // Storage to track the occlusion queries used during the pass.
- std::vector<QuerySetBase*> querySets;
- std::vector<std::vector<bool>> queryAvailabilities;
- };
-
- using RenderPassUsages = std::vector<RenderPassResourceUsage>;
- using ComputePassUsages = std::vector<ComputePassResourceUsage>;
-
- // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
- // is used for validation and to produce barriers and lazy clears in the backends.
- struct CommandBufferResourceUsage {
- RenderPassUsages renderPasses;
- ComputePassUsages computePasses;
-
- // Resources used in commands that aren't in a pass.
- std::set<BufferBase*> topLevelBuffers;
- std::set<TextureBase*> topLevelTextures;
- std::set<QuerySetBase*> usedQuerySets;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PASSRESOURCEUSAGE_H
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
deleted file mode 100644
index 470eee17fcb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/PassResourceUsageTracker.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/Texture.h"
-
-#include <utility>
-
-namespace dawn_native {
-
- void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
- // std::map's operator[] will create the key and return 0 if the key didn't exist
- // before.
- mBufferUsages[buffer] |= usage;
- }
-
- void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
- TextureBase* texture = view->GetTexture();
- const SubresourceRange& range = view->GetSubresourceRange();
-
- // Get or create a new TextureSubresourceUsage for that texture (initially filled with
- // wgpu::TextureUsage::None)
- auto it = mTextureUsages.emplace(
- std::piecewise_construct, std::forward_as_tuple(texture),
- std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
- texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- TextureSubresourceUsage& textureUsage = it.first->second;
-
- textureUsage.Update(range,
- [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
- // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
- // branches.
- if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
- (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
- // Using the same subresource as an attachment for two different
- // render attachments is a write-write hazard. Add this internal
- // usage so we will fail the check that a subresource with
- // writable usage is the single usage.
- *storedUsage |= kAgainAsRenderAttachment;
- }
- *storedUsage |= usage;
- });
- }
-
- void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
- TextureBase* texture,
- const TextureSubresourceUsage& textureUsage) {
- // Get or create a new TextureSubresourceUsage for that texture (initially filled with
- // wgpu::TextureUsage::None)
- auto it = mTextureUsages.emplace(
- std::piecewise_construct, std::forward_as_tuple(texture),
- std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
- texture->GetNumMipLevels(), wgpu::TextureUsage::None));
- TextureSubresourceUsage* passTextureUsage = &it.first->second;
-
- passTextureUsage->Merge(
- textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
- const wgpu::TextureUsage& addedUsage) {
- ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
- *storedUsage |= addedUsage;
- });
- }
-
- void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
- for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
- break;
- case wgpu::BufferBindingType::Storage:
- BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
- break;
- case kInternalStorageBufferBinding:
- BufferUsedAs(buffer, kInternalStorageBuffer);
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- BufferUsedAs(buffer, kReadOnlyStorageBuffer);
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- ExternalTextureBase* externalTexture =
- group->GetBindingAsExternalTexture(bindingIndex);
-
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& textureViews =
- externalTexture->GetTextureViews();
-
- // Only single-plane formats are supported right now, so assert only one
- // view exists.
- ASSERT(textureViews[1].Get() == nullptr);
- ASSERT(textureViews[2].Get() == nullptr);
-
- mExternalTextureUsages.insert(externalTexture);
- TextureViewUsedAs(textureViews[0].Get(), wgpu::TextureUsage::TextureBinding);
- break;
- }
-
- case BindingInfoType::Sampler:
- break;
- }
- }
- }
-
- SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
- SyncScopeResourceUsage result;
- result.buffers.reserve(mBufferUsages.size());
- result.bufferUsages.reserve(mBufferUsages.size());
- result.textures.reserve(mTextureUsages.size());
- result.textureUsages.reserve(mTextureUsages.size());
-
- for (auto& it : mBufferUsages) {
- result.buffers.push_back(it.first);
- result.bufferUsages.push_back(it.second);
- }
-
- for (auto& it : mTextureUsages) {
- result.textures.push_back(it.first);
- result.textureUsages.push_back(std::move(it.second));
- }
-
- for (auto& it : mExternalTextureUsages) {
- result.externalTextures.push_back(it);
- }
-
- mBufferUsages.clear();
- mTextureUsages.clear();
- mExternalTextureUsages.clear();
-
- return result;
- }
-
- void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
- mUsage.dispatchUsages.push_back(std::move(scope));
- }
-
- void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
- mUsage.referencedBuffers.insert(buffer);
- }
-
- void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
- for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
- const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
- break;
- }
-
- case BindingInfoType::Texture: {
- mUsage.referencedTextures.insert(
- group->GetBindingAsTextureView(index)->GetTexture());
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- ExternalTextureBase* externalTexture =
- group->GetBindingAsExternalTexture(index);
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& textureViews =
- externalTexture->GetTextureViews();
-
- // Only single-plane formats are supported right now, so assert only one
- // view exists.
- ASSERT(textureViews[1].Get() == nullptr);
- ASSERT(textureViews[2].Get() == nullptr);
-
- mUsage.referencedExternalTextures.insert(externalTexture);
- mUsage.referencedTextures.insert(textureViews[0].Get()->GetTexture());
- break;
- }
-
- case BindingInfoType::StorageTexture:
- case BindingInfoType::Sampler:
- break;
- }
- }
- }
-
- ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
- return std::move(mUsage);
- }
-
- RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
- RenderPassResourceUsage result;
- *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
-
- result.querySets.reserve(mQueryAvailabilities.size());
- result.queryAvailabilities.reserve(mQueryAvailabilities.size());
-
- for (auto& it : mQueryAvailabilities) {
- result.querySets.push_back(it.first);
- result.queryAvailabilities.push_back(std::move(it.second));
- }
-
- mQueryAvailabilities.clear();
-
- return result;
- }
-
- void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
- uint32_t queryIndex) {
- // The query availability only needs to be tracked again on render passes for checking
- // query overwrite on render pass and resetting query sets on the Vulkan backend.
- DAWN_ASSERT(querySet != nullptr);
-
- // Gets the iterator for that querySet or create a new vector of bool set to false
- // if the querySet wasn't registered.
- auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
- it->second[queryIndex] = true;
- }
-
- const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
- return mQueryAvailabilities;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
deleted file mode 100644
index 33f33ff0323..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
-#define DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
-
-#include "dawn_native/PassResourceUsage.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <map>
-
-namespace dawn_native {
-
- class BindGroupBase;
- class BufferBase;
- class ExternalTextureBase;
- class QuerySetBase;
- class TextureBase;
-
- using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
-
- // Helper class to build SyncScopeResourceUsages
- class SyncScopeUsageTracker {
- public:
- void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
- void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
- void AddRenderBundleTextureUsage(TextureBase* texture,
- const TextureSubresourceUsage& textureUsage);
-
- // Walks the bind groups and tracks all its resources.
- void AddBindGroup(BindGroupBase* group);
-
- // Returns the per-pass usage for use by backends for APIs with explicit barriers.
- SyncScopeResourceUsage AcquireSyncScopeUsage();
-
- private:
- std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
- std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
- std::set<ExternalTextureBase*> mExternalTextureUsages;
- };
-
- // Helper class to build ComputePassResourceUsages
- class ComputePassResourceUsageTracker {
- public:
- void AddDispatch(SyncScopeResourceUsage scope);
- void AddReferencedBuffer(BufferBase* buffer);
- void AddResourcesReferencedByBindGroup(BindGroupBase* group);
-
- ComputePassResourceUsage AcquireResourceUsage();
-
- private:
- ComputePassResourceUsage mUsage;
- };
-
- // Helper class to build RenderPassResourceUsages
- class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
- public:
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
- const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
-
- RenderPassResourceUsage AcquireResourceUsage();
-
- private:
- // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
- // instead.
- using SyncScopeUsageTracker::AcquireSyncScopeUsage;
-
- // Tracks queries used in the render pass to validate that they aren't written twice.
- QueryAvailabilityMap mQueryAvailabilities;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PerStage.cpp b/chromium/third_party/dawn/src/dawn_native/PerStage.cpp
deleted file mode 100644
index 469fd9fd6c4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PerStage.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/PerStage.h"
-
-namespace dawn_native {
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SingleShaderStage value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case SingleShaderStage::Compute:
- s->Append("Compute");
- break;
- case SingleShaderStage::Vertex:
- s->Append("Vertex");
- break;
- case SingleShaderStage::Fragment:
- s->Append("Fragment");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
- std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
- return BitSetIterator<kNumStages, SingleShaderStage>(bits);
- }
-
- wgpu::ShaderStage StageBit(SingleShaderStage stage) {
- ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PerStage.h b/chromium/third_party/dawn/src/dawn_native/PerStage.h
deleted file mode 100644
index a67a08a36f6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PerStage.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PERSTAGE_H_
-#define DAWNNATIVE_PERSTAGE_H_
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "common/Constants.h"
-#include "dawn_native/Error.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-
-namespace dawn_native {
-
- enum class SingleShaderStage { Vertex, Fragment, Compute };
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- SingleShaderStage value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
- static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages, "");
- static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages, "");
- static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages, "");
-
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)),
- "");
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)),
- "");
- static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
- (1 << static_cast<uint32_t>(SingleShaderStage::Compute)),
- "");
-
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
- wgpu::ShaderStage StageBit(SingleShaderStage stage);
-
- static constexpr wgpu::ShaderStage kAllStages =
- static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
-
- template <typename T>
- class PerStage {
- public:
- PerStage() = default;
- PerStage(const T& initialValue) {
- mData.fill(initialValue);
- }
-
- T& operator[](SingleShaderStage stage) {
- DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return mData[static_cast<uint32_t>(stage)];
- }
- const T& operator[](SingleShaderStage stage) const {
- DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return mData[static_cast<uint32_t>(stage)];
- }
-
- T& operator[](wgpu::ShaderStage stageBit) {
- uint32_t bit = static_cast<uint32_t>(stageBit);
- DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
- return mData[Log2(bit)];
- }
- const T& operator[](wgpu::ShaderStage stageBit) const {
- uint32_t bit = static_cast<uint32_t>(stageBit);
- DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
- return mData[Log2(bit)];
- }
-
- private:
- std::array<T, kNumStages> mData;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PERSTAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PersistentCache.cpp b/chromium/third_party/dawn/src/dawn_native/PersistentCache.cpp
deleted file mode 100644
index 944fd5c1734..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PersistentCache.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/PersistentCache.h"
-
-#include "common/Assert.h"
-#include "dawn_native/Device.h"
-#include "dawn_platform/DawnPlatform.h"
-
-namespace dawn_native {
-
- PersistentCache::PersistentCache(DeviceBase* device)
- : mDevice(device), mCache(GetPlatformCache()) {
- }
-
- ScopedCachedBlob PersistentCache::LoadData(const PersistentCacheKey& key) {
- ScopedCachedBlob blob = {};
- if (mCache == nullptr) {
- return blob;
- }
- std::lock_guard<std::mutex> lock(mMutex);
- blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
- if (blob.bufferSize > 0) {
- blob.buffer.reset(new uint8_t[blob.bufferSize]);
- const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
- blob.buffer.get(), blob.bufferSize);
- ASSERT(bufferSize == blob.bufferSize);
- return blob;
- }
- return blob;
- }
-
- void PersistentCache::StoreData(const PersistentCacheKey& key, const void* value, size_t size) {
- if (mCache == nullptr) {
- return;
- }
- ASSERT(value != nullptr);
- ASSERT(size > 0);
- std::lock_guard<std::mutex> lock(mMutex);
- mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
- }
-
- dawn_platform::CachingInterface* PersistentCache::GetPlatformCache() {
- // TODO(dawn:549): Create a fingerprint of concatenated version strings (ex. Tint commit
- // hash, Dawn commit hash). This will be used by the client so it may know when to discard
- // previously cached Dawn objects should this fingerprint change.
- dawn_platform::Platform* platform = mDevice->GetPlatform();
- if (platform != nullptr) {
- return platform->GetCachingInterface(/*fingerprint*/ nullptr, /*fingerprintSize*/ 0);
- }
- return nullptr;
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PersistentCache.h b/chromium/third_party/dawn/src/dawn_native/PersistentCache.h
deleted file mode 100644
index 46a3cb29696..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PersistentCache.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PERSISTENTCACHE_H_
-#define DAWNNATIVE_PERSISTENTCACHE_H_
-
-#include "dawn_native/Error.h"
-
-#include <mutex>
-#include <vector>
-
-namespace dawn_platform {
- class CachingInterface;
-}
-
-namespace dawn_native {
-
- using PersistentCacheKey = std::vector<uint8_t>;
-
- struct ScopedCachedBlob {
- std::unique_ptr<uint8_t[]> buffer;
- size_t bufferSize = 0;
- };
-
- class DeviceBase;
-
- enum class PersistentKeyType { Shader };
-
- // This class should always be thread-safe as it is used in Create*PipelineAsync() where it is
- // called asynchronously.
- // The thread-safety of any access to mCache (the function LoadData() and StoreData()) is
- // protected by mMutex.
- class PersistentCache {
- public:
- PersistentCache(DeviceBase* device);
-
- // Combines load/store operations into a single call.
- // If the load was successful, a non-empty blob is returned to the caller.
- // Else, the creation callback |createFn| gets invoked with a callback
- // |doCache| to store the newly created blob back in the cache.
- //
- // Example usage:
- //
- // ScopedCachedBlob cachedBlob = {};
- // DAWN_TRY_ASSIGN(cachedBlob, GetOrCreate(key, [&](auto doCache)) {
- // // Create a new blob to be stored
- // doCache(newBlobPtr, newBlobSize); // store
- // }));
- //
- template <typename CreateFn>
- ResultOrError<ScopedCachedBlob> GetOrCreate(const PersistentCacheKey& key,
- CreateFn&& createFn) {
- // Attempt to load an existing blob from the cache.
- ScopedCachedBlob blob = LoadData(key);
- if (blob.bufferSize > 0) {
- return std::move(blob);
- }
-
- // Allow the caller to create a new blob to be stored for the given key.
- DAWN_TRY(createFn([this, key](const void* value, size_t size) {
- this->StoreData(key, value, size);
- }));
-
- return std::move(blob);
- }
-
- private:
- // PersistentCache impl
- ScopedCachedBlob LoadData(const PersistentCacheKey& key);
- void StoreData(const PersistentCacheKey& key, const void* value, size_t size);
-
- dawn_platform::CachingInterface* GetPlatformCache();
-
- DeviceBase* mDevice = nullptr;
-
- std::mutex mMutex;
- dawn_platform::CachingInterface* mCache = nullptr;
- };
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PERSISTENTCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
deleted file mode 100644
index 7b244604515..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Pipeline.h"
-
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/ShaderModule.h"
-
-namespace dawn_native {
- MaybeError ValidateProgrammableStage(DeviceBase* device,
- const ShaderModuleBase* module,
- const std::string& entryPoint,
- uint32_t constantCount,
- const ConstantEntry* constants,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage) {
- DAWN_TRY(device->ValidateObject(module));
-
- DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
- "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
- module);
-
- const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
-
- DAWN_INVALID_IF(metadata.stage != stage,
- "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
- metadata.stage, entryPoint, stage);
-
- if (layout != nullptr) {
- DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
- }
-
- if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline overridable constants are disallowed because they are partially "
- "implemented.");
- }
-
- // Validate if overridable constants exist in shader module
- // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
- size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
- // Keep an initialized constants sets to handle duplicate initialization cases
- std::unordered_set<std::string> stageInitializedConstantIdentifiers;
- for (uint32_t i = 0; i < constantCount; i++) {
- DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
- "Pipeline overridable constant \"%s\" not found in %s.",
- constants[i].key, module);
-
- if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
- if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
- numUninitializedConstants--;
- }
- stageInitializedConstantIdentifiers.insert(constants[i].key);
- } else {
- // There are duplicate initializations
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Pipeline overridable constants \"%s\" is set more than once in %s",
- constants[i].key, module);
- }
- }
-
- // Validate if any overridable constant is left uninitialized
- if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
- std::string uninitializedConstantsArray;
- bool isFirst = true;
- for (std::string identifier : metadata.uninitializedOverridableConstants) {
- if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
- continue;
- }
-
- if (isFirst) {
- isFirst = false;
- } else {
- uninitializedConstantsArray.append(", ");
- }
- uninitializedConstantsArray.append(identifier);
- }
-
- return DAWN_FORMAT_VALIDATION_ERROR(
- "There are uninitialized pipeline overridable constants in shader module %s, their "
- "identifiers:[%s]",
- module, uninitializedConstantsArray);
- }
-
- return {};
- }
-
- // PipelineBase
-
- PipelineBase::PipelineBase(DeviceBase* device,
- PipelineLayoutBase* layout,
- const char* label,
- std::vector<StageAndDescriptor> stages)
- : ApiObjectBase(device, label), mLayout(layout) {
- ASSERT(!stages.empty());
-
- for (const StageAndDescriptor& stage : stages) {
- // Extract argument for this stage.
- SingleShaderStage shaderStage = stage.shaderStage;
- ShaderModuleBase* module = stage.module;
- const char* entryPointName = stage.entryPoint.c_str();
-
- const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
- ASSERT(metadata.stage == shaderStage);
-
- // Record them internally.
- bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
- mStageMask |= StageBit(shaderStage);
- mStages[shaderStage] = {module, entryPointName, &metadata, {}};
- auto& constants = mStages[shaderStage].constants;
- for (uint32_t i = 0; i < stage.constantCount; i++) {
- constants.emplace(stage.constants[i].key, stage.constants[i].value);
- }
-
- // Compute the max() of all minBufferSizes across all stages.
- RequiredBufferSizes stageMinBufferSizes =
- ComputeRequiredBufferSizesForLayout(metadata, layout);
-
- if (isFirstStage) {
- mMinBufferSizes = std::move(stageMinBufferSizes);
- } else {
- for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
- ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
-
- for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
- mMinBufferSizes[group][i] =
- std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
- }
- }
- }
- }
- }
-
- PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- }
-
- PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- PipelineBase::~PipelineBase() = default;
-
- PipelineLayoutBase* PipelineBase::GetLayout() {
- ASSERT(!IsError());
- return mLayout.Get();
- }
-
- const PipelineLayoutBase* PipelineBase::GetLayout() const {
- ASSERT(!IsError());
- return mLayout.Get();
- }
-
- const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
- ASSERT(!IsError());
- return mMinBufferSizes;
- }
-
- const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
- ASSERT(!IsError());
- return mStages[stage];
- }
-
- const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
- return mStages;
- }
-
- wgpu::ShaderStage PipelineBase::GetStageMask() const {
- return mStageMask;
- }
-
- MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
- DAWN_INVALID_IF(
- groupIndex >= kMaxBindGroups,
- "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
- groupIndex, kMaxBindGroups);
- return {};
- }
-
- ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
- uint32_t groupIndexIn) {
- DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
-
- BindGroupIndex groupIndex(groupIndexIn);
- if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
- return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
- } else {
- return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
- }
- }
-
- BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
- Ref<BindGroupLayoutBase> result;
- if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
- "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
- this)) {
- return BindGroupLayoutBase::MakeError(GetDevice());
- }
- return result.Detach();
- }
-
- size_t PipelineBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mLayout->GetContentHash());
-
- recorder.Record(mStageMask);
- for (SingleShaderStage stage : IterateStages(mStageMask)) {
- recorder.Record(mStages[stage].module->GetContentHash());
- recorder.Record(mStages[stage].entryPoint);
- }
-
- return recorder.GetContentHash();
- }
-
- // static
- bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
- // The layout is deduplicated so it can be compared by pointer.
- if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
- return false;
- }
-
- for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
- // The module is deduplicated so it can be compared by pointer.
- if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
- a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
- return false;
- }
- }
-
- return true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
deleted file mode 100644
index 10aec50ca75..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PIPELINE_H_
-#define DAWNNATIVE_PIPELINE_H_
-
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PerStage.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/ShaderModule.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- MaybeError ValidateProgrammableStage(DeviceBase* device,
- const ShaderModuleBase* module,
- const std::string& entryPoint,
- uint32_t constantCount,
- const ConstantEntry* constants,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage);
-
- // Use map to make sure constant keys are sorted for creating shader cache keys
- using PipelineConstantEntries = std::map<std::string, double>;
-
- struct ProgrammableStage {
- Ref<ShaderModuleBase> module;
- std::string entryPoint;
-
- // The metadata lives as long as module, that's ref-ed in the same structure.
- const EntryPointMetadata* metadata = nullptr;
-
- PipelineConstantEntries constants;
- };
-
- class PipelineBase : public ApiObjectBase, public CachedObject {
- public:
- ~PipelineBase() override;
-
- PipelineLayoutBase* GetLayout();
- const PipelineLayoutBase* GetLayout() const;
- const RequiredBufferSizes& GetMinBufferSizes() const;
- const ProgrammableStage& GetStage(SingleShaderStage stage) const;
- const PerStage<ProgrammableStage>& GetAllStages() const;
- wgpu::ShaderStage GetStageMask() const;
-
- ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
-
- // Helper functions for std::unordered_map-based pipeline caches.
- size_t ComputeContentHash() override;
- static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
-
- // Implementation of the API entrypoint. Do not use in a reentrant manner.
- BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
-
- // Initialize() should only be called once by the frontend.
- virtual MaybeError Initialize() = 0;
-
- protected:
- PipelineBase(DeviceBase* device,
- PipelineLayoutBase* layout,
- const char* label,
- std::vector<StageAndDescriptor> stages);
- PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // Constructor used only for mocking and testing.
- PipelineBase(DeviceBase* device);
-
- private:
- MaybeError ValidateGetBindGroupLayout(uint32_t group);
-
- wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
- PerStage<ProgrammableStage> mStages;
-
- Ref<PipelineLayoutBase> mLayout;
- RequiredBufferSizes mMinBufferSizes;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
deleted file mode 100644
index 2851695c2c5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/PipelineLayout.h"
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "common/ityp_stack_vec.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/ShaderModule.h"
-
-namespace dawn_native {
-
- MaybeError ValidatePipelineLayoutDescriptor(
- DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
- if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
- return DAWN_VALIDATION_ERROR("too many bind group layouts");
- }
-
- BindingCounts bindingCounts = {};
- for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
- DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
- if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
- pipelineCompatibilityToken) {
- return DAWN_VALIDATION_ERROR(
- "cannot create a pipeline layout using a bind group layout that was created as "
- "part of a pipeline's default layout");
- }
- AccumulateBindingCounts(&bindingCounts,
- descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
- }
-
- DAWN_TRY(ValidateBindingCounts(bindingCounts));
- return {};
- }
-
- // PipelineLayoutBase
-
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label) {
- ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
- for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
- ++group) {
- mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
- mMask.set(group);
- }
- }
-
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
- }
-
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- PipelineLayoutBase::~PipelineLayoutBase() = default;
-
- void PipelineLayoutBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncachePipelineLayout(this);
- }
- }
-
- // static
- PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
- return new PipelineLayoutBase(device, ObjectBase::kError);
- }
-
- // static
- ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
- DeviceBase* device,
- std::vector<StageAndDescriptor> stages) {
- using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
-
- // Merges two entries at the same location, if they are allowed to be merged.
- auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
- const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
- // Visibility is excluded because we take the OR across stages.
- bool compatible =
- modifiedEntry->binding == mergedEntry.binding &&
- modifiedEntry->buffer.type == mergedEntry.buffer.type &&
- modifiedEntry->sampler.type == mergedEntry.sampler.type &&
- // Compatibility between these sample types is checked below.
- (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
- (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
- modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
-
- // Minimum buffer binding size excluded because we take the maximum seen across stages.
- if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
- compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
- mergedEntry.buffer.hasDynamicOffset;
- }
-
- if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
- // Sample types are compatible if they are exactly equal,
- // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
- // Note that the |mergedEntry| never has type Float. Texture bindings all start
- // as UnfilterableFloat and are promoted to Float if they are statically used with
- // a sampler.
- ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
- bool compatibleSampleTypes =
- modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
- (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
- mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
- compatible =
- compatible && compatibleSampleTypes &&
- modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
- modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
- }
-
- if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
- compatible =
- compatible &&
- modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
- modifiedEntry->storageTexture.viewDimension ==
- mergedEntry.storageTexture.viewDimension;
- }
-
- // Check if any properties are incompatible with existing entry
- // If compatible, we will merge some properties
- if (!compatible) {
- return DAWN_VALIDATION_ERROR(
- "Duplicate binding in default pipeline layout initialization "
- "not compatible with previous declaration");
- }
-
- // Use the max |minBufferBindingSize| we find.
- modifiedEntry->buffer.minBindingSize =
- std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
-
- // Use the OR of all the stages at which we find this binding.
- modifiedEntry->visibility |= mergedEntry.visibility;
-
- return {};
- };
-
- // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
- auto ConvertMetadataToEntry =
- [](const ShaderBindingInfo& shaderBinding,
- const ExternalTextureBindingLayout* externalTextureBindingEntry)
- -> BindGroupLayoutEntry {
- BindGroupLayoutEntry entry = {};
- switch (shaderBinding.bindingType) {
- case BindingInfoType::Buffer:
- entry.buffer.type = shaderBinding.buffer.type;
- entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
- entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
- break;
- case BindingInfoType::Sampler:
- if (shaderBinding.sampler.isComparison) {
- entry.sampler.type = wgpu::SamplerBindingType::Comparison;
- } else {
- entry.sampler.type = wgpu::SamplerBindingType::Filtering;
- }
- break;
- case BindingInfoType::Texture:
- switch (shaderBinding.texture.compatibleSampleTypes) {
- case SampleTypeBit::Depth:
- entry.texture.sampleType = wgpu::TextureSampleType::Depth;
- break;
- case SampleTypeBit::Sint:
- entry.texture.sampleType = wgpu::TextureSampleType::Sint;
- break;
- case SampleTypeBit::Uint:
- entry.texture.sampleType = wgpu::TextureSampleType::Uint;
- break;
- case SampleTypeBit::Float:
- case SampleTypeBit::UnfilterableFloat:
- case SampleTypeBit::None:
- UNREACHABLE();
- break;
- default:
- if (shaderBinding.texture.compatibleSampleTypes ==
- (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
- // Default to UnfilterableFloat. It will be promoted to Float if it
- // is used with a sampler.
- entry.texture.sampleType =
- wgpu::TextureSampleType::UnfilterableFloat;
- } else {
- UNREACHABLE();
- }
- }
- entry.texture.viewDimension = shaderBinding.texture.viewDimension;
- entry.texture.multisampled = shaderBinding.texture.multisampled;
- break;
- case BindingInfoType::StorageTexture:
- entry.storageTexture.access = shaderBinding.storageTexture.access;
- entry.storageTexture.format = shaderBinding.storageTexture.format;
- entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
- break;
- case BindingInfoType::ExternalTexture:
- entry.nextInChain = externalTextureBindingEntry;
- break;
- }
- return entry;
- };
-
- PipelineCompatibilityToken pipelineCompatibilityToken =
- device->GetNextPipelineCompatibilityToken();
-
- // Creates the BGL from the entries for a stage, checking it is valid.
- auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- -> ResultOrError<Ref<BindGroupLayoutBase>> {
- std::vector<BindGroupLayoutEntry> entryVec;
- entryVec.reserve(entries.size());
- for (auto& it : entries) {
- entryVec.push_back(it.second);
- }
-
- BindGroupLayoutDescriptor desc = {};
- desc.entries = entryVec.data();
- desc.entryCount = entryVec.size();
-
- if (device->IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
- &desc);
- }
- return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
- };
-
- ASSERT(!stages.empty());
-
- // Data which BindGroupLayoutDescriptor will point to for creation
- ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
- entryData = {};
-
- // External texture binding layouts are chained structs that are set as a pointer within
- // the bind group layout entry. We declare an entry here so that it can be used when needed
- // in each BindGroupLayoutEntry and so it can stay alive until the call to
- // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
- // there's no issue with using the same struct multiple times.
- ExternalTextureBindingLayout externalTextureBindingLayout;
-
- // Loops over all the reflected BindGroupLayoutEntries from shaders.
- for (const StageAndDescriptor& stage : stages) {
- const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-
- for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
- for (const auto& bindingIt : metadata.bindings[group]) {
- BindingNumber bindingNumber = bindingIt.first;
- const ShaderBindingInfo& shaderBinding = bindingIt.second;
-
- // Create the BindGroupLayoutEntry
- BindGroupLayoutEntry entry =
- ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
- entry.binding = static_cast<uint32_t>(bindingNumber);
- entry.visibility = StageBit(stage.shaderStage);
-
- // Add it to our map of all entries, if there is an existing entry, then we
- // need to merge, if we can.
- const auto& insertion = entryData[group].insert({bindingNumber, entry});
- if (!insertion.second) {
- DAWN_TRY(MergeEntries(&insertion.first->second, entry));
- }
- }
- }
-
- // Promote any Unfilterable textures used with a sampler to Filtering.
- for (const EntryPointMetadata::SamplerTexturePair& pair :
- metadata.samplerTexturePairs) {
- BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
- if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
- entry->texture.sampleType = wgpu::TextureSampleType::Float;
- }
- }
- }
-
- // Create the bind group layouts. We need to keep track of the last non-empty BGL because
- // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
- // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
- // same.
- BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
- ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
- for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
- DAWN_TRY_ASSIGN(bindGroupLayouts[group],
- CreateBGL(device, entryData[group], pipelineCompatibilityToken));
- if (entryData[group].size() != 0) {
- pipelineBGLCount = group + BindGroupIndex(1);
- }
- }
-
- // Create the deduced pipeline layout, validating if it is valid.
- ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
- for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
- bgls[group] = bindGroupLayouts[group].Get();
- }
-
- PipelineLayoutDescriptor desc = {};
- desc.bindGroupLayouts = bgls.data();
- desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
-
- DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
-
- Ref<PipelineLayoutBase> result;
- DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
- ASSERT(!result->IsError());
-
- // Sanity check in debug that the pipeline layout is compatible with the current
- // pipeline.
- for (const StageAndDescriptor& stage : stages) {
- const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
- ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
- .IsSuccess());
- }
-
- return std::move(result);
- }
-
- ObjectType PipelineLayoutBase::GetType() const {
- return ObjectType::PipelineLayout;
- }
-
- const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
- ASSERT(!IsError());
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(mMask[group]);
- const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
- ASSERT(bgl != nullptr);
- return bgl;
- }
-
- BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
- ASSERT(!IsError());
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(mMask[group]);
- BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
- ASSERT(bgl != nullptr);
- return bgl;
- }
-
- const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
- ASSERT(!IsError());
- return mMask;
- }
-
- BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
- const PipelineLayoutBase* other) const {
- ASSERT(!IsError());
- return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
- }
-
- BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
- ASSERT(!IsError());
-
- for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
- if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
- return i;
- }
- }
- return kMaxBindGroupsTyped;
- }
-
- size_t PipelineLayoutBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mMask);
-
- for (BindGroupIndex group : IterateBitSet(mMask)) {
- recorder.Record(GetBindGroupLayout(group)->GetContentHash());
- }
-
- return recorder.GetContentHash();
- }
-
- bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
- const PipelineLayoutBase* b) const {
- if (a->mMask != b->mMask) {
- return false;
- }
-
- for (BindGroupIndex group : IterateBitSet(a->mMask)) {
- if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
- return false;
- }
- }
-
- return true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
deleted file mode 100644
index 948b3446a79..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PIPELINELAYOUT_H_
-#define DAWNNATIVE_PIPELINELAYOUT_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- MaybeError ValidatePipelineLayoutDescriptor(
- DeviceBase*,
- const PipelineLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
-
- using BindGroupLayoutArray =
- ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
- using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
-
- struct StageAndDescriptor {
- SingleShaderStage shaderStage;
- ShaderModuleBase* module;
- std::string entryPoint;
- uint32_t constantCount = 0u;
- ConstantEntry const* constants = nullptr;
- };
-
- class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
- public:
- PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
- ~PipelineLayoutBase() override;
-
- static PipelineLayoutBase* MakeError(DeviceBase* device);
- static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
- DeviceBase* device,
- std::vector<StageAndDescriptor> stages);
-
- ObjectType GetType() const override;
-
- const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
- BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
- const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
-
- // Utility functions to compute inherited bind groups.
- // Returns the inherited bind groups as a mask.
- BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
-
- // Returns the index of the first incompatible bind group in the range
- // [0, kMaxBindGroups]
- BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
-
- // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
- };
-
- protected:
- // Constructor used only for mocking and testing.
- PipelineLayoutBase(DeviceBase* device);
- PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DestroyImpl() override;
-
- BindGroupLayoutArray mBindGroupLayouts;
- BindGroupLayoutMask mMask;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PIPELINELAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp
deleted file mode 100644
index 2b6d44ae1e2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/PooledResourceMemoryAllocator.h"
-#include "dawn_native/Device.h"
-
-namespace dawn_native {
-
- PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
- ResourceHeapAllocator* heapAllocator)
- : mHeapAllocator(heapAllocator) {
- }
-
- void PooledResourceMemoryAllocator::DestroyPool() {
- for (auto& resourceHeap : mPool) {
- ASSERT(resourceHeap != nullptr);
- mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
- }
-
- mPool.clear();
- }
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>>
- PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
- // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
- // pooling is disabled in-frame when the memory is still pending. For high in-frame
- // memory users, FIFO might be preferable when memory consumption is a higher priority.
- std::unique_ptr<ResourceHeapBase> memory;
- if (!mPool.empty()) {
- memory = std::move(mPool.front());
- mPool.pop_front();
- }
-
- if (memory == nullptr) {
- DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
- }
-
- return std::move(memory);
- }
-
- void PooledResourceMemoryAllocator::DeallocateResourceHeap(
- std::unique_ptr<ResourceHeapBase> allocation) {
- mPool.push_front(std::move(allocation));
- }
-
- uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
- return mPool.size();
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h
deleted file mode 100644
index 5b6b816ee6a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
-#define DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/ResourceHeapAllocator.h"
-
-#include <deque>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- // |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
- // pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
- // The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
- // the pool and made AVAILABLE.
- class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
- public:
- PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
- ~PooledResourceMemoryAllocator() override = default;
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override;
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
-
- void DestroyPool();
-
- // For testing purposes.
- uint64_t GetPoolSizeForTesting() const;
-
- private:
- ResourceHeapAllocator* mHeapAllocator = nullptr;
-
- std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.cpp
deleted file mode 100644
index 99e2c7772b7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ProgrammableEncoder.h"
-
-#include "common/BitSetIterator.h"
-#include "common/ityp_array.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <cstring>
-
-namespace dawn_native {
-
- ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext)
- : ApiObjectBase(device, label),
- mEncodingContext(encodingContext),
- mValidationEnabled(device->IsValidationEnabled()) {
- }
-
- ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ApiObjectBase(device, errorTag),
- mEncodingContext(encodingContext),
- mValidationEnabled(device->IsValidationEnabled()) {
- }
-
- bool ProgrammableEncoder::IsValidationEnabled() const {
- return mValidationEnabled;
- }
-
- MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
- DAWN_INVALID_IF(mDebugGroupStackSize != 0,
- "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
- mDebugGroupStackSize);
- return {};
- }
-
- void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- return {};
- },
- "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
- }
-
- void ProgrammableEncoder::APIPopDebugGroup() {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- mDebugGroupStackSize == 0,
- "PopDebugGroup called when no debug groups are currently pushed.");
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
- mEncodingContext->PopDebugGroupLabel();
-
- return {};
- },
- "encoding %s.PopDebugGroup().", this);
- }
-
- void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- mDebugGroupStackSize++;
- mEncodingContext->PushDebugGroupLabel(groupLabel);
-
- return {};
- },
- "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
- }
-
- MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) const {
- DAWN_TRY(GetDevice()->ValidateObject(group));
-
- DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
- "Bind group index (%u) exceeds the maximum (%u).",
- static_cast<uint32_t>(index), kMaxBindGroups);
-
- ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
- BindingIndex(dynamicOffsetCountIn));
-
- // Dynamic offsets count must match the number required by the layout perfectly.
- const BindGroupLayoutBase* layout = group->GetLayout();
- DAWN_INVALID_IF(
- layout->GetDynamicBufferCount() != dynamicOffsets.size(),
- "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
- "in %s.",
- static_cast<uint32_t>(dynamicOffsets.size()),
- static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
-
- for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
-
- // BGL creation sorts bindings such that the dynamic buffer bindings are first.
- // ASSERT that this true.
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
- ASSERT(bindingInfo.buffer.hasDynamicOffset);
-
- uint64_t requiredAlignment;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Storage:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- case kInternalStorageBufferBinding:
- requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
- "Dynamic Offset[%u] (%u) is not %u byte aligned.",
- static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
-
- BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
-
- // During BindGroup creation, validation ensures binding offset + binding size
- // <= buffer size.
- ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
- ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
-
- if ((dynamicOffsets[i] >
- bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
- DAWN_INVALID_IF(
- (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
- "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
- "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
- "even with a dynamic offset of 0. Did you forget to specify "
- "the binding's size?",
- static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
- bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
-
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Dynamic Offset[%u] (%u) is out of bounds of "
- "%s with a size of %u and a bound range of (offset: %u, size: %u).",
- static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
- bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
- }
- }
-
- return {};
- }
-
- void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) const {
- SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
- cmd->index = index;
- cmd->group = group;
- cmd->dynamicOffsetCount = dynamicOffsetCount;
- if (dynamicOffsetCount > 0) {
- uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
- memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.h b/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.h
deleted file mode 100644
index 9f88f28a3cd..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammableEncoder.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_PROGRAMMABLEENCODER_H_
-#define DAWNNATIVE_PROGRAMMABLEENCODER_H_
-
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- class DeviceBase;
-
- // Base class for shared functionality between programmable encoders.
- class ProgrammableEncoder : public ApiObjectBase {
- public:
- ProgrammableEncoder(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext);
-
- void APIInsertDebugMarker(const char* groupLabel);
- void APIPopDebugGroup();
- void APIPushDebugGroup(const char* groupLabel);
-
- protected:
- bool IsValidationEnabled() const;
- MaybeError ValidateProgrammableEncoderEnd() const;
-
- // Compute and render passes do different things on SetBindGroup. These are helper functions
- // for the logic they have in common.
- MaybeError ValidateSetBindGroup(BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) const;
- void RecordSetBindGroup(CommandAllocator* allocator,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) const;
-
- // Construct an "error" programmable pass encoder.
- ProgrammableEncoder(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
-
- EncodingContext* mEncodingContext = nullptr;
-
- uint64_t mDebugGroupStackSize = 0;
-
- private:
- const bool mValidationEnabled;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_PROGRAMMABLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp b/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
deleted file mode 100644
index 03d21943bed..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/QueryHelper.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/ComputePassEncoder.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/utils/WGPUHelpers.h"
-
-namespace dawn_native {
-
- namespace {
-
- // Assert the offsets in dawn_native::TimestampParams are same with the ones in the shader
- static_assert(offsetof(dawn_native::TimestampParams, first) == 0, "");
- static_assert(offsetof(dawn_native::TimestampParams, count) == 4, "");
- static_assert(offsetof(dawn_native::TimestampParams, offset) == 8, "");
- static_assert(offsetof(dawn_native::TimestampParams, period) == 12, "");
-
- static const char sConvertTimestampsToNanoseconds[] = R"(
- struct Timestamp {
- low : u32;
- high : u32;
- };
-
- [[block]] struct TimestampArr {
- t : array<Timestamp>;
- };
-
- [[block]] struct AvailabilityArr {
- v : array<u32>;
- };
-
- [[block]] struct TimestampParams {
- first : u32;
- count : u32;
- offset : u32;
- period : f32;
- };
-
- [[group(0), binding(0)]]
- var<storage, read_write> timestamps : TimestampArr;
- [[group(0), binding(1)]]
- var<storage, read> availability : AvailabilityArr;
- [[group(0), binding(2)]] var<uniform> params : TimestampParams;
-
-
- let sizeofTimestamp : u32 = 8u;
-
- [[stage(compute), workgroup_size(8, 1, 1)]]
- fn main([[builtin(global_invocation_id)]] GlobalInvocationID : vec3<u32>) {
- if (GlobalInvocationID.x >= params.count) { return; }
-
- var index = GlobalInvocationID.x + params.offset / sizeofTimestamp;
-
- var timestamp = timestamps.t[index];
-
- // Return 0 for the unavailable value.
- if (availability.v[GlobalInvocationID.x + params.first] == 0u) {
- timestamps.t[index].low = 0u;
- timestamps.t[index].high = 0u;
- return;
- }
-
- // Multiply the values in timestamps buffer by the period.
- var period = params.period;
- var w = 0u;
-
- // If the product of low 32-bits and the period does not exceed the maximum of u32,
- // directly do the multiplication, otherwise, use two u32 to represent the high
- // 16-bits and low 16-bits of this u32, then multiply them by the period separately.
- if (timestamp.low <= u32(f32(0xFFFFFFFFu) / period)) {
- timestamps.t[index].low = u32(round(f32(timestamp.low) * period));
- } else {
- var lo = timestamp.low & 0xFFFFu;
- var hi = timestamp.low >> 16u;
-
- var t0 = u32(round(f32(lo) * period));
- var t1 = u32(round(f32(hi) * period)) + (t0 >> 16u);
- w = t1 >> 16u;
-
- var result = t1 << 16u;
- result = result | (t0 & 0xFFFFu);
- timestamps.t[index].low = result;
- }
-
- // Get the nearest integer to the float result. For high 32-bits, the round
- // function will greatly help reduce the accuracy loss of the final result.
- timestamps.t[index].high = u32(round(f32(timestamp.high) * period)) + w;
- }
- )";
-
- ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
- DeviceBase* device) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
-
- if (store->timestampComputePipeline == nullptr) {
- // Create compute shader module if not cached before.
- if (store->timestampCS == nullptr) {
- DAWN_TRY_ASSIGN(
- store->timestampCS,
- utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
- }
-
- // Create binding group layout
- Ref<BindGroupLayoutBase> bgl;
- DAWN_TRY_ASSIGN(
- bgl, utils::MakeBindGroupLayout(
- device,
- {
- {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
- {1, wgpu::ShaderStage::Compute,
- wgpu::BufferBindingType::ReadOnlyStorage},
- {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
- },
- /* allowInternalBinding */ true));
-
- // Create pipeline layout
- Ref<PipelineLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
-
- // Create ComputePipeline.
- ComputePipelineDescriptor computePipelineDesc = {};
- // Generate the layout based on shader module.
- computePipelineDesc.layout = layout.Get();
- computePipelineDesc.compute.module = store->timestampCS.Get();
- computePipelineDesc.compute.entryPoint = "main";
-
- DAWN_TRY_ASSIGN(store->timestampComputePipeline,
- device->CreateComputePipeline(&computePipelineDesc));
- }
-
- return store->timestampComputePipeline.Get();
- }
-
- } // anonymous namespace
-
- MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params) {
- DeviceBase* device = encoder->GetDevice();
-
- ComputePipelineBase* pipeline;
- DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
-
- // Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout;
- DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
- // Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup;
- DAWN_TRY_ASSIGN(bindGroup,
- utils::MakeBindGroup(device, layout,
- {{0, timestamps}, {1, availability}, {2, params}}));
-
- // Create compute encoder and issue dispatch.
- ComputePassDescriptor passDesc = {};
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<ComputePassEncoder> pass = AcquireRef(encoder->APIBeginComputePass(&passDesc));
- pass->APISetPipeline(pipeline);
- pass->APISetBindGroup(0, bindGroup.Get());
- pass->APIDispatch(
- static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
- pass->APIEndPass();
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/QueryHelper.h b/chromium/third_party/dawn/src/dawn_native/QueryHelper.h
deleted file mode 100644
index 90f3398294b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/QueryHelper.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_QUERYHELPER_H_
-#define DAWNNATIVE_QUERYHELPER_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/ObjectBase.h"
-
-namespace dawn_native {
-
- class BufferBase;
- class CommandEncoder;
-
- struct TimestampParams {
- uint32_t first;
- uint32_t count;
- uint32_t offset;
- float period;
- };
-
- MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
- BufferBase* timestamps,
- BufferBase* availability,
- BufferBase* params);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_QUERYHELPER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
deleted file mode 100644
index fbe385629c7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/QuerySet.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/Features.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <set>
-
-namespace dawn_native {
-
- namespace {
-
- class ErrorQuerySet final : public QuerySetBase {
- public:
- ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
- }
-
- private:
- void DestroyImpl() override {
- UNREACHABLE();
- }
- };
-
- } // anonymous namespace
-
- MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
- const QuerySetDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- DAWN_TRY(ValidateQueryType(descriptor->type));
-
- DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
- "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
- kMaxQueryCount);
-
- switch (descriptor->type) {
- case wgpu::QueryType::Occlusion:
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
- "Pipeline statistics specified for a query of type %s.",
- descriptor->type);
- break;
-
- case wgpu::QueryType::PipelineStatistics: {
- // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
- // Disallow it as unsafe until the implementaion is completed.
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Pipeline statistics queries are disallowed because they are not "
- "fully implemented");
-
- DAWN_INVALID_IF(
- !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
- "Pipeline statistics query set created without the feature being enabled.");
-
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
- "Pipeline statistics query set created with 0 statistics.");
-
- std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
- for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
- DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
-
- std::pair<std::set<wgpu::PipelineStatisticName>::iterator, bool> res =
- pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
- DAWN_INVALID_IF(!res.second, "Statistic %s is specified more than once.",
- descriptor->pipelineStatistics[i]);
- }
- } break;
-
- case wgpu::QueryType::Timestamp:
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Timestamp queries are disallowed because they may expose precise "
- "timing information.");
-
- DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
- "Timestamp query set created without the feature being enabled.");
-
- DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
- "Pipeline statistics specified for a query of type %s.",
- descriptor->type);
- break;
-
- default:
- break;
- }
-
- return {};
- }
-
- QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
- : ApiObjectBase(device, descriptor->label),
- mQueryType(descriptor->type),
- mQueryCount(descriptor->count),
- mState(QuerySetState::Available) {
- for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
- mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
- }
-
- mQueryAvailability.resize(descriptor->count);
- TrackInDevice();
- }
-
- QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- QuerySetBase::~QuerySetBase() {
- // Uninitialized or already destroyed
- ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
- }
-
- void QuerySetBase::DestroyImpl() {
- mState = QuerySetState::Destroyed;
- }
-
- // static
- QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
- return new ErrorQuerySet(device);
- }
-
- ObjectType QuerySetBase::GetType() const {
- return ObjectType::QuerySet;
- }
-
- wgpu::QueryType QuerySetBase::GetQueryType() const {
- return mQueryType;
- }
-
- uint32_t QuerySetBase::GetQueryCount() const {
- return mQueryCount;
- }
-
- const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
- return mPipelineStatistics;
- }
-
- const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
- return mQueryAvailability;
- }
-
- void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
- mQueryAvailability[index] = available;
- }
-
- MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
- return {};
- }
-
- void QuerySetBase::APIDestroy() {
- if (GetDevice()->ConsumedError(ValidateDestroy())) {
- return;
- }
- Destroy();
- }
-
- MaybeError QuerySetBase::ValidateDestroy() const {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.h b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
deleted file mode 100644
index 58ebc1a926e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_QUERYSET_H_
-#define DAWNNATIVE_QUERYSET_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
-
- class QuerySetBase : public ApiObjectBase {
- public:
- QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
-
- static QuerySetBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- wgpu::QueryType GetQueryType() const;
- uint32_t GetQueryCount() const;
- const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
-
- const std::vector<bool>& GetQueryAvailability() const;
- void SetQueryAvailability(uint32_t index, bool available);
-
- MaybeError ValidateCanUseInSubmitNow() const;
-
- void APIDestroy();
-
- protected:
- QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // Constructor used only for mocking and testing.
- QuerySetBase(DeviceBase* device);
- void DestroyImpl() override;
-
- ~QuerySetBase() override;
-
- private:
- MaybeError ValidateDestroy() const;
-
- wgpu::QueryType mQueryType;
- uint32_t mQueryCount;
- std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
-
- enum class QuerySetState { Unavailable, Available, Destroyed };
- QuerySetState mState = QuerySetState::Unavailable;
-
- // Indicates the available queries on the query set for resolving
- std::vector<bool> mQueryAvailability;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_QUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
deleted file mode 100644
index 0fb7ab23de6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Queue.h"
-
-#include "common/Constants.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/CopyTextureForBrowserHelper.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/RenderPassEncoder.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/Texture.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-#include <cstring>
-
-namespace dawn_native {
-
- namespace {
-
- void CopyTextureData(uint8_t* dstPointer,
- const uint8_t* srcPointer,
- uint32_t depth,
- uint32_t rowsPerImage,
- uint64_t imageAdditionalStride,
- uint32_t actualBytesPerRow,
- uint32_t dstBytesPerRow,
- uint32_t srcBytesPerRow) {
- bool copyWholeLayer =
- actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
- bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
-
- if (!copyWholeLayer) { // copy row by row
- for (uint32_t d = 0; d < depth; ++d) {
- for (uint32_t h = 0; h < rowsPerImage; ++h) {
- memcpy(dstPointer, srcPointer, actualBytesPerRow);
- dstPointer += dstBytesPerRow;
- srcPointer += srcBytesPerRow;
- }
- srcPointer += imageAdditionalStride;
- }
- } else {
- uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
- if (!copyWholeData) { // copy layer by layer
- for (uint32_t d = 0; d < depth; ++d) {
- memcpy(dstPointer, srcPointer, layerSize);
- dstPointer += layerSize;
- srcPointer += layerSize + imageAdditionalStride;
- }
- } else { // do a single copy
- memcpy(dstPointer, srcPointer, layerSize * depth);
- }
- }
- }
-
- ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
- DeviceBase* device,
- const void* data,
- uint32_t alignedBytesPerRow,
- uint32_t optimallyAlignedBytesPerRow,
- uint32_t alignedRowsPerImage,
- const TextureDataLayout& dataLayout,
- bool hasDepthOrStencil,
- const TexelBlockInfo& blockInfo,
- const Extent3D& writeSizePixel) {
- uint64_t newDataSizeBytes;
- DAWN_TRY_ASSIGN(
- newDataSizeBytes,
- ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
- alignedRowsPerImage));
-
- uint64_t optimalOffsetAlignment =
- device->GetOptimalBufferToTextureCopyOffsetAlignment();
- ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
- ASSERT(IsPowerOfTwo(blockInfo.byteSize));
- // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
- // since both of them are powers of two, we only need to align to the max value.
- uint64_t offsetAlignment =
- std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
-
- // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
- // by WebGPU and Vulkan SPEC.
- if (hasDepthOrStencil) {
- constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
- offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
- }
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- newDataSizeBytes, device->GetPendingCommandSerial(),
- offsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
-
- uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
- const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
- srcPointer += dataLayout.offset;
-
- uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
- if (dataRowsPerImage == 0) {
- dataRowsPerImage = writeSizePixel.height / blockInfo.height;
- }
-
- ASSERT(dataRowsPerImage >= alignedRowsPerImage);
- uint64_t imageAdditionalStride =
- dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
-
- CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
- alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
- optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
-
- return uploadHandle;
- }
-
- struct SubmittedWorkDone : QueueBase::TaskInFlight {
- SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
- : mCallback(callback), mUserdata(userdata) {
- }
- void Finish() override {
- ASSERT(mCallback != nullptr);
- mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
- mCallback = nullptr;
- }
- void HandleDeviceLoss() override {
- ASSERT(mCallback != nullptr);
- mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
- mCallback = nullptr;
- }
- ~SubmittedWorkDone() override = default;
-
- private:
- WGPUQueueWorkDoneCallback mCallback = nullptr;
- void* mUserdata;
- };
-
- class ErrorQueue : public QueueBase {
- public:
- ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
- }
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount,
- CommandBufferBase* const* commands) override {
- UNREACHABLE();
- }
- };
- } // namespace
-
- // QueueBase
-
- QueueBase::TaskInFlight::~TaskInFlight() {
- }
-
- QueueBase::QueueBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- }
-
- QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- QueueBase::~QueueBase() {
- ASSERT(mTasksInFlight.Empty());
- }
-
- void QueueBase::DestroyImpl() {
- }
-
- // static
- QueueBase* QueueBase::MakeError(DeviceBase* device) {
- return new ErrorQueue(device);
- }
-
- ObjectType QueueBase::GetType() const {
- return ObjectType::Queue;
- }
-
- void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
- SubmitInternal(commandCount, commands);
-
- for (uint32_t i = 0; i < commandCount; ++i) {
- commands[i]->Destroy();
- }
- }
-
- void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata) {
- // The error status depends on the type of error so we let the validation function choose it
- WGPUQueueWorkDoneStatus status;
- if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
- callback(status, userdata);
- return;
- }
-
- std::unique_ptr<SubmittedWorkDone> task =
- std::make_unique<SubmittedWorkDone>(callback, userdata);
-
- // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
- // also used to make sure ALL queue work is finished in tests, so we also wait for pending
- // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
- // spec).
- TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
- }
-
- void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
- mTasksInFlight.Enqueue(std::move(task), serial);
- GetDevice()->AddFutureSerial(serial);
- }
-
- void QueueBase::Tick(ExecutionSerial finishedSerial) {
- // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
- // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
- // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
- // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
- // callbacks.
- std::vector<std::unique_ptr<TaskInFlight>> tasks;
- for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
- tasks.push_back(std::move(task));
- }
- mTasksInFlight.ClearUpTo(finishedSerial);
-
- for (auto& task : tasks) {
- task->Finish();
- }
- }
-
- void QueueBase::HandleDeviceLoss() {
- for (auto& task : mTasksInFlight.IterateAll()) {
- task->HandleDeviceLoss();
- }
- mTasksInFlight.Clear();
- }
-
- void QueueBase::APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
- }
-
- MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- return WriteBufferImpl(buffer, bufferOffset, data, size);
- }
-
- MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- if (size == 0) {
- return {};
- }
-
- DeviceBase* device = GetDevice();
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
-
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- device->AddFutureSerial(device->GetPendingCommandSerial());
-
- return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
- buffer, bufferOffset, size);
- }
-
- void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize) {
- GetDevice()->ConsumedError(
- WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
- }
-
- MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) {
- DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
-
- if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
- return {};
- }
-
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- TextureDataLayout layout = dataLayout;
- ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
- return WriteTextureImpl(*destination, data, layout, *writeSize);
- }
-
- MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) {
- const Format& format = destination.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
-
- // We are only copying the part of the data that will appear in the texture.
- // Note that validating texture copy range ensures that writeSizePixel->width and
- // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
- ASSERT(writeSizePixel.width % blockInfo.width == 0);
- ASSERT(writeSizePixel.height % blockInfo.height == 0);
- uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
- uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
-
- uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
- uint32_t optimallyAlignedBytesPerRow =
- Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- UploadTextureDataAligningBytesPerRowAndOffset(
- GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
- alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
- writeSizePixel));
-
- TextureDataLayout passDataLayout = dataLayout;
- passDataLayout.offset = uploadHandle.startOffset;
- passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
- passDataLayout.rowsPerImage = alignedRowsPerImage;
-
- TextureCopy textureCopy;
- textureCopy.texture = destination.texture;
- textureCopy.mipLevel = destination.mipLevel;
- textureCopy.origin = destination.origin;
- textureCopy.aspect = ConvertAspect(format, destination.aspect);
-
- DeviceBase* device = GetDevice();
-
- device->AddFutureSerial(device->GetPendingCommandSerial());
-
- return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
- &textureCopy, writeSizePixel);
- }
-
- void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- GetDevice()->ConsumedError(
- CopyTextureForBrowserInternal(source, destination, copySize, options));
- }
-
- MaybeError QueueBase::CopyTextureForBrowserInternal(
- const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY_CONTEXT(
- ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
- "validating CopyTextureForBrowser from %s to %s", source->texture,
- destination->texture);
- }
-
- return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
- }
-
- MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
- CommandBufferBase* const* commands) const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
- DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
-
- const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
-
- for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
- for (const BufferBase* buffer : scope.buffers) {
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- }
-
- for (const TextureBase* texture : scope.textures) {
- DAWN_TRY(texture->ValidateCanUseInSubmitNow());
- }
-
- for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
- DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
- }
- }
-
- for (const ComputePassResourceUsage& pass : usages.computePasses) {
- for (const BufferBase* buffer : pass.referencedBuffers) {
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- }
- for (const TextureBase* texture : pass.referencedTextures) {
- DAWN_TRY(texture->ValidateCanUseInSubmitNow());
- }
- for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
- DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
- }
- }
-
- for (const BufferBase* buffer : usages.topLevelBuffers) {
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
- }
- for (const TextureBase* texture : usages.topLevelTextures) {
- DAWN_TRY(texture->ValidateCanUseInSubmitNow());
- }
- for (const QuerySetBase* querySet : usages.usedQuerySets) {
- DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
- }
- }
-
- return {};
- }
-
- MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneStatus* status) const {
- *status = WGPUQueueWorkDoneStatus_DeviceLost;
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- *status = WGPUQueueWorkDoneStatus_Error;
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
-
- return {};
- }
-
- MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
-
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
-
- DAWN_INVALID_IF(dataLayout.offset > dataSize,
- "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
- dataSize);
-
- DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
- "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
- destination->texture, wgpu::TextureUsage::CopyDst);
-
- DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
- "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
- destination->texture);
-
- DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
-
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
-
- DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
-
- DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
-
- return {};
- }
-
- void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
- DeviceBase* device = GetDevice();
- if (device->ConsumedError(device->ValidateIsAlive())) {
- // If device is lost, don't let any commands be submitted
- return;
- }
-
- TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
- if (device->IsValidationEnabled() &&
- device->ConsumedError(ValidateSubmit(commandCount, commands))) {
- return;
- }
- ASSERT(!IsError());
-
- if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
- return;
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
deleted file mode 100644
index 204a4ae0a94..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_QUEUE_H_
-#define DAWNNATIVE_QUEUE_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- class QueueBase : public ApiObjectBase {
- public:
- struct TaskInFlight {
- virtual ~TaskInFlight();
- virtual void Finish() = 0;
- virtual void HandleDeviceLoss() = 0;
- };
-
- ~QueueBase() override;
-
- static QueueBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Dawn API
- void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
- void APIOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata);
- void APIWriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- void APIWriteTexture(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize);
- void APICopyTextureForBrowser(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
- MaybeError WriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
- void Tick(ExecutionSerial finishedSerial);
- void HandleDeviceLoss();
-
- protected:
- QueueBase(DeviceBase* device);
- QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- void DestroyImpl() override;
-
- private:
- MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize);
- MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
- const ImageCopyTexture* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
- virtual MaybeError SubmitImpl(uint32_t commandCount,
- CommandBufferBase* const* commands) = 0;
- virtual MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSize);
-
- MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
- MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneStatus* status) const;
- MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
- size_t dataSize,
- const TextureDataLayout& dataLayout,
- const Extent3D* writeSize) const;
-
- void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
-
- SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_QUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
deleted file mode 100644
index 9ee937070f6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RenderBundle.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/RenderBundleEncoder.h"
-
-namespace dawn_native {
-
- RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
- const RenderBundleDescriptor* descriptor,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly,
- RenderPassResourceUsage resourceUsage,
- IndirectDrawMetadata indirectDrawMetadata)
- : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
- mCommands(encoder->AcquireCommands()),
- mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
- mAttachmentState(std::move(attachmentState)),
- mDepthReadOnly(depthReadOnly),
- mStencilReadOnly(stencilReadOnly),
- mResourceUsage(std::move(resourceUsage)) {
- TrackInDevice();
- }
-
- void RenderBundleBase::DestroyImpl() {
- FreeCommands(&mCommands);
-
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- // static
- RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
- return new RenderBundleBase(device, ObjectBase::kError);
- }
-
- RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
- : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
- }
-
- ObjectType RenderBundleBase::GetType() const {
- return ObjectType::RenderBundle;
- }
-
- CommandIterator* RenderBundleBase::GetCommands() {
- return &mCommands;
- }
-
- const AttachmentState* RenderBundleBase::GetAttachmentState() const {
- ASSERT(!IsError());
- return mAttachmentState.Get();
- }
-
- bool RenderBundleBase::IsDepthReadOnly() const {
- ASSERT(!IsError());
- return mDepthReadOnly;
- }
-
- bool RenderBundleBase::IsStencilReadOnly() const {
- ASSERT(!IsError());
- return mStencilReadOnly;
- }
-
- const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
- ASSERT(!IsError());
- return mResourceUsage;
- }
-
- const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
- return mIndirectDrawMetadata;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
deleted file mode 100644
index 4ad8dafbc02..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RENDERBUNDLE_H_
-#define DAWNNATIVE_RENDERBUNDLE_H_
-
-#include "common/Constants.h"
-#include "dawn_native/AttachmentState.h"
-#include "dawn_native/CommandAllocator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IndirectDrawMetadata.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PassResourceUsage.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <bitset>
-
-namespace dawn_native {
-
- struct RenderBundleDescriptor;
- class RenderBundleEncoder;
-
- class RenderBundleBase final : public ApiObjectBase {
- public:
- RenderBundleBase(RenderBundleEncoder* encoder,
- const RenderBundleDescriptor* descriptor,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly,
- RenderPassResourceUsage resourceUsage,
- IndirectDrawMetadata indirectDrawMetadata);
-
- static RenderBundleBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- CommandIterator* GetCommands();
-
- const AttachmentState* GetAttachmentState() const;
- bool IsDepthReadOnly() const;
- bool IsStencilReadOnly() const;
- const RenderPassResourceUsage& GetResourceUsage() const;
- const IndirectDrawMetadata& GetIndirectDrawMetadata();
-
- private:
- RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
-
- void DestroyImpl() override;
-
- CommandIterator mCommands;
- IndirectDrawMetadata mIndirectDrawMetadata;
- Ref<AttachmentState> mAttachmentState;
- bool mDepthReadOnly;
- bool mStencilReadOnly;
- RenderPassResourceUsage mResourceUsage;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RENDERBUNDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
deleted file mode 100644
index f7aab722104..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RenderBundleEncoder.h"
-
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-namespace dawn_native {
-
- MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
- wgpu::TextureFormat textureFormat) {
- DAWN_TRY(ValidateTextureFormat(textureFormat));
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
- "Texture format %s is not color renderable.", textureFormat);
- return {};
- }
-
- MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
- wgpu::TextureFormat textureFormat,
- bool depthReadOnly,
- bool stencilReadOnly) {
- DAWN_TRY(ValidateTextureFormat(textureFormat));
- const Format* format = nullptr;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
- "Texture format %s is not depth/stencil renderable.", textureFormat);
-
- DAWN_INVALID_IF(
- format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
- "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
- "both depth and stencil aspects.",
- depthReadOnly, stencilReadOnly, textureFormat);
-
- return {};
- }
-
- MaybeError ValidateRenderBundleEncoderDescriptor(
- const DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor) {
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
- "Sample count (%u) is not supported.", descriptor->sampleCount);
-
- DAWN_INVALID_IF(
- descriptor->colorFormatsCount > kMaxColorAttachments,
- "Color formats count (%u) exceeds maximum number of color attachements (%u).",
- descriptor->colorFormatsCount, kMaxColorAttachments);
-
- DAWN_INVALID_IF(descriptor->colorFormatsCount == 0 &&
- descriptor->depthStencilFormat == wgpu::TextureFormat::Undefined,
- "No color or depth/stencil attachment formats specified.");
-
- for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, descriptor->colorFormats[i]),
- "validating colorFormats[%u]", i);
- }
-
- if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
- DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
- device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
- descriptor->stencilReadOnly),
- "validating depthStencilFormat");
- }
-
- return {};
- }
-
- RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor)
- : RenderEncoderBase(device,
- descriptor->label,
- &mBundleEncodingContext,
- device->GetOrCreateAttachmentState(descriptor),
- descriptor->depthReadOnly,
- descriptor->stencilReadOnly),
- mBundleEncodingContext(device, this) {
- TrackInDevice();
- }
-
- RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
- : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
- mBundleEncodingContext(device, this) {
- }
-
- void RenderBundleEncoder::DestroyImpl() {
- RenderEncoderBase::DestroyImpl();
- mBundleEncodingContext.Destroy();
- }
-
- // static
- Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
- DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor) {
- return AcquireRef(new RenderBundleEncoder(device, descriptor));
- }
-
- // static
- RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
- return new RenderBundleEncoder(device, ObjectBase::kError);
- }
-
- ObjectType RenderBundleEncoder::GetType() const {
- return ObjectType::RenderBundleEncoder;
- }
-
- CommandIterator RenderBundleEncoder::AcquireCommands() {
- return mBundleEncodingContext.AcquireCommands();
- }
-
- RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
- RenderBundleBase* result = nullptr;
-
- if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).",
- this, descriptor)) {
- return RenderBundleBase::MakeError(GetDevice());
- }
-
- return result;
- }
-
- ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
- const RenderBundleDescriptor* descriptor) {
- // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
- // internal state of the encoding context. Subsequent calls to encode commands will generate
- // errors.
- DAWN_TRY(mBundleEncodingContext.Finish());
-
- RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- DAWN_TRY(ValidateFinish(usages));
- }
-
- return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
- IsStencilReadOnly(), std::move(usages),
- std::move(mIndirectDrawMetadata));
- }
-
- MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
- TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
deleted file mode 100644
index 11411b05b9f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RENDERBUNDLEENCODER_H_
-#define DAWNNATIVE_RENDERBUNDLEENCODER_H_
-
-#include "dawn_native/EncodingContext.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/RenderEncoderBase.h"
-
-namespace dawn_native {
-
- MaybeError ValidateRenderBundleEncoderDescriptor(
- const DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor);
-
- class RenderBundleEncoder final : public RenderEncoderBase {
- public:
- static Ref<RenderBundleEncoder> Create(DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor);
- static RenderBundleEncoder* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
-
- CommandIterator AcquireCommands();
-
- private:
- RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
- RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
-
- void DestroyImpl() override;
-
- ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
- MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
-
- EncodingContext mBundleEncodingContext;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
deleted file mode 100644
index b30f5173a7b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RenderEncoderBase.h"
-
-#include "common/Constants.h"
-#include "common/Log.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <math.h>
-#include <cstring>
-
-namespace dawn_native {
-
- RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly)
- : ProgrammableEncoder(device, label, encodingContext),
- mIndirectDrawMetadata(device->GetLimits()),
- mAttachmentState(std::move(attachmentState)),
- mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
- mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
- mDepthReadOnly = depthReadOnly;
- mStencilReadOnly = stencilReadOnly;
- }
-
- RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : ProgrammableEncoder(device, encodingContext, errorTag),
- mIndirectDrawMetadata(device->GetLimits()),
- mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
- mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
- }
-
- void RenderEncoderBase::DestroyImpl() {
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
- ASSERT(!IsError());
- ASSERT(mAttachmentState != nullptr);
- return mAttachmentState.Get();
- }
-
- bool RenderEncoderBase::IsDepthReadOnly() const {
- ASSERT(!IsError());
- return mDepthReadOnly;
- }
-
- bool RenderEncoderBase::IsStencilReadOnly() const {
- ASSERT(!IsError());
- return mStencilReadOnly;
- }
-
- Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
- return std::move(mAttachmentState);
- }
-
- void RenderEncoderBase::APIDraw(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
- "First instance (%u) must be zero.", firstInstance);
-
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
- firstVertex));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
- instanceCount, firstInstance));
- }
-
- DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
- draw->vertexCount = vertexCount;
- draw->instanceCount = instanceCount;
- draw->firstVertex = firstVertex;
- draw->firstInstance = firstInstance;
-
- return {};
- },
- "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
- firstInstance);
- }
-
- void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
- "First instance (%u) must be zero.", firstInstance);
-
- DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
- "Base vertex (%u) must be zero.", baseVertex);
-
- DAWN_TRY(
- mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
-
- // Although we don't know actual vertex access range in CPU, we still call the
- // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
- // mode vertex buffer with an array stride of zero.
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
- instanceCount, firstInstance));
- }
-
- DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
- draw->indexCount = indexCount;
- draw->instanceCount = instanceCount;
- draw->firstIndex = firstIndex;
- draw->baseVertex = baseVertex;
- draw->firstInstance = firstInstance;
-
- return {};
- },
- "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount,
- firstIndex, baseVertex, firstInstance);
- }
-
- void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
- "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
- indirectOffset, indirectBuffer, indirectBuffer->GetSize());
- }
-
- DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
- cmd->indirectBuffer = indirectBuffer;
- cmd->indirectOffset = indirectOffset;
-
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
- return {};
- },
- "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
- }
-
- void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- DAWN_INVALID_IF(indirectOffset % 4 != 0,
- "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
- DAWN_INVALID_IF(
- (indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
- "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
- indirectOffset, indirectBuffer, indirectBuffer->GetSize());
- }
-
- DrawIndexedIndirectCmd* cmd =
- allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
- if (IsValidationEnabled()) {
- // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
- // buffer which will store the validated indirect data. The buffer and offset
- // will be updated to point to it.
- // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
- // render pass, while the |cmd| pointer is still valid.
- cmd->indirectBuffer = nullptr;
-
- mIndirectDrawMetadata.AddIndexedIndirectDraw(
- mCommandBufferState.GetIndexFormat(),
- mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
- cmd);
- } else {
- cmd->indirectBuffer = indirectBuffer;
- cmd->indirectOffset = indirectOffset;
- }
-
- // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
- // validation, but it will unecessarily transition to indirectBuffer usage in the
- // backend.
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
- return {};
- },
- "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
- }
-
- void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
-
- // TODO(dawn:563): More detail about why the states are incompatible would be
- // nice.
- DAWN_INVALID_IF(
- pipeline->GetAttachmentState() != mAttachmentState.Get(),
- "Attachment state of %s is not compatible with the attachment state of %s",
- pipeline, this);
-
- DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
- "%s writes depth while %s's depthReadOnly is true", pipeline,
- this);
-
- DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
- "%s writes stencil while %s's stencilReadOnly is true",
- pipeline, this);
- }
-
- mCommandBufferState.SetRenderPipeline(pipeline);
-
- SetRenderPipelineCmd* cmd =
- allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
- cmd->pipeline = pipeline;
-
- return {};
- },
- "encoding %s.SetPipeline(%s).", this, pipeline);
- }
-
- void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
-
- DAWN_TRY(ValidateIndexFormat(format));
-
- DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
- "Index format must be specified");
-
- DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
- "Index buffer offset (%u) is not a multiple of the size (%u)"
- "of %s.",
- offset, IndexFormatSize(format), format);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Index buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
-
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Index buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) of "
- "%s.",
- offset, size, bufferSize, buffer);
- }
- } else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
- }
-
- mCommandBufferState.SetIndexBuffer(format, size);
-
- SetIndexBufferCmd* cmd =
- allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
- cmd->buffer = buffer;
- cmd->format = format;
- cmd->offset = offset;
- cmd->size = size;
-
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
-
- return {};
- },
- "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
- }
-
- void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
- BufferBase* buffer,
- uint64_t offset,
- uint64_t size) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
-
- DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
- "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
- kMaxVertexBuffers - 1);
-
- DAWN_INVALID_IF(offset % 4 != 0,
- "Vertex buffer offset (%u) is not a multiple of 4", offset);
-
- uint64_t bufferSize = buffer->GetSize();
- DAWN_INVALID_IF(offset > bufferSize,
- "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
- offset, bufferSize, buffer);
-
- uint64_t remainingSize = bufferSize - offset;
-
- if (size == wgpu::kWholeSize) {
- size = remainingSize;
- } else {
- DAWN_INVALID_IF(size > remainingSize,
- "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
- "the size (%u) "
- "of %s.",
- offset, size, bufferSize, buffer);
- }
- } else {
- if (size == wgpu::kWholeSize) {
- DAWN_ASSERT(buffer->GetSize() >= offset);
- size = buffer->GetSize() - offset;
- }
- }
-
- mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
-
- SetVertexBufferCmd* cmd =
- allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
- cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
- cmd->buffer = buffer;
- cmd->offset = offset;
- cmd->size = size;
-
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
-
- return {};
- },
- "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
- }
-
- void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
-
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets));
- }
-
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
- dynamicOffsets);
- mUsageTracker.AddBindGroup(group);
-
- return {};
- },
- "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
- dynamicOffsetCount);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
deleted file mode 100644
index 21531782597..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RENDERENCODERBASE_H_
-#define DAWNNATIVE_RENDERENCODERBASE_H_
-
-#include "dawn_native/AttachmentState.h"
-#include "dawn_native/CommandBufferStateTracker.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/IndirectDrawMetadata.h"
-#include "dawn_native/PassResourceUsageTracker.h"
-#include "dawn_native/ProgrammableEncoder.h"
-
-namespace dawn_native {
-
- class RenderEncoderBase : public ProgrammableEncoder {
- public:
- RenderEncoderBase(DeviceBase* device,
- const char* label,
- EncodingContext* encodingContext,
- Ref<AttachmentState> attachmentState,
- bool depthReadOnly,
- bool stencilReadOnly);
-
- void APIDraw(uint32_t vertexCount,
- uint32_t instanceCount = 1,
- uint32_t firstVertex = 0,
- uint32_t firstInstance = 0);
- void APIDrawIndexed(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance);
-
- void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
-
- void APISetPipeline(RenderPipelineBase* pipeline);
-
- void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
- void APISetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size);
-
- void APISetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
-
- const AttachmentState* GetAttachmentState() const;
- bool IsDepthReadOnly() const;
- bool IsStencilReadOnly() const;
- Ref<AttachmentState> AcquireAttachmentState();
-
- protected:
- // Construct an "error" render encoder base.
- RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
-
- void DestroyImpl() override;
-
- CommandBufferStateTracker mCommandBufferState;
- RenderPassResourceUsageTracker mUsageTracker;
- IndirectDrawMetadata mIndirectDrawMetadata;
-
- private:
- Ref<AttachmentState> mAttachmentState;
- const bool mDisableBaseVertex;
- const bool mDisableBaseInstance;
- bool mDepthReadOnly = false;
- bool mStencilReadOnly = false;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RENDERENCODERBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
deleted file mode 100644
index 279fe45a524..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RenderPassEncoder.h"
-
-#include "common/Constants.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/RenderPipeline.h"
-
-#include <math.h>
-#include <cstring>
-
-namespace dawn_native {
- namespace {
-
- // Check the query at queryIndex is unavailable, otherwise it cannot be written.
- MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
- uint32_t queryIndex,
- const QueryAvailabilityMap& queryAvailabilityMap) {
- auto it = queryAvailabilityMap.find(querySet);
- DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
- "Query index %u of %s is written to twice in a render pass.",
- queryIndex, querySet);
-
- return {};
- }
-
- } // namespace
-
- // The usage tracker is passed in here, because it is prepopulated with usages from the
- // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
- // command, then this wouldn't be necessary.
- RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- QuerySetBase* occlusionQuerySet,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly)
- : RenderEncoderBase(device,
- descriptor->label,
- encodingContext,
- std::move(attachmentState),
- depthReadOnly,
- stencilReadOnly),
- mCommandEncoder(commandEncoder),
- mRenderTargetWidth(renderTargetWidth),
- mRenderTargetHeight(renderTargetHeight),
- mOcclusionQuerySet(occlusionQuerySet) {
- mUsageTracker = std::move(usageTracker);
- TrackInDevice();
- }
-
- RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
- : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
- }
-
- RenderPassEncoder* RenderPassEncoder::MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext) {
- return new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
- }
-
- void RenderPassEncoder::DestroyImpl() {
- RenderEncoderBase::DestroyImpl();
- // Ensure that the pass has exited. This is done for passes only since validation requires
- // they exit before destruction while bundles do not.
- mEncodingContext->EnsurePassExited(this);
- }
-
- ObjectType RenderPassEncoder::GetType() const {
- return ObjectType::RenderPassEncoder;
- }
-
- void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
- DAWN_ASSERT(querySet != nullptr);
-
- // Track the query availability with true on render pass for rewrite validation and query
- // reset on render pass on Vulkan
- mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
-
- // Track it again on command encoder for zero-initializing when resolving unused queries.
- mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
- }
-
- void RenderPassEncoder::APIEndPass() {
- if (mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
-
- DAWN_INVALID_IF(
- mOcclusionQueryActive,
- "Render pass %s ended with incomplete occlusion query index %u of %s.",
- this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
- }
-
- allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
- DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
- mCommandEncoder.Get(),
- std::move(mIndirectDrawMetadata)));
- return {};
- },
- "encoding %s.EndPass().", this)) {
- }
- }
-
- void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- SetStencilReferenceCmd* cmd =
- allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
- cmd->reference = reference;
-
- return {};
- },
- "encoding %s.SetStencilReference(%u).", this, reference);
- }
-
- void RenderPassEncoder::APISetBlendConstant(const Color* color) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- SetBlendConstantCmd* cmd =
- allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
- cmd->color = *color;
-
- return {};
- },
- "encoding %s.SetBlendConstant(%s).", this, color);
- }
-
- void RenderPassEncoder::APISetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
- isnan(maxDepth)),
- "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
- "minDepth: %f, maxDepth: %f) is NaN.",
- x, y, width, height, minDepth, maxDepth);
-
- DAWN_INVALID_IF(
- x < 0 || y < 0 || width < 0 || height < 0,
- "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
- "value.",
- x, y, width, height);
-
- DAWN_INVALID_IF(
- x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
- "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
- "in "
- "the render target dimensions (%u x %u).",
- x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
-
- // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
- DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
- "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
- "minDepth was "
- "greater than maxDepth.",
- minDepth, maxDepth);
- }
-
- SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
- cmd->minDepth = minDepth;
- cmd->maxDepth = maxDepth;
-
- return {};
- },
- "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
- maxDepth);
- }
-
- void RenderPassEncoder::APISetScissorRect(uint32_t x,
- uint32_t y,
- uint32_t width,
- uint32_t height) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(
- width > mRenderTargetWidth || height > mRenderTargetHeight ||
- x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
- "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
- "the render target dimensions (%u x %u).",
- x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
- }
-
- SetScissorRectCmd* cmd =
- allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
-
- return {};
- },
- "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
- }
-
- void RenderPassEncoder::APIExecuteBundles(uint32_t count,
- RenderBundleBase* const* renderBundles) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- const AttachmentState* attachmentState = GetAttachmentState();
- bool depthReadOnlyInPass = IsDepthReadOnly();
- bool stencilReadOnlyInPass = IsStencilReadOnly();
- for (uint32_t i = 0; i < count; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
-
- // TODO(dawn:563): Give more detail about why the states are incompatible.
- DAWN_INVALID_IF(
- attachmentState != renderBundles[i]->GetAttachmentState(),
- "Attachment state of renderBundles[%i] (%s) is not compatible with "
- "attachment state of %s.",
- i, renderBundles[i], this);
-
- bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
- DAWN_INVALID_IF(
- depthReadOnlyInPass && !depthReadOnlyInBundle,
- "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
- "with DepthReadOnly (%u) of %s.",
- depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass, this);
-
- bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
- DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
- "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
- "compatible with StencilReadOnly (%u) of %s.",
- stencilReadOnlyInBundle, i, renderBundles[i],
- stencilReadOnlyInPass, this);
- }
- }
-
- mCommandBufferState = CommandBufferStateTracker{};
-
- ExecuteBundlesCmd* cmd =
- allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
- cmd->count = count;
-
- Ref<RenderBundleBase>* bundles =
- allocator->AllocateData<Ref<RenderBundleBase>>(count);
- for (uint32_t i = 0; i < count; ++i) {
- bundles[i] = renderBundles[i];
-
- const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
- for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
- mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
- }
-
- for (uint32_t i = 0; i < usages.textures.size(); ++i) {
- mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
- usages.textureUsages[i]);
- }
-
- if (IsValidationEnabled()) {
- mIndirectDrawMetadata.AddBundle(renderBundles[i]);
- }
- }
-
- return {};
- },
- "encoding %s.ExecuteBundles(%u, ...).", this, count);
- }
-
- void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
- "The occlusionQuerySet in RenderPassDescriptor is not set.");
-
- // The type of querySet has been validated by ValidateRenderPassDescriptor
-
- DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
- "Query index (%u) exceeds the number of queries (%u) in %s.",
- queryIndex, mOcclusionQuerySet->GetQueryCount(),
- mOcclusionQuerySet.Get());
-
- DAWN_INVALID_IF(mOcclusionQueryActive,
- "An occlusion query (%u) in %s is already active.",
- mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
-
- DAWN_TRY_CONTEXT(
- ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()),
- "validating the occlusion query index (%u) in %s", queryIndex,
- mOcclusionQuerySet.Get());
- }
-
- // Record the current query index for endOcclusionQuery.
- mCurrentOcclusionQueryIndex = queryIndex;
- mOcclusionQueryActive = true;
-
- BeginOcclusionQueryCmd* cmd =
- allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = queryIndex;
-
- return {};
- },
- "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
- }
-
- void RenderPassEncoder::APIEndOcclusionQuery() {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
- }
-
- TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
-
- mOcclusionQueryActive = false;
-
- EndOcclusionQueryCmd* cmd =
- allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = mCurrentOcclusionQueryIndex;
-
- return {};
- },
- "encoding %s.EndOcclusionQuery().", this);
- }
-
- void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(
- this,
- [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- DAWN_TRY_CONTEXT(
- ValidateQueryIndexOverwrite(querySet, queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()),
- "validating the timestamp query index (%u) of %s", queryIndex, querySet);
- }
-
- TrackQueryAvailability(querySet, queryIndex);
-
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
-
- return {};
- },
- "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
deleted file mode 100644
index adaa8da1f25..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RENDERPASSENCODER_H_
-#define DAWNNATIVE_RENDERPASSENCODER_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/RenderEncoderBase.h"
-
-namespace dawn_native {
-
- class RenderBundleBase;
-
- class RenderPassEncoder final : public RenderEncoderBase {
- public:
- RenderPassEncoder(DeviceBase* device,
- const RenderPassDescriptor* descriptor,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- RenderPassResourceUsageTracker usageTracker,
- Ref<AttachmentState> attachmentState,
- QuerySetBase* occlusionQuerySet,
- uint32_t renderTargetWidth,
- uint32_t renderTargetHeight,
- bool depthReadOnly,
- bool stencilReadOnly);
-
- static RenderPassEncoder* MakeError(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext);
-
- ObjectType GetType() const override;
-
- void APIEndPass();
-
- void APISetStencilReference(uint32_t reference);
- void APISetBlendConstant(const Color* color);
- void APISetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth);
- void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
- void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
-
- void APIBeginOcclusionQuery(uint32_t queryIndex);
- void APIEndOcclusionQuery();
-
- void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- protected:
- RenderPassEncoder(DeviceBase* device,
- CommandEncoder* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
-
- private:
- void DestroyImpl() override;
-
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
-
- // For render and compute passes, the encoding context is borrowed from the command encoder.
- // Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoder> mCommandEncoder;
-
- uint32_t mRenderTargetWidth;
- uint32_t mRenderTargetHeight;
-
- // The resources for occlusion query
- Ref<QuerySetBase> mOcclusionQuerySet;
- uint32_t mCurrentOcclusionQueryIndex = 0;
- bool mOcclusionQueryActive = false;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RENDERPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
deleted file mode 100644
index fb9ad467681..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ /dev/null
@@ -1,1052 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RenderPipeline.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/InternalPipelineStore.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-#include "dawn_native/VertexFormat.h"
-
-#include <cmath>
-#include <sstream>
-
-namespace dawn_native {
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- VertexFormatBaseType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case VertexFormatBaseType::Float:
- s->Append("Float");
- break;
- case VertexFormatBaseType::Uint:
- s->Append("Uint");
- break;
- case VertexFormatBaseType::Sint:
- s->Append("Sint");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterStageComponentType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterStageComponentType::Float:
- s->Append("Float");
- break;
- case InterStageComponentType::Uint:
- s->Append("Uint");
- break;
- case InterStageComponentType::Sint:
- s->Append("Sint");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationType value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterpolationType::Perspective:
- s->Append("Perspective");
- break;
- case InterpolationType::Linear:
- s->Append("Linear");
- break;
- case InterpolationType::Flat:
- s->Append("Flat");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- InterpolationSampling value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case InterpolationSampling::None:
- s->Append("None");
- break;
- case InterpolationSampling::Center:
- s->Append("Center");
- break;
- case InterpolationSampling::Centroid:
- s->Append("Centroid");
- break;
- case InterpolationSampling::Sample:
- s->Append("Sample");
- break;
- default:
- UNREACHABLE();
- }
- return {true};
- }
-
- // Helper functions
- namespace {
- MaybeError ValidateVertexAttribute(
- DeviceBase* device,
- const VertexAttribute* attribute,
- const EntryPointMetadata& metadata,
- uint64_t vertexBufferStride,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
- DAWN_TRY(ValidateVertexFormat(attribute->format));
- const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
-
- DAWN_INVALID_IF(
- attribute->shaderLocation >= kMaxVertexAttributes,
- "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
- "(%u).",
- attribute->shaderLocation, kMaxVertexAttributes);
-
- VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
-
- // No underflow is possible because the max vertex format size is smaller than
- // kMaxVertexBufferArrayStride.
- ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
- DAWN_INVALID_IF(
- attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
- "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
- "buffer stride (%u).",
- attribute->offset, attribute->format, formatInfo.byteSize,
- kMaxVertexBufferArrayStride);
-
- // No overflow is possible because the offset is already validated to be less
- // than kMaxVertexBufferArrayStride.
- ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
- DAWN_INVALID_IF(
- vertexBufferStride > 0 &&
- attribute->offset + formatInfo.byteSize > vertexBufferStride,
- "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
- "stride (%u).",
- attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
-
- DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
- "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
- std::min(4u, formatInfo.byteSize));
-
- DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
- formatInfo.baseType != metadata.vertexInputBaseTypes[location],
- "Attribute base type (%s) does not match the "
- "shader's base type (%s) in location (%u).",
- formatInfo.baseType, metadata.vertexInputBaseTypes[location],
- attribute->shaderLocation);
-
- DAWN_INVALID_IF((*attributesSetMask)[location],
- "Attribute shader location (%u) is used more than once.",
- attribute->shaderLocation);
-
- attributesSetMask->set(location);
- return {};
- }
-
- MaybeError ValidateVertexBufferLayout(
- DeviceBase* device,
- const VertexBufferLayout* buffer,
- const EntryPointMetadata& metadata,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
- DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
- DAWN_INVALID_IF(
- buffer->arrayStride > kMaxVertexBufferArrayStride,
- "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
- buffer->arrayStride, kMaxVertexBufferArrayStride);
-
- DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
- "Vertex buffer arrayStride (%u) is not a multiple of 4.",
- buffer->arrayStride);
-
- for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
- buffer->arrayStride, attributesSetMask),
- "validating attributes[%u].", i);
- }
-
- return {};
- }
-
- MaybeError ValidateVertexState(DeviceBase* device,
- const VertexState* descriptor,
- const PipelineLayoutBase* layout) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_INVALID_IF(
- descriptor->bufferCount > kMaxVertexBuffers,
- "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
- descriptor->bufferCount, kMaxVertexBuffers);
-
- DAWN_TRY_CONTEXT(
- ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- descriptor->constantCount, descriptor->constants, layout,
- SingleShaderStage::Vertex),
- "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
- descriptor->entryPoint);
- const EntryPointMetadata& vertexMetadata =
- descriptor->module->GetEntryPoint(descriptor->entryPoint);
-
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
- uint32_t totalAttributesNum = 0;
- for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
- DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
- vertexMetadata, &attributesSetMask),
- "validating buffers[%u].", i);
- totalAttributesNum += descriptor->buffers[i].attributeCount;
- }
-
- // Every vertex attribute has a member called shaderLocation, and there are some
- // requirements for shaderLocation: 1) >=0, 2) values are different across different
- // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
- // attribute number never exceed kMaxVertexAttributes.
- ASSERT(totalAttributesNum <= kMaxVertexAttributes);
-
- // TODO(dawn:563): Specify which inputs were not used in error message.
- DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
- "Pipeline vertex stage uses vertex buffers not in the vertex state");
-
- return {};
- }
-
- MaybeError ValidatePrimitiveState(const DeviceBase* device,
- const PrimitiveState* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::PrimitiveDepthClampingState));
- const PrimitiveDepthClampingState* clampInfo = nullptr;
- FindInChain(descriptor->nextInChain, &clampInfo);
- if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
- return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
- }
- DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
- DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
- DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
- DAWN_TRY(ValidateCullMode(descriptor->cullMode));
-
- // Pipeline descriptors must have stripIndexFormat == undefined if they are using
- // non-strip topologies.
- if (!IsStripPrimitiveTopology(descriptor->topology)) {
- DAWN_INVALID_IF(
- descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
- "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
- "topology (%s).",
- descriptor->stripIndexFormat, descriptor->topology);
- }
-
- return {};
- }
-
- MaybeError ValidateDepthStencilState(const DeviceBase* device,
- const DepthStencilState* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
- DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
- "Depth stencil format (%s) is not depth-stencil renderable.",
- descriptor->format);
-
- DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
- std::isnan(descriptor->depthBiasClamp),
- "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
- descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
-
- return {};
- }
-
- MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
- "Multisample count (%u) is not supported.", descriptor->count);
-
- DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
- "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
- descriptor->count);
-
- return {};
- }
-
- MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
- DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
- DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
- return {};
- }
-
- bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
- return blendFactor == wgpu::BlendFactor::SrcAlpha ||
- blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
- blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
- }
-
- MaybeError ValidateColorTargetState(
- DeviceBase* device,
- const ColorTargetState* descriptor,
- bool fragmentWritten,
- const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- if (descriptor->blend) {
- DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
- "validating blend state.");
- }
-
- DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
- "Color format (%s) is not color renderable.", descriptor->format);
-
- DAWN_INVALID_IF(
- descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
- SampleTypeBit::Float),
- "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
-
- if (fragmentWritten) {
- DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
- format->GetAspectInfo(Aspect::Color).baseType,
- "Color format (%s) base type (%s) doesn't match the fragment "
- "module output type (%s).",
- descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
- fragmentOutputVariable.baseType);
-
- DAWN_INVALID_IF(
- fragmentOutputVariable.componentCount < format->componentCount,
- "The fragment stage has fewer output components (%u) than the color format "
- "(%s) component count (%u).",
- fragmentOutputVariable.componentCount, descriptor->format,
- format->componentCount);
-
- if (descriptor->blend) {
- if (fragmentOutputVariable.componentCount < 4u) {
- // No alpha channel output
- // Make sure there's no alpha involved in the blending operation
- DAWN_INVALID_IF(
- BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
- BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
- "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
- "but it is missing from fragment output.",
- descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
- }
- }
- } else {
- DAWN_INVALID_IF(
- descriptor->writeMask != wgpu::ColorWriteMask::None,
- "Color target has no corresponding fragment stage output but writeMask (%s) is "
- "not zero.",
- descriptor->writeMask);
- }
-
- return {};
- }
-
- MaybeError ValidateFragmentState(DeviceBase* device,
- const FragmentState* descriptor,
- const PipelineLayoutBase* layout) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- DAWN_TRY_CONTEXT(
- ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- descriptor->constantCount, descriptor->constants, layout,
- SingleShaderStage::Fragment),
- "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
- descriptor->entryPoint);
-
- DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
- "Number of targets (%u) exceeds the maximum (%u).",
- descriptor->targetCount, kMaxColorAttachments);
-
- const EntryPointMetadata& fragmentMetadata =
- descriptor->module->GetEntryPoint(descriptor->entryPoint);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
- DAWN_TRY_CONTEXT(
- ValidateColorTargetState(device, &descriptor->targets[static_cast<uint8_t>(i)],
- fragmentMetadata.fragmentOutputsWritten[i],
- fragmentMetadata.fragmentOutputVariables[i]),
- "validating targets[%u].", static_cast<uint8_t>(i));
- }
-
- return {};
- }
-
- MaybeError ValidateInterStageMatching(DeviceBase* device,
- const VertexState& vertexState,
- const FragmentState& fragmentState) {
- const EntryPointMetadata& vertexMetadata =
- vertexState.module->GetEntryPoint(vertexState.entryPoint);
- const EntryPointMetadata& fragmentMetadata =
- fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
-
- // TODO(dawn:563): Can this message give more details?
- DAWN_INVALID_IF(
- vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
- "One or more fragment inputs and vertex outputs are not one-to-one matching");
-
- // TODO(dawn:802): Validate interpolation types and interpolition sampling types
- for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
- const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
- const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
- DAWN_INVALID_IF(
- vertexOutputInfo.baseType != fragmentInputInfo.baseType,
- "The base type (%s) of the vertex output at location %u is different from the "
- "base type (%s) of the fragment input at location %u.",
- vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
- "The component count (%u) of the vertex output at location %u is different "
- "from the component count (%u) of the fragment input at location %u.",
- vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
- "The interpolation type (%s) of the vertex output at location %u is different "
- "from the interpolation type (%s) of the fragment input at location %u.",
- vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
-
- DAWN_INVALID_IF(
- vertexOutputInfo.interpolationSampling !=
- fragmentInputInfo.interpolationSampling,
- "The interpolation sampling (%s) of the vertex output at location %u is "
- "different from the interpolation sampling (%s) of the fragment input at "
- "location %u.",
- vertexOutputInfo.interpolationSampling, i,
- fragmentInputInfo.interpolationSampling, i);
- }
-
- return {};
- }
- } // anonymous namespace
-
- // Helper functions
- size_t IndexFormatSize(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return sizeof(uint16_t);
- case wgpu::IndexFormat::Uint32:
- return sizeof(uint32_t);
- case wgpu::IndexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
- primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
- }
-
- MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- if (descriptor->layout != nullptr) {
- DAWN_TRY(device->ValidateObject(descriptor->layout));
- }
-
- DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
- "validating vertex state.");
-
- DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
- "validating primitive state.");
-
- if (descriptor->depthStencil) {
- DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
- "validating depthStencil state.");
- }
-
- DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
- "validating multisample state.");
-
- if (descriptor->fragment != nullptr) {
- DAWN_TRY_CONTEXT(
- ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
- "validating fragment state.");
-
- DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
- "Must have at least one color or depthStencil target.");
-
- DAWN_TRY(
- ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
- }
-
- return {};
- }
-
- std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
- DeviceBase* device,
- const RenderPipelineDescriptor* descriptor) {
- std::vector<StageAndDescriptor> stages;
- stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
- descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
- descriptor->vertex.constants});
- if (descriptor->fragment != nullptr) {
- stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
- descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
- descriptor->fragment->constants});
- } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
- InternalPipelineStore* store = device->GetInternalPipelineStore();
- // The dummy fragment shader module should already be initialized
- DAWN_ASSERT(store->dummyFragmentShader != nullptr);
- ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
- stages.push_back(
- {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
- }
- return stages;
- }
-
- bool StencilTestEnabled(const DepthStencilState* depthStencil) {
- return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
- depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
- depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
- depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
- }
-
- // RenderPipelineBase
-
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor)
- : PipelineBase(device,
- descriptor->layout,
- descriptor->label,
- GetRenderStagesAndSetDummyShader(device, descriptor)),
- mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
- mVertexBufferCount = descriptor->vertex.bufferCount;
- const VertexBufferLayout* buffers = descriptor->vertex.buffers;
- for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
- if (buffers[slot].attributeCount == 0) {
- continue;
- }
-
- VertexBufferSlot typedSlot(slot);
-
- mVertexBufferSlotsUsed.set(typedSlot);
- mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
- mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
- mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
- switch (buffers[slot].stepMode) {
- case wgpu::VertexStepMode::Vertex:
- mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
- break;
- case wgpu::VertexStepMode::Instance:
- mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
- break;
- default:
- DAWN_UNREACHABLE();
- }
-
- for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
- VertexAttributeLocation location = VertexAttributeLocation(
- static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
- mAttributeLocationsUsed.set(location);
- mAttributeInfos[location].shaderLocation = location;
- mAttributeInfos[location].vertexBufferSlot = typedSlot;
- mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
- mAttributeInfos[location].format = buffers[slot].attributes[i].format;
- // Compute the access boundary of this attribute by adding attribute format size to
- // attribute offset. Although offset is in uint64_t, such sum must be no larger than
- // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
- // validation of creating render pipeline. Therefore, calculating in uint16_t will
- // cause no overflow.
- DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
- uint16_t accessBoundary =
- uint16_t(buffers[slot].attributes[i].offset) +
- uint16_t(GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize);
- mVertexBufferInfos[typedSlot].usedBytesInStride =
- std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
- }
- }
-
- mPrimitive = descriptor->primitive;
- const PrimitiveDepthClampingState* clampInfo = nullptr;
- FindInChain(mPrimitive.nextInChain, &clampInfo);
- if (clampInfo) {
- mClampDepth = clampInfo->clampDepth;
- }
- mMultisample = descriptor->multisample;
-
- if (mAttachmentState->HasDepthStencilAttachment()) {
- mDepthStencil = *descriptor->depthStencil;
- mWritesDepth = mDepthStencil.depthWriteEnabled;
- if (mDepthStencil.stencilWriteMask) {
- if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
- (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
- (mPrimitive.cullMode != wgpu::CullMode::Back &&
- (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
- mWritesStencil = true;
- }
- }
- } else {
- // These default values below are useful for backends to fill information.
- // The values indicate that depth and stencil test are disabled when backends
- // set their own depth stencil states/descriptors according to the values in
- // mDepthStencil.
- mDepthStencil.format = wgpu::TextureFormat::Undefined;
- mDepthStencil.depthWriteEnabled = false;
- mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
- mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
- mDepthStencil.stencilReadMask = 0xff;
- mDepthStencil.stencilWriteMask = 0xff;
- mDepthStencil.depthBias = 0;
- mDepthStencil.depthBiasSlopeScale = 0.0f;
- mDepthStencil.depthBiasClamp = 0.0f;
- }
-
- for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- // Vertex-only render pipeline have no color attachment. For a render pipeline with
- // color attachments, there must be a valid FragmentState.
- ASSERT(descriptor->fragment != nullptr);
- const ColorTargetState* target =
- &descriptor->fragment->targets[static_cast<uint8_t>(i)];
- mTargets[i] = *target;
-
- if (target->blend != nullptr) {
- mTargetBlend[i] = *target->blend;
- mTargets[i].blend = &mTargetBlend[i];
- }
- }
-
- SetContentHash(ComputeContentHash());
- TrackInDevice();
- }
-
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
- TrackInDevice();
- }
-
- RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : PipelineBase(device, tag) {
- }
-
- RenderPipelineBase::~RenderPipelineBase() = default;
-
- void RenderPipelineBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheRenderPipeline(this);
- }
-
- // Remove reference to the attachment state so that we don't have lingering references to
- // it preventing it from being uncached in the device.
- mAttachmentState = nullptr;
- }
-
- // static
- RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
- class ErrorRenderPipeline final : public RenderPipelineBase {
- public:
- ErrorRenderPipeline(DeviceBase* device)
- : RenderPipelineBase(device, ObjectBase::kError) {
- }
-
- MaybeError Initialize() override {
- UNREACHABLE();
- return {};
- }
- };
-
- return new ErrorRenderPipeline(device);
- }
-
- ObjectType RenderPipelineBase::GetType() const {
- return ObjectType::RenderPipeline;
- }
-
- const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
- RenderPipelineBase::GetAttributeLocationsUsed() const {
- ASSERT(!IsError());
- return mAttributeLocationsUsed;
- }
-
- const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
- VertexAttributeLocation location) const {
- ASSERT(!IsError());
- ASSERT(mAttributeLocationsUsed[location]);
- return mAttributeInfos[location];
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsed() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsed;
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsedAsVertexBuffer;
- }
-
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
- ASSERT(!IsError());
- return mVertexBufferSlotsUsedAsInstanceBuffer;
- }
-
- const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
- ASSERT(!IsError());
- ASSERT(mVertexBufferSlotsUsed[slot]);
- return mVertexBufferInfos[slot];
- }
-
- uint32_t RenderPipelineBase::GetVertexBufferCount() const {
- ASSERT(!IsError());
- return mVertexBufferCount;
- }
-
- const ColorTargetState* RenderPipelineBase::GetColorTargetState(
- ColorAttachmentIndex attachmentSlot) const {
- ASSERT(!IsError());
- ASSERT(attachmentSlot < mTargets.size());
- return &mTargets[attachmentSlot];
- }
-
- const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
- ASSERT(!IsError());
- return &mDepthStencil;
- }
-
- wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
- ASSERT(!IsError());
- return mPrimitive.topology;
- }
-
- wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
- ASSERT(!IsError());
- return mPrimitive.stripIndexFormat;
- }
-
- wgpu::CullMode RenderPipelineBase::GetCullMode() const {
- ASSERT(!IsError());
- return mPrimitive.cullMode;
- }
-
- wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
- ASSERT(!IsError());
- return mPrimitive.frontFace;
- }
-
- bool RenderPipelineBase::IsDepthBiasEnabled() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
- }
-
- int32_t RenderPipelineBase::GetDepthBias() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBias;
- }
-
- float RenderPipelineBase::GetDepthBiasSlopeScale() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBiasSlopeScale;
- }
-
- float RenderPipelineBase::GetDepthBiasClamp() const {
- ASSERT(!IsError());
- return mDepthStencil.depthBiasClamp;
- }
-
- bool RenderPipelineBase::ShouldClampDepth() const {
- ASSERT(!IsError());
- return mClampDepth;
- }
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
- RenderPipelineBase::GetColorAttachmentsMask() const {
- ASSERT(!IsError());
- return mAttachmentState->GetColorAttachmentsMask();
- }
-
- bool RenderPipelineBase::HasDepthStencilAttachment() const {
- ASSERT(!IsError());
- return mAttachmentState->HasDepthStencilAttachment();
- }
-
- wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
- ColorAttachmentIndex attachment) const {
- ASSERT(!IsError());
- return mTargets[attachment].format;
- }
-
- wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
- ASSERT(!IsError());
- ASSERT(mAttachmentState->HasDepthStencilAttachment());
- return mDepthStencil.format;
- }
-
- uint32_t RenderPipelineBase::GetSampleCount() const {
- ASSERT(!IsError());
- return mAttachmentState->GetSampleCount();
- }
-
- uint32_t RenderPipelineBase::GetSampleMask() const {
- ASSERT(!IsError());
- return mMultisample.mask;
- }
-
- bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
- ASSERT(!IsError());
- return mMultisample.alphaToCoverageEnabled;
- }
-
- const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
- ASSERT(!IsError());
-
- return mAttachmentState.Get();
- }
-
- bool RenderPipelineBase::WritesDepth() const {
- ASSERT(!IsError());
-
- return mWritesDepth;
- }
-
- bool RenderPipelineBase::WritesStencil() const {
- ASSERT(!IsError());
-
- return mWritesStencil;
- }
-
- size_t RenderPipelineBase::ComputeContentHash() {
- ObjectContentHasher recorder;
-
- // Record modules and layout
- recorder.Record(PipelineBase::ComputeContentHash());
-
- // Hierarchically record the attachment state.
- // It contains the attachments set, texture formats, and sample count.
- recorder.Record(mAttachmentState->GetContentHash());
-
- // Record attachments
- for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- const ColorTargetState& desc = *GetColorTargetState(i);
- recorder.Record(desc.writeMask);
- if (desc.blend != nullptr) {
- recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
- desc.blend->color.dstFactor);
- recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
- desc.blend->alpha.dstFactor);
- }
- }
-
- if (mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilState& desc = mDepthStencil;
- recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
- recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
- recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
- desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
- recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
- desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
- recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
- }
-
- // Record vertex state
- recorder.Record(mAttributeLocationsUsed);
- for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
- const VertexAttributeInfo& desc = GetAttribute(location);
- recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
- }
-
- recorder.Record(mVertexBufferSlotsUsed);
- for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
- const VertexBufferInfo& desc = GetVertexBuffer(slot);
- recorder.Record(desc.arrayStride, desc.stepMode);
- }
-
- // Record primitive state
- recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
- mPrimitive.cullMode, mClampDepth);
-
- // Record multisample state
- // Sample count hashed as part of the attachment state
- recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
-
- return recorder.GetContentHash();
- }
-
- bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
- const RenderPipelineBase* b) const {
- // Check the layout and shader stages.
- if (!PipelineBase::EqualForCache(a, b)) {
- return false;
- }
-
- // Check the attachment state.
- // It contains the attachments set, texture formats, and sample count.
- if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
- return false;
- }
-
- if (a->mAttachmentState.Get() != nullptr) {
- for (ColorAttachmentIndex i :
- IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
- const ColorTargetState& descA = *a->GetColorTargetState(i);
- const ColorTargetState& descB = *b->GetColorTargetState(i);
- if (descA.writeMask != descB.writeMask) {
- return false;
- }
- if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
- return false;
- }
- if (descA.blend != nullptr) {
- if (descA.blend->color.operation != descB.blend->color.operation ||
- descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
- descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
- return false;
- }
- if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
- descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
- descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
- return false;
- }
- }
- }
-
- // Check depth/stencil state
- if (a->mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilState& stateA = a->mDepthStencil;
- const DepthStencilState& stateB = b->mDepthStencil;
-
- ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
- ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
- ASSERT(!std::isnan(stateA.depthBiasClamp));
- ASSERT(!std::isnan(stateB.depthBiasClamp));
-
- if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
- stateA.depthCompare != stateB.depthCompare ||
- stateA.depthBias != stateB.depthBias ||
- stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
- stateA.depthBiasClamp != stateB.depthBiasClamp) {
- return false;
- }
- if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
- stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
- stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
- stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
- return false;
- }
- if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
- stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
- stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
- stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
- return false;
- }
- if (stateA.stencilReadMask != stateB.stencilReadMask ||
- stateA.stencilWriteMask != stateB.stencilWriteMask) {
- return false;
- }
- }
- }
-
- // Check vertex state
- if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
- return false;
- }
-
- for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
- const VertexAttributeInfo& descA = a->GetAttribute(loc);
- const VertexAttributeInfo& descB = b->GetAttribute(loc);
- if (descA.shaderLocation != descB.shaderLocation ||
- descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
- descA.format != descB.format) {
- return false;
- }
- }
-
- if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
- return false;
- }
-
- for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
- const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
- const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
- if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
- return false;
- }
- }
-
- // Check primitive state
- {
- const PrimitiveState& stateA = a->mPrimitive;
- const PrimitiveState& stateB = b->mPrimitive;
- if (stateA.topology != stateB.topology ||
- stateA.stripIndexFormat != stateB.stripIndexFormat ||
- stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
- a->mClampDepth != b->mClampDepth) {
- return false;
- }
- }
-
- // Check multisample state
- {
- const MultisampleState& stateA = a->mMultisample;
- const MultisampleState& stateB = b->mMultisample;
- // Sample count already checked as part of the attachment state.
- if (stateA.mask != stateB.mask ||
- stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
- return false;
- }
- }
-
- return true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
deleted file mode 100644
index f09ca84760f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RENDERPIPELINE_H_
-#define DAWNNATIVE_RENDERPIPELINE_H_
-
-#include "common/TypedInteger.h"
-#include "dawn_native/AttachmentState.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/Pipeline.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor);
-
- std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
- DeviceBase* device,
- const RenderPipelineDescriptor* descriptor);
-
- size_t IndexFormatSize(wgpu::IndexFormat format);
-
- bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
-
- bool StencilTestEnabled(const DepthStencilState* depthStencil);
-
- struct VertexAttributeInfo {
- wgpu::VertexFormat format;
- uint64_t offset;
- VertexAttributeLocation shaderLocation;
- VertexBufferSlot vertexBufferSlot;
- };
-
- struct VertexBufferInfo {
- uint64_t arrayStride;
- wgpu::VertexStepMode stepMode;
- uint16_t usedBytesInStride;
- };
-
- class RenderPipelineBase : public PipelineBase {
- public:
- RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
- ~RenderPipelineBase() override;
-
- static RenderPipelineBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
- GetAttributeLocationsUsed() const;
- const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- GetVertexBufferSlotsUsedAsVertexBuffer() const;
- const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
- GetVertexBufferSlotsUsedAsInstanceBuffer() const;
- const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
- uint32_t GetVertexBufferCount() const;
-
- const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
- const DepthStencilState* GetDepthStencilState() const;
- wgpu::PrimitiveTopology GetPrimitiveTopology() const;
- wgpu::IndexFormat GetStripIndexFormat() const;
- wgpu::CullMode GetCullMode() const;
- wgpu::FrontFace GetFrontFace() const;
- bool IsDepthBiasEnabled() const;
- int32_t GetDepthBias() const;
- float GetDepthBiasSlopeScale() const;
- float GetDepthBiasClamp() const;
- bool ShouldClampDepth() const;
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
- bool HasDepthStencilAttachment() const;
- wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
- wgpu::TextureFormat GetDepthStencilFormat() const;
- uint32_t GetSampleCount() const;
- uint32_t GetSampleMask() const;
- bool IsAlphaToCoverageEnabled() const;
- bool WritesDepth() const;
- bool WritesStencil() const;
-
- const AttachmentState* GetAttachmentState() const;
-
- // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
- };
-
- protected:
- // Constructor used only for mocking and testing.
- RenderPipelineBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // Vertex state
- uint32_t mVertexBufferCount;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
- ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
- mAttributeInfos;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
- ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
-
- // Attachments
- Ref<AttachmentState> mAttachmentState;
- ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
- ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
-
- // Other state
- PrimitiveState mPrimitive;
- DepthStencilState mDepthStencil;
- MultisampleState mMultisample;
- bool mClampDepth = false;
- bool mWritesDepth = false;
- bool mWritesStencil = false;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RENDERPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h b/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
deleted file mode 100644
index e9a4a672263..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RESOURCEHEAP_H_
-#define DAWNNATIVE_RESOURCEHEAP_H_
-
-#include "dawn_native/Error.h"
-
-namespace dawn_native {
-
- // Wrapper for a resource backed by a heap.
- class ResourceHeapBase {
- public:
- ResourceHeapBase() = default;
- virtual ~ResourceHeapBase() = default;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RESOURCEHEAP_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h b/chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h
deleted file mode 100644
index 1b0fd621de6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
-#define DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/ResourceHeap.h"
-
-#include <memory>
-
-namespace dawn_native {
-
- // Interface for backend allocators that create memory heaps resoruces can be suballocated in.
- class ResourceHeapAllocator {
- public:
- virtual ~ResourceHeapAllocator() = default;
-
- virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) = 0;
- virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
deleted file mode 100644
index b1c35d41727..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ResourceMemoryAllocation.h"
-#include "common/Assert.h"
-
-namespace dawn_native {
-
- ResourceMemoryAllocation::ResourceMemoryAllocation()
- : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
- }
-
- ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
- uint64_t offset,
- ResourceHeapBase* resourceHeap,
- uint8_t* mappedPointer)
- : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
- }
-
- ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
- ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
- return mResourceHeap;
- }
-
- uint64_t ResourceMemoryAllocation::GetOffset() const {
- ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
- return mOffset;
- }
-
- AllocationInfo ResourceMemoryAllocation::GetInfo() const {
- return mInfo;
- }
-
- uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
- return mMappedPointer;
- }
-
- void ResourceMemoryAllocation::Invalidate() {
- mResourceHeap = nullptr;
- mInfo = {};
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
deleted file mode 100644
index f8b05f509e2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
-#define DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
-
-#include <cstdint>
-
-namespace dawn_native {
-
- class ResourceHeapBase;
-
- // Allocation method determines how memory was sub-divided.
- // Used by the device to get the allocator that was responsible for the allocation.
- enum class AllocationMethod {
-
- // Memory not sub-divided.
- kDirect,
-
- // Memory sub-divided using one or more blocks of various sizes.
- kSubAllocated,
-
- // Memory was allocated outside of Dawn.
- kExternal,
-
- // Memory not allocated or freed.
- kInvalid
- };
-
- // Metadata that describes how the allocation was allocated.
- struct AllocationInfo {
- // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
- // The block offset is within the entire allocator memory range and only required by the
- // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
- // allocation offset is always local to the memory.
- uint64_t mBlockOffset = 0;
-
- AllocationMethod mMethod = AllocationMethod::kInvalid;
- };
-
- // Handle into a resource heap pool.
- class ResourceMemoryAllocation {
- public:
- ResourceMemoryAllocation();
- ResourceMemoryAllocation(const AllocationInfo& info,
- uint64_t offset,
- ResourceHeapBase* resourceHeap,
- uint8_t* mappedPointer = nullptr);
- virtual ~ResourceMemoryAllocation() = default;
-
- ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
- ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
-
- ResourceHeapBase* GetResourceHeap() const;
- uint64_t GetOffset() const;
- uint8_t* GetMappedPointer() const;
- AllocationInfo GetInfo() const;
-
- virtual void Invalidate();
-
- private:
- AllocationInfo mInfo;
- uint64_t mOffset;
- ResourceHeapBase* mResourceHeap;
- uint8_t* mMappedPointer;
- };
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
deleted file mode 100644
index c77f0a58eed..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/RingBufferAllocator.h"
-
-// Note: Current RingBufferAllocator implementation uses two indices (start and end) to implement a
-// circular queue. However, this approach defines a full queue when one element is still unused.
-//
-// For example, [E,E,E,E] would be equivelent to [U,U,U,U].
-// ^ ^
-// S=E=1 S=E=1
-//
-// The latter case is eliminated by counting used bytes >= capacity. This definition prevents
-// (the last) byte and requires an extra variable to count used bytes. Alternatively, we could use
-// only two indices that keep increasing (unbounded) but can be still indexed using bit masks.
-// However, this 1) requires the size to always be a power-of-two and 2) remove tests that check
-// used bytes.
-namespace dawn_native {
-
- RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
- }
-
- void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
- // Reclaim memory from previously recorded blocks.
- for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
- mUsedStartOffset = request.endOffset;
- mUsedSize -= request.size;
- }
-
- // Dequeue previously recorded requests.
- mInflightRequests.ClearUpTo(lastCompletedSerial);
- }
-
- uint64_t RingBufferAllocator::GetSize() const {
- return mMaxBlockSize;
- }
-
- uint64_t RingBufferAllocator::GetUsedSize() const {
- return mUsedSize;
- }
-
- bool RingBufferAllocator::Empty() const {
- return mInflightRequests.Empty();
- }
-
- // Sub-allocate the ring-buffer by requesting a chunk of the specified size.
- // This is a serial-based resource scheme, the life-span of resources (and the allocations) get
- // tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
- // completed up to a given serial. Each sub-allocation request is tracked in the serial offset
- // queue, which identifies an existing (or new) frames-worth of resources. Internally, the
- // ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
- // in FIFO order as older frames would free resources before newer ones.
- uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
- // Check if the buffer is full by comparing the used size.
- // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
- // subsequent sub-alloc could fail where the used size was previously adjusted to include
- // the wasted.
- if (mUsedSize >= mMaxBlockSize) {
- return kInvalidOffset;
- }
-
- // Ensure adding allocationSize does not overflow.
- const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
- if (allocationSize > remainingSize) {
- return kInvalidOffset;
- }
-
- uint64_t startOffset = kInvalidOffset;
-
- // Check if the buffer is NOT split (i.e sub-alloc on ends)
- if (mUsedStartOffset <= mUsedEndOffset) {
- // Order is important (try to sub-alloc at end first).
- // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
- // wrapped).
- if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
- startOffset = mUsedEndOffset;
- mUsedEndOffset += allocationSize;
- mUsedSize += allocationSize;
- mCurrentRequestSize += allocationSize;
- } else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
- // Count the space at the end so that a subsequent
- // sub-alloc cannot not succeed when the buffer is full.
- const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
-
- startOffset = 0;
- mUsedEndOffset = allocationSize;
- mUsedSize += requestSize;
- mCurrentRequestSize += requestSize;
- }
- } else if (mUsedEndOffset + allocationSize <=
- mUsedStartOffset) { // Otherwise, buffer is split where sub-alloc must be
- // in-between.
- startOffset = mUsedEndOffset;
- mUsedEndOffset += allocationSize;
- mUsedSize += allocationSize;
- mCurrentRequestSize += allocationSize;
- }
-
- if (startOffset != kInvalidOffset) {
- Request request;
- request.endOffset = mUsedEndOffset;
- request.size = mCurrentRequestSize;
-
- mInflightRequests.Enqueue(std::move(request), serial);
- mCurrentRequestSize = 0; // reset
- }
-
- return startOffset;
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
deleted file mode 100644
index 3e9fb4355f0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_RINGBUFFERALLOCATOR_H_
-#define DAWNNATIVE_RINGBUFFERALLOCATOR_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/IntegerTypes.h"
-
-#include <limits>
-#include <memory>
-
-// RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
-namespace dawn_native {
-
- class RingBufferAllocator {
- public:
- RingBufferAllocator() = default;
- RingBufferAllocator(uint64_t maxSize);
- ~RingBufferAllocator() = default;
- RingBufferAllocator(const RingBufferAllocator&) = default;
- RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
-
- uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
- void Deallocate(ExecutionSerial lastCompletedSerial);
-
- uint64_t GetSize() const;
- bool Empty() const;
- uint64_t GetUsedSize() const;
-
- static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
-
- private:
- struct Request {
- uint64_t endOffset;
- uint64_t size;
- };
-
- SerialQueue<ExecutionSerial, Request>
- mInflightRequests; // Queue of the recorded sub-alloc requests
- // (e.g. frame of resources).
-
- uint64_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
- uint64_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
- uint64_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
- uint64_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
- uint64_t mCurrentRequestSize =
- 0; // Size of the sub-alloc requests (in bytes) of the current serial.
- };
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
deleted file mode 100644
index 12ab8cb2b27..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Sampler.h"
-
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-#include <cmath>
-
-namespace dawn_native {
-
- MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
- DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
- "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
- descriptor->lodMaxClamp);
-
- DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
- "LOD clamp bounds [%f, %f] contain contain a negative number.",
- descriptor->lodMinClamp, descriptor->lodMaxClamp);
-
- DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
- "LOD min clamp (%f) is larger than the max clamp (%f).",
- descriptor->lodMinClamp, descriptor->lodMaxClamp);
-
- if (descriptor->maxAnisotropy > 1) {
- DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
- descriptor->magFilter != wgpu::FilterMode::Linear ||
- descriptor->mipmapFilter != wgpu::FilterMode::Linear,
- "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
- "while using anisotropic filter (maxAnisotropy is %f)",
- descriptor->magFilter, descriptor->minFilter, descriptor->mipmapFilter,
- wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
- } else if (descriptor->maxAnisotropy == 0u) {
- return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
- descriptor->maxAnisotropy);
- }
-
- DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
- DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
- DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
- DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
-
- // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
- // SamplerDescriptor where it is a special value that means the sampler is not a
- // comparison-sampler.
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- DAWN_TRY(ValidateCompareFunction(descriptor->compare));
- }
-
- return {};
- }
-
- // SamplerBase
-
- SamplerBase::SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label),
- mAddressModeU(descriptor->addressModeU),
- mAddressModeV(descriptor->addressModeV),
- mAddressModeW(descriptor->addressModeW),
- mMagFilter(descriptor->magFilter),
- mMinFilter(descriptor->minFilter),
- mMipmapFilter(descriptor->mipmapFilter),
- mLodMinClamp(descriptor->lodMinClamp),
- mLodMaxClamp(descriptor->lodMaxClamp),
- mCompareFunction(descriptor->compare),
- mMaxAnisotropy(descriptor->maxAnisotropy) {
- }
-
- SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
- }
-
- SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- SamplerBase::~SamplerBase() = default;
-
- void SamplerBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheSampler(this);
- }
- }
-
- // static
- SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
- return new SamplerBase(device, ObjectBase::kError);
- }
-
- ObjectType SamplerBase::GetType() const {
- return ObjectType::Sampler;
- }
-
- bool SamplerBase::IsComparison() const {
- return mCompareFunction != wgpu::CompareFunction::Undefined;
- }
-
- bool SamplerBase::IsFiltering() const {
- return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
- mMipmapFilter == wgpu::FilterMode::Linear;
- }
-
- size_t SamplerBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
- mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction,
- mMaxAnisotropy);
- return recorder.GetContentHash();
- }
-
- bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
- if (a == b) {
- return true;
- }
-
- ASSERT(!std::isnan(a->mLodMinClamp));
- ASSERT(!std::isnan(b->mLodMinClamp));
- ASSERT(!std::isnan(a->mLodMaxClamp));
- ASSERT(!std::isnan(b->mLodMaxClamp));
-
- return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
- a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
- a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
- a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
- a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.h b/chromium/third_party/dawn/src/dawn_native/Sampler.h
deleted file mode 100644
index 0116d47c9e1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SAMPLER_H_
-#define DAWNNATIVE_SAMPLER_H_
-
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- class DeviceBase;
-
- MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
-
- class SamplerBase : public ApiObjectBase, public CachedObject {
- public:
- SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
- ~SamplerBase() override;
-
- static SamplerBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- bool IsComparison() const;
- bool IsFiltering() const;
-
- // Functions necessary for the unordered_set<SamplerBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const SamplerBase* a, const SamplerBase* b) const;
- };
-
- uint16_t GetMaxAnisotropy() const {
- return mMaxAnisotropy;
- }
-
- protected:
- // Constructor used only for mocking and testing.
- SamplerBase(DeviceBase* device);
- void DestroyImpl() override;
-
- private:
- SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
- wgpu::AddressMode mAddressModeU;
- wgpu::AddressMode mAddressModeV;
- wgpu::AddressMode mAddressModeW;
- wgpu::FilterMode mMagFilter;
- wgpu::FilterMode mMinFilter;
- wgpu::FilterMode mMipmapFilter;
- float mLodMinClamp;
- float mLodMaxClamp;
- wgpu::CompareFunction mCompareFunction;
- uint16_t mMaxAnisotropy;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SAMPLER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp
deleted file mode 100644
index 976214cb912..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ScratchBuffer.h"
-
-#include "dawn_native/Device.h"
-
-namespace dawn_native {
-
- ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
- : mDevice(device), mUsage(usage) {
- }
-
- ScratchBuffer::~ScratchBuffer() = default;
-
- void ScratchBuffer::Reset() {
- mBuffer = nullptr;
- }
-
- MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
- if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
- BufferDescriptor descriptor;
- descriptor.size = capacity;
- descriptor.usage = mUsage;
- DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
- mBuffer->SetIsDataInitialized();
- }
- return {};
- }
-
- BufferBase* ScratchBuffer::GetBuffer() const {
- ASSERT(mBuffer.Get() != nullptr);
- return mBuffer.Get();
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h
deleted file mode 100644
index 7bb446dfa29..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SCRATCHBUFFER_H_
-#define DAWNNATIVE_SCRATCHBUFFER_H_
-
-#include "common/RefCounted.h"
-#include "dawn_native/Buffer.h"
-
-#include <cstdint>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
- // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
- // be careful not to exposed uninitialized bytes to client shaders.
- class ScratchBuffer {
- public:
- // Note that this object does not retain a reference to `device`, so `device` MUST outlive
- // this object.
- ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
- ~ScratchBuffer();
-
- // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
- // fresh buffer.
- void Reset();
-
- // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
- // `capacity` bytes of storage.
- MaybeError EnsureCapacity(uint64_t capacity);
-
- BufferBase* GetBuffer() const;
-
- private:
- DeviceBase* const mDevice;
- const wgpu::BufferUsage mUsage;
- Ref<BufferBase> mBuffer;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SCRATCHBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
deleted file mode 100644
index c0e29e8401b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ /dev/null
@@ -1,1312 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/ShaderModule.h"
-
-#include "absl/strings/str_format.h"
-#include "common/Constants.h"
-#include "common/HashUtils.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/CompilationMessages.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectContentHasher.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/TintUtils.h"
-
-#include <tint/tint.h>
-
-#include <sstream>
-
-namespace dawn_native {
-
- namespace {
-
- tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return tint::transform::VertexFormat::kUint8x2;
- case wgpu::VertexFormat::Uint8x4:
- return tint::transform::VertexFormat::kUint8x4;
- case wgpu::VertexFormat::Sint8x2:
- return tint::transform::VertexFormat::kSint8x2;
- case wgpu::VertexFormat::Sint8x4:
- return tint::transform::VertexFormat::kSint8x4;
- case wgpu::VertexFormat::Unorm8x2:
- return tint::transform::VertexFormat::kUnorm8x2;
- case wgpu::VertexFormat::Unorm8x4:
- return tint::transform::VertexFormat::kUnorm8x4;
- case wgpu::VertexFormat::Snorm8x2:
- return tint::transform::VertexFormat::kSnorm8x2;
- case wgpu::VertexFormat::Snorm8x4:
- return tint::transform::VertexFormat::kSnorm8x4;
- case wgpu::VertexFormat::Uint16x2:
- return tint::transform::VertexFormat::kUint16x2;
- case wgpu::VertexFormat::Uint16x4:
- return tint::transform::VertexFormat::kUint16x4;
- case wgpu::VertexFormat::Sint16x2:
- return tint::transform::VertexFormat::kSint16x2;
- case wgpu::VertexFormat::Sint16x4:
- return tint::transform::VertexFormat::kSint16x4;
- case wgpu::VertexFormat::Unorm16x2:
- return tint::transform::VertexFormat::kUnorm16x2;
- case wgpu::VertexFormat::Unorm16x4:
- return tint::transform::VertexFormat::kUnorm16x4;
- case wgpu::VertexFormat::Snorm16x2:
- return tint::transform::VertexFormat::kSnorm16x2;
- case wgpu::VertexFormat::Snorm16x4:
- return tint::transform::VertexFormat::kSnorm16x4;
- case wgpu::VertexFormat::Float16x2:
- return tint::transform::VertexFormat::kFloat16x2;
- case wgpu::VertexFormat::Float16x4:
- return tint::transform::VertexFormat::kFloat16x4;
- case wgpu::VertexFormat::Float32:
- return tint::transform::VertexFormat::kFloat32;
- case wgpu::VertexFormat::Float32x2:
- return tint::transform::VertexFormat::kFloat32x2;
- case wgpu::VertexFormat::Float32x3:
- return tint::transform::VertexFormat::kFloat32x3;
- case wgpu::VertexFormat::Float32x4:
- return tint::transform::VertexFormat::kFloat32x4;
- case wgpu::VertexFormat::Uint32:
- return tint::transform::VertexFormat::kUint32;
- case wgpu::VertexFormat::Uint32x2:
- return tint::transform::VertexFormat::kUint32x2;
- case wgpu::VertexFormat::Uint32x3:
- return tint::transform::VertexFormat::kUint32x3;
- case wgpu::VertexFormat::Uint32x4:
- return tint::transform::VertexFormat::kUint32x4;
- case wgpu::VertexFormat::Sint32:
- return tint::transform::VertexFormat::kSint32;
- case wgpu::VertexFormat::Sint32x2:
- return tint::transform::VertexFormat::kSint32x2;
- case wgpu::VertexFormat::Sint32x3:
- return tint::transform::VertexFormat::kSint32x3;
- case wgpu::VertexFormat::Sint32x4:
- return tint::transform::VertexFormat::kSint32x4;
-
- case wgpu::VertexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return tint::transform::VertexStepMode::kVertex;
- case wgpu::VertexStepMode::Instance:
- return tint::transform::VertexStepMode::kInstance;
- }
- UNREACHABLE();
- }
-
- ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
- tint::ast::PipelineStage stage) {
- switch (stage) {
- case tint::ast::PipelineStage::kVertex:
- return SingleShaderStage::Vertex;
- case tint::ast::PipelineStage::kFragment:
- return SingleShaderStage::Fragment;
- case tint::ast::PipelineStage::kCompute:
- return SingleShaderStage::Compute;
- case tint::ast::PipelineStage::kNone:
- break;
- }
- UNREACHABLE();
- }
-
- BindingInfoType TintResourceTypeToBindingInfoType(
- tint::inspector::ResourceBinding::ResourceType type) {
- switch (type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
- return BindingInfoType::Buffer;
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
- return BindingInfoType::Sampler;
- case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
- case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
- case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
- return BindingInfoType::Texture;
- case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
- return BindingInfoType::StorageTexture;
- case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
- return BindingInfoType::ExternalTexture;
-
- default:
- UNREACHABLE();
- return BindingInfoType::Buffer;
- }
- }
-
- wgpu::TextureFormat TintImageFormatToTextureFormat(
- tint::inspector::ResourceBinding::ImageFormat format) {
- switch (format) {
- case tint::inspector::ResourceBinding::ImageFormat::kR8Unorm:
- return wgpu::TextureFormat::R8Unorm;
- case tint::inspector::ResourceBinding::ImageFormat::kR8Snorm:
- return wgpu::TextureFormat::R8Snorm;
- case tint::inspector::ResourceBinding::ImageFormat::kR8Uint:
- return wgpu::TextureFormat::R8Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kR8Sint:
- return wgpu::TextureFormat::R8Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kR16Uint:
- return wgpu::TextureFormat::R16Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kR16Sint:
- return wgpu::TextureFormat::R16Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kR16Float:
- return wgpu::TextureFormat::R16Float;
- case tint::inspector::ResourceBinding::ImageFormat::kRg8Unorm:
- return wgpu::TextureFormat::RG8Unorm;
- case tint::inspector::ResourceBinding::ImageFormat::kRg8Snorm:
- return wgpu::TextureFormat::RG8Snorm;
- case tint::inspector::ResourceBinding::ImageFormat::kRg8Uint:
- return wgpu::TextureFormat::RG8Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRg8Sint:
- return wgpu::TextureFormat::RG8Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kR32Uint:
- return wgpu::TextureFormat::R32Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kR32Sint:
- return wgpu::TextureFormat::R32Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kR32Float:
- return wgpu::TextureFormat::R32Float;
- case tint::inspector::ResourceBinding::ImageFormat::kRg16Uint:
- return wgpu::TextureFormat::RG16Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRg16Sint:
- return wgpu::TextureFormat::RG16Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kRg16Float:
- return wgpu::TextureFormat::RG16Float;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba8Unorm:
- return wgpu::TextureFormat::RGBA8Unorm;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba8UnormSrgb:
- return wgpu::TextureFormat::RGBA8UnormSrgb;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba8Snorm:
- return wgpu::TextureFormat::RGBA8Snorm;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba8Uint:
- return wgpu::TextureFormat::RGBA8Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba8Sint:
- return wgpu::TextureFormat::RGBA8Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kBgra8Unorm:
- return wgpu::TextureFormat::BGRA8Unorm;
- case tint::inspector::ResourceBinding::ImageFormat::kBgra8UnormSrgb:
- return wgpu::TextureFormat::BGRA8UnormSrgb;
- case tint::inspector::ResourceBinding::ImageFormat::kRgb10A2Unorm:
- return wgpu::TextureFormat::RGB10A2Unorm;
- case tint::inspector::ResourceBinding::ImageFormat::kRg11B10Float:
- return wgpu::TextureFormat::RG11B10Ufloat;
- case tint::inspector::ResourceBinding::ImageFormat::kRg32Uint:
- return wgpu::TextureFormat::RG32Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRg32Sint:
- return wgpu::TextureFormat::RG32Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kRg32Float:
- return wgpu::TextureFormat::RG32Float;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba16Uint:
- return wgpu::TextureFormat::RGBA16Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba16Sint:
- return wgpu::TextureFormat::RGBA16Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba16Float:
- return wgpu::TextureFormat::RGBA16Float;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba32Uint:
- return wgpu::TextureFormat::RGBA32Uint;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba32Sint:
- return wgpu::TextureFormat::RGBA32Sint;
- case tint::inspector::ResourceBinding::ImageFormat::kRgba32Float:
- return wgpu::TextureFormat::RGBA32Float;
- case tint::inspector::ResourceBinding::ImageFormat::kNone:
- return wgpu::TextureFormat::Undefined;
- }
- UNREACHABLE();
- }
-
- wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
- tint::inspector::ResourceBinding::TextureDimension dim) {
- switch (dim) {
- case tint::inspector::ResourceBinding::TextureDimension::k1d:
- return wgpu::TextureViewDimension::e1D;
- case tint::inspector::ResourceBinding::TextureDimension::k2d:
- return wgpu::TextureViewDimension::e2D;
- case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
- return wgpu::TextureViewDimension::e2DArray;
- case tint::inspector::ResourceBinding::TextureDimension::k3d:
- return wgpu::TextureViewDimension::e3D;
- case tint::inspector::ResourceBinding::TextureDimension::kCube:
- return wgpu::TextureViewDimension::Cube;
- case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
- return wgpu::TextureViewDimension::CubeArray;
- case tint::inspector::ResourceBinding::TextureDimension::kNone:
- return wgpu::TextureViewDimension::Undefined;
- }
- UNREACHABLE();
- }
-
- SampleTypeBit TintSampledKindToSampleTypeBit(
- tint::inspector::ResourceBinding::SampledKind s) {
- switch (s) {
- case tint::inspector::ResourceBinding::SampledKind::kSInt:
- return SampleTypeBit::Sint;
- case tint::inspector::ResourceBinding::SampledKind::kUInt:
- return SampleTypeBit::Uint;
- case tint::inspector::ResourceBinding::SampledKind::kFloat:
- return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
- case tint::inspector::ResourceBinding::SampledKind::kUnknown:
- return SampleTypeBit::None;
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return wgpu::TextureComponentType::Float;
- case tint::inspector::ComponentType::kSInt:
- return wgpu::TextureComponentType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return wgpu::TextureComponentType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return VertexFormatBaseType::Float;
- case tint::inspector::ComponentType::kSInt:
- return VertexFormatBaseType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return VertexFormatBaseType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
- tint::inspector::ResourceBinding::ResourceType resource_type) {
- switch (resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
- return wgpu::BufferBindingType::Uniform;
- case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
- return wgpu::BufferBindingType::Storage;
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
- return wgpu::BufferBindingType::ReadOnlyStorage;
- default:
- return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
- }
- UNREACHABLE();
- }
-
- ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
- tint::inspector::ResourceBinding::ResourceType resource_type) {
- switch (resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
- return wgpu::StorageTextureAccess::WriteOnly;
- default:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert non-storage texture resource type");
- }
- UNREACHABLE();
- }
-
- ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
- tint::inspector::ComponentType type) {
- switch (type) {
- case tint::inspector::ComponentType::kFloat:
- return InterStageComponentType::Float;
- case tint::inspector::ComponentType::kSInt:
- return InterStageComponentType::Sint;
- case tint::inspector::ComponentType::kUInt:
- return InterStageComponentType::Uint;
- case tint::inspector::ComponentType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' component type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
- tint::inspector::CompositionType type) {
- switch (type) {
- case tint::inspector::CompositionType::kScalar:
- return 1u;
- case tint::inspector::CompositionType::kVec2:
- return 2u;
- case tint::inspector::CompositionType::kVec3:
- return 3u;
- case tint::inspector::CompositionType::kVec4:
- return 4u;
- case tint::inspector::CompositionType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempt to convert 'Unknown' composition type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
- tint::inspector::InterpolationType type) {
- switch (type) {
- case tint::inspector::InterpolationType::kPerspective:
- return InterpolationType::Perspective;
- case tint::inspector::InterpolationType::kLinear:
- return InterpolationType::Linear;
- case tint::inspector::InterpolationType::kFlat:
- return InterpolationType::Flat;
- case tint::inspector::InterpolationType::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' interpolation type from Tint");
- }
- UNREACHABLE();
- }
-
- ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
- tint::inspector::InterpolationSampling type) {
- switch (type) {
- case tint::inspector::InterpolationSampling::kNone:
- return InterpolationSampling::None;
- case tint::inspector::InterpolationSampling::kCenter:
- return InterpolationSampling::Center;
- case tint::inspector::InterpolationSampling::kCentroid:
- return InterpolationSampling::Centroid;
- case tint::inspector::InterpolationSampling::kSample:
- return InterpolationSampling::Sample;
- case tint::inspector::InterpolationSampling::kUnknown:
- return DAWN_VALIDATION_ERROR(
- "Attempted to convert 'Unknown' interpolation sampling type from Tint");
- }
- UNREACHABLE();
- }
-
- EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
- tint::inspector::OverridableConstant::Type type) {
- switch (type) {
- case tint::inspector::OverridableConstant::Type::kBool:
- return EntryPointMetadata::OverridableConstant::Type::Boolean;
- case tint::inspector::OverridableConstant::Type::kFloat32:
- return EntryPointMetadata::OverridableConstant::Type::Float32;
- case tint::inspector::OverridableConstant::Type::kInt32:
- return EntryPointMetadata::OverridableConstant::Type::Int32;
- case tint::inspector::OverridableConstant::Type::kUint32:
- return EntryPointMetadata::OverridableConstant::Type::Uint32;
- default:
- UNREACHABLE();
- }
- }
-
- ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
- OwnedCompilationMessages* outMessages) {
- tint::Program program = tint::reader::wgsl::Parse(file);
- if (outMessages != nullptr) {
- outMessages->AddMessages(program.Diagnostics());
- }
- if (!program.IsValid()) {
- return DAWN_FORMAT_VALIDATION_ERROR(
- "Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
- program.Diagnostics().str(), file->content.data);
- }
-
- return std::move(program);
- }
-
- ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
- OwnedCompilationMessages* outMessages) {
- tint::Program program = tint::reader::spirv::Parse(spirv);
- if (outMessages != nullptr) {
- outMessages->AddMessages(program.Diagnostics());
- }
- if (!program.IsValid()) {
- return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
- program.Diagnostics().str());
- }
-
- return std::move(program);
- }
-
- std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
- const BindGroupLayoutBase* layout) {
- std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
- uint32_t packedIdx = 0;
-
- for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
- if (bindingInfo.buffer.minBindingSize != 0) {
- // Skip bindings that have minimum buffer size set in the layout
- continue;
- }
-
- ASSERT(packedIdx < requiredBufferSizes.size());
- const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
- if (shaderInfo != shaderBindings.end()) {
- requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
- } else {
- // We have to include buffers if they are included in the bind group's
- // packed vector. We don't actually need to check these at draw time, so
- // if this is a problem in the future we can optimize it further.
- requiredBufferSizes[packedIdx] = 0;
- }
- ++packedIdx;
- }
-
- return requiredBufferSizes;
- }
-
- MaybeError ValidateCompatibilityOfSingleBindingWithLayout(
- const DeviceBase* device,
- const BindGroupLayoutBase* layout,
- SingleShaderStage entryPointStage,
- BindingNumber bindingNumber,
- const ShaderBindingInfo& shaderInfo) {
- const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
-
- const auto& bindingIt = layoutBindings.find(bindingNumber);
- DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.",
- layout);
-
- BindingIndex bindingIndex(bindingIt->second);
- const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
-
- // TODO(dawn:563): Provide info about the binding types.
- DAWN_INVALID_IF(layoutInfo.bindingType != shaderInfo.bindingType,
- "Binding type (buffer vs. texture vs. sampler) doesn't match the type "
- "in the layout.");
-
- // TODO(dawn:563): Provide info about the visibility.
- DAWN_INVALID_IF(
- (layoutInfo.visibility & StageBit(entryPointStage)) == 0,
- "Entry point's stage is not in the binding visibility in the layout (%s)",
- layoutInfo.visibility);
-
- switch (layoutInfo.bindingType) {
- case BindingInfoType::Texture: {
- DAWN_INVALID_IF(
- layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
- "Binding multisampled flag (%u) doesn't match the layout's multisampled "
- "flag (%u)",
- layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
-
- // TODO(dawn:563): Provide info about the sample types.
- DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
- shaderInfo.texture.compatibleSampleTypes) == 0,
- "The sample type in the shader is not compatible with the "
- "sample type of the layout.");
-
- DAWN_INVALID_IF(
- layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
- "The shader's binding dimension (%s) doesn't match the shader's binding "
- "dimension (%s).",
- layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
- ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
-
- DAWN_INVALID_IF(
- layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
- "The layout's binding access (%s) isn't compatible with the shader's "
- "binding access (%s).",
- layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
-
- DAWN_INVALID_IF(
- layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
- "The layout's binding format (%s) doesn't match the shader's binding "
- "format (%s).",
- layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
-
- DAWN_INVALID_IF(layoutInfo.storageTexture.viewDimension !=
- shaderInfo.storageTexture.viewDimension,
- "The layout's binding dimension (%s) doesn't match the "
- "shader's binding dimension (%s).",
- layoutInfo.storageTexture.viewDimension,
- shaderInfo.storageTexture.viewDimension);
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- // Nothing to validate! (yet?)
- break;
- }
-
- case BindingInfoType::Buffer: {
- // Binding mismatch between shader and bind group is invalid. For example, a
- // writable binding in the shader with a readonly storage buffer in the bind
- // group layout is invalid. However, a readonly binding in the shader with a
- // writable storage buffer in the bind group layout is valid, a storage
- // binding in the shader with an internal storage buffer in the bind group
- // layout is also valid.
- bool validBindingConversion =
- (layoutInfo.buffer.type == wgpu::BufferBindingType::Storage &&
- shaderInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage) ||
- (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
- shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
-
- DAWN_INVALID_IF(
- layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
- "The buffer type in the shader (%s) is not compatible with the type in the "
- "layout (%s).",
- shaderInfo.buffer.type, layoutInfo.buffer.type);
-
- DAWN_INVALID_IF(
- layoutInfo.buffer.minBindingSize != 0 &&
- shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
- "The shader uses more bytes of the buffer (%u) than the layout's "
- "minBindingSize (%u).",
- shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
- break;
- }
-
- case BindingInfoType::Sampler:
- DAWN_INVALID_IF(
- (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
- shaderInfo.sampler.isComparison,
- "The sampler type in the shader (comparison: %u) doesn't match the type in "
- "the layout (comparison: %u).",
- shaderInfo.sampler.isComparison,
- layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
- break;
- }
-
- return {};
- }
- MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
- BindGroupIndex group,
- const EntryPointMetadata& entryPoint,
- const BindGroupLayoutBase* layout) {
- // Iterate over all bindings used by this group in the shader, and find the
- // corresponding binding in the BindGroupLayout, if it exists.
- for (const auto& it : entryPoint.bindings[group]) {
- DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
- device, layout, entryPoint.stage, it.first, it.second),
- "validating that the entry-point's declaration for [[group(%u), "
- "binding(%u)]] matches %s",
- static_cast<uint32_t>(group), static_cast<uint32_t>(it.first),
- layout);
- }
-
- return {};
- }
-
- ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
- const DeviceBase* device,
- const tint::Program* program) {
- ASSERT(program->IsValid());
-
- const CombinedLimits& limits = device->GetLimits();
-
- EntryPointMetadataTable result;
-
- tint::inspector::Inspector inspector(program);
- auto entryPoints = inspector.GetEntryPoints();
- DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
- inspector.error());
-
- // TODO(dawn:563): use DAWN_TRY_CONTEXT to output the name of the entry point we're
- // reflecting.
- constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
- for (auto& entryPoint : entryPoints) {
- ASSERT(result.count(entryPoint.name) == 0);
-
- auto metadata = std::make_unique<EntryPointMetadata>();
-
- if (!entryPoint.overridable_constants.empty()) {
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
- "Pipeline overridable constants are disallowed because they "
- "are partially implemented.");
-
- const auto& name2Id = inspector.GetConstantNameToIdMap();
- const auto& id2Scalar = inspector.GetConstantIDs();
-
- for (auto& c : entryPoint.overridable_constants) {
- uint32_t id = name2Id.at(c.name);
- OverridableConstantScalar defaultValue;
- if (c.is_initialized) {
- // if it is initialized, the scalar must exist
- const auto& scalar = id2Scalar.at(id);
- if (scalar.IsBool()) {
- defaultValue.b = scalar.AsBool();
- } else if (scalar.IsU32()) {
- defaultValue.u32 = scalar.AsU32();
- } else if (scalar.IsI32()) {
- defaultValue.i32 = scalar.AsI32();
- } else if (scalar.IsFloat()) {
- defaultValue.f32 = scalar.AsFloat();
- } else {
- UNREACHABLE();
- }
- }
- EntryPointMetadata::OverridableConstant constant = {
- id, FromTintOverridableConstantType(c.type), c.is_initialized,
- defaultValue};
-
- std::string identifier =
- c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
- metadata->overridableConstants[identifier] = constant;
-
- if (!c.is_initialized) {
- auto it = metadata->uninitializedOverridableConstants.emplace(
- std::move(identifier));
- // The insertion should have taken place
- ASSERT(it.second);
- } else {
- auto it = metadata->initializedOverridableConstants.emplace(
- std::move(identifier));
- // The insertion should have taken place
- ASSERT(it.second);
- }
- }
- }
-
- DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
-
- if (metadata->stage == SingleShaderStage::Compute) {
- DAWN_INVALID_IF(
- entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
- entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
- entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
- "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
- "maximum allowed (%u, %u, %u).",
- entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
- entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
- limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
-
- // Dimensions have already been validated against their individual limits above.
- // Cast to uint64_t to avoid overflow in this multiplication.
- uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
- entryPoint.workgroup_size_y *
- entryPoint.workgroup_size_z;
- DAWN_INVALID_IF(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
- "The total number of workgroup invocations (%u) exceeds the "
- "maximum allowed (%u).",
- numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
-
- const size_t workgroupStorageSize =
- inspector.GetWorkgroupStorageSize(entryPoint.name);
- DAWN_INVALID_IF(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
- "The total use of workgroup storage (%u bytes) is larger than "
- "the maximum allowed (%u bytes).",
- workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
-
- metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
- metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
- metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
-
- metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
- }
-
- if (metadata->stage == SingleShaderStage::Vertex) {
- for (const auto& inputVar : entryPoint.input_variables) {
- DAWN_INVALID_IF(
- !inputVar.has_location_decoration,
- "Vertex input variable \"%s\" doesn't have a location decoration.",
- inputVar.name);
-
- uint32_t unsanitizedLocation = inputVar.location_decoration;
- DAWN_INVALID_IF(unsanitizedLocation >= kMaxVertexAttributes,
- "Vertex input variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u)",
- inputVar.name, unsanitizedLocation, kMaxVertexAttributes);
- VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
-
- DAWN_TRY_ASSIGN(
- metadata->vertexInputBaseTypes[location],
- TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
- metadata->usedVertexInputs.set(location);
- }
-
- // [[position]] must be declared in a vertex shader but is not exposed as an
- // output variable by Tint so we directly add its components to the total.
- uint32_t totalInterStageShaderComponents = 4;
- for (const auto& outputVar : entryPoint.output_variables) {
- DAWN_INVALID_IF(
- !outputVar.has_location_decoration,
- "Vertex ouput variable \"%s\" doesn't have a location decoration.",
- outputVar.name);
-
- uint32_t location = outputVar.location_decoration;
- DAWN_INVALID_IF(location > kMaxInterStageShaderLocation,
- "Vertex output variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- outputVar.name, location, kMaxInterStageShaderLocation);
-
- metadata->usedInterStageVariables.set(location);
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].baseType,
- TintComponentTypeToInterStageComponentType(outputVar.component_type));
- DAWN_TRY_ASSIGN(metadata->interStageVariables[location].componentCount,
- TintCompositionTypeToInterStageComponentCount(
- outputVar.composition_type));
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].interpolationType,
- TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].interpolationSampling,
- TintInterpolationSamplingToInterpolationSamplingType(
- outputVar.interpolation_sampling));
-
- totalInterStageShaderComponents +=
- metadata->interStageVariables[location].componentCount;
- }
-
- DAWN_INVALID_IF(
- totalInterStageShaderComponents > kMaxInterStageShaderComponents,
- "Total vertex output components count (%u) exceeds the maximum (%u).",
- totalInterStageShaderComponents, kMaxInterStageShaderComponents);
- }
-
- if (metadata->stage == SingleShaderStage::Fragment) {
- uint32_t totalInterStageShaderComponents = 0;
- for (const auto& inputVar : entryPoint.input_variables) {
- DAWN_INVALID_IF(
- !inputVar.has_location_decoration,
- "Fragment input variable \"%s\" doesn't have a location decoration.",
- inputVar.name);
-
- uint32_t location = inputVar.location_decoration;
- DAWN_INVALID_IF(location > kMaxInterStageShaderLocation,
- "Fragment input variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- inputVar.name, location, kMaxInterStageShaderLocation);
-
- metadata->usedInterStageVariables.set(location);
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].baseType,
- TintComponentTypeToInterStageComponentType(inputVar.component_type));
- DAWN_TRY_ASSIGN(metadata->interStageVariables[location].componentCount,
- TintCompositionTypeToInterStageComponentCount(
- inputVar.composition_type));
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].interpolationType,
- TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
- DAWN_TRY_ASSIGN(
- metadata->interStageVariables[location].interpolationSampling,
- TintInterpolationSamplingToInterpolationSamplingType(
- inputVar.interpolation_sampling));
-
- totalInterStageShaderComponents +=
- metadata->interStageVariables[location].componentCount;
- }
-
- if (entryPoint.front_facing_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.input_sample_mask_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.sample_index_used) {
- totalInterStageShaderComponents += 1;
- }
- if (entryPoint.input_position_used) {
- totalInterStageShaderComponents += 4;
- }
-
- DAWN_INVALID_IF(
- totalInterStageShaderComponents > kMaxInterStageShaderComponents,
- "Total fragment input components count (%u) exceeds the maximum (%u).",
- totalInterStageShaderComponents, kMaxInterStageShaderComponents);
-
- for (const auto& outputVar : entryPoint.output_variables) {
- DAWN_INVALID_IF(
- !outputVar.has_location_decoration,
- "Fragment input variable \"%s\" doesn't have a location decoration.",
- outputVar.name);
-
- uint32_t unsanitizedAttachment = outputVar.location_decoration;
- DAWN_INVALID_IF(unsanitizedAttachment >= kMaxColorAttachments,
- "Fragment output variable \"%s\" has a location (%u) that "
- "exceeds the maximum (%u).",
- outputVar.name, unsanitizedAttachment,
- kMaxColorAttachments);
- ColorAttachmentIndex attachment(
- static_cast<uint8_t>(unsanitizedAttachment));
-
- DAWN_TRY_ASSIGN(
- metadata->fragmentOutputVariables[attachment].baseType,
- TintComponentTypeToTextureComponentType(outputVar.component_type));
- uint32_t componentCount;
- DAWN_TRY_ASSIGN(componentCount,
- TintCompositionTypeToInterStageComponentCount(
- outputVar.composition_type));
- // componentCount should be no larger than 4u
- ASSERT(componentCount <= 4u);
- metadata->fragmentOutputVariables[attachment].componentCount =
- componentCount;
- metadata->fragmentOutputsWritten.set(attachment);
- }
- }
-
- for (const tint::inspector::ResourceBinding& resource :
- inspector.GetResourceBindings(entryPoint.name)) {
- DAWN_INVALID_IF(resource.bind_group >= kMaxBindGroups,
- "The entry-point uses a binding with a group decoration (%u) "
- "that exceeds the maximum (%u).",
- resource.bind_group, kMaxBindGroups);
-
- BindingNumber bindingNumber(resource.binding);
- BindGroupIndex bindGroupIndex(resource.bind_group);
-
- const auto& it = metadata->bindings[bindGroupIndex].emplace(
- bindingNumber, ShaderBindingInfo{});
- DAWN_INVALID_IF(
- !it.second,
- "Entry-point has a duplicate binding for (group:%u, binding:%u).",
- resource.binding, resource.bind_group);
-
- ShaderBindingInfo* info = &it.first->second;
- info->bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
-
- switch (info->bindingType) {
- case BindingInfoType::Buffer:
- info->buffer.minBindingSize = resource.size_no_padding;
- DAWN_TRY_ASSIGN(info->buffer.type, TintResourceTypeToBufferBindingType(
- resource.resource_type));
- break;
- case BindingInfoType::Sampler:
- switch (resource.resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kSampler:
- info->sampler.isComparison = false;
- break;
- case tint::inspector::ResourceBinding::ResourceType::
- kComparisonSampler:
- info->sampler.isComparison = true;
- break;
- default:
- UNREACHABLE();
- }
- break;
- case BindingInfoType::Texture:
- info->texture.viewDimension =
- TintTextureDimensionToTextureViewDimension(resource.dim);
- if (resource.resource_type ==
- tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
- resource.resource_type ==
- tint::inspector::ResourceBinding::ResourceType::
- kDepthMultisampledTexture) {
- info->texture.compatibleSampleTypes = SampleTypeBit::Depth;
- } else {
- info->texture.compatibleSampleTypes =
- TintSampledKindToSampleTypeBit(resource.sampled_kind);
- }
- info->texture.multisampled =
- resource.resource_type == tint::inspector::ResourceBinding::
- ResourceType::kMultisampledTexture ||
- resource.resource_type ==
- tint::inspector::ResourceBinding::ResourceType::
- kDepthMultisampledTexture;
-
- break;
- case BindingInfoType::StorageTexture:
- DAWN_TRY_ASSIGN(
- info->storageTexture.access,
- TintResourceTypeToStorageTextureAccess(resource.resource_type));
- info->storageTexture.format =
- TintImageFormatToTextureFormat(resource.image_format);
- info->storageTexture.viewDimension =
- TintTextureDimensionToTextureViewDimension(resource.dim);
-
- break;
- case BindingInfoType::ExternalTexture:
- break;
- default:
- return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
- }
- }
-
- std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
- inspector.GetSamplerTextureUses(entryPoint.name);
- metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
- std::transform(
- samplerTextureUses.begin(), samplerTextureUses.end(),
- std::back_inserter(metadata->samplerTexturePairs),
- [](const tint::inspector::SamplerTexturePair& pair) {
- EntryPointMetadata::SamplerTexturePair result;
- result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
- BindingNumber(pair.sampler_binding_point.binding)};
- result.texture = {BindGroupIndex(pair.texture_binding_point.group),
- BindingNumber(pair.texture_binding_point.binding)};
- return result;
- });
-
- result[entryPoint.name] = std::move(metadata);
- }
- return std::move(result);
- }
- } // anonymous namespace
-
- ShaderModuleParseResult::ShaderModuleParseResult() = default;
- ShaderModuleParseResult::~ShaderModuleParseResult() = default;
-
- ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
-
- ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
- default;
-
- bool ShaderModuleParseResult::HasParsedShader() const {
- return tintProgram != nullptr;
- }
-
- // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
- // long as tint diagnostics are inspected / printed.
- class TintSource {
- public:
- template <typename... ARGS>
- TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
- }
-
- tint::Source::File file;
- };
-
- MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* outMessages) {
- ASSERT(parseResult != nullptr);
-
- const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
- DAWN_INVALID_IF(chainedDescriptor == nullptr,
- "Shader module descriptor missing chained descriptor");
-
- // For now only a single SPIRV or WGSL subdescriptor is allowed.
- DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
- wgpu::SType::ShaderModuleWGSLDescriptor));
-
- ScopedTintICEHandler scopedICEHandler(device);
-
- const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
- FindInChain(chainedDescriptor, &spirvDesc);
- const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
- FindInChain(chainedDescriptor, &wgslDesc);
-
- // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
- // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
- ShaderModuleWGSLDescriptor newWgslDesc;
- std::string newWgslCode;
- if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
- std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
-
- tint::writer::wgsl::Options options;
- auto result = tint::writer::wgsl::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
-
- newWgslCode = std::move(result.wgsl);
- newWgslDesc.source = newWgslCode.c_str();
-
- spirvDesc = nullptr;
- wgslDesc = &newWgslDesc;
- }
-
- if (spirvDesc) {
- DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv),
- "SPIR-V is disallowed.");
-
- std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- } else if (wgslDesc) {
- auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
-
- if (device->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
- device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- parseResult->tintSource = std::move(tintSource);
- }
-
- return {};
- }
-
- RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout) {
- RequiredBufferSizes bufferSizes;
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
- layout->GetBindGroupLayout(group));
- }
-
- return bufferSizes;
- }
-
- ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- const tint::Program* program,
- const tint::transform::DataMap& inputs,
- tint::transform::DataMap* outputs,
- OwnedCompilationMessages* outMessages) {
- tint::transform::Output output = transform->Run(program, inputs);
- if (outMessages != nullptr) {
- outMessages->AddMessages(output.program.Diagnostics());
- }
- DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
- output.program.Diagnostics().str());
- if (outputs != nullptr) {
- *outputs = std::move(output.data);
- }
- return std::move(output.program);
- }
-
- void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet,
- tint::transform::DataMap* transformInputs) {
- tint::transform::VertexPulling::Config cfg;
- cfg.entry_point_name = entryPoint;
- cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
-
- cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
- for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
- tint::transform::VertexBufferLayoutDescriptor* tintInfo =
- &cfg.vertex_state[static_cast<uint8_t>(slot)];
-
- tintInfo->array_stride = dawnInfo.arrayStride;
- tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
- }
-
- for (VertexAttributeLocation location :
- IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
- tint::transform::VertexAttributeDescriptor tintInfo;
- tintInfo.format = ToTintVertexFormat(dawnInfo.format);
- tintInfo.offset = dawnInfo.offset;
- tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
-
- uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
- cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
- }
-
- transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
- }
-
- MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
- const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout) {
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
- device, group, entryPoint, layout->GetBindGroupLayout(group)),
- "validating the entry-point's compatibility for group %u with %s",
- static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
- }
-
- for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
- DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
- "The entry-point uses bindings in group %u but %s doesn't have a "
- "BindGroupLayout for this index",
- static_cast<uint32_t>(group), layout);
- }
-
- // Validate that filtering samplers are not used with unfilterable textures.
- for (const auto& pair : entryPoint.samplerTexturePairs) {
- const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
- const BindingInfo& samplerInfo =
- samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
- if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
- continue;
- }
- const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
- const BindingInfo& textureInfo =
- textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
-
- ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
- textureInfo.bindingType != BindingInfoType::Sampler &&
- textureInfo.bindingType != BindingInfoType::StorageTexture);
-
- if (textureInfo.bindingType != BindingInfoType::Texture) {
- continue;
- }
-
- // Uint/sint can't be statically used with a sampler, so they any
- // texture bindings reflected must be float or depth textures. If
- // the shader uses a float/depth texture but the bind group layout
- // specifies a uint/sint texture binding,
- // |ValidateCompatibilityWithBindGroupLayout| will fail since the
- // sampleType does not match.
- ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
- textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
- textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
-
- DAWN_INVALID_IF(
- textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
- "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
- "(group:%u, binding:%u) that's %s",
- static_cast<uint32_t>(pair.texture.group),
- static_cast<uint32_t>(pair.texture.binding),
- wgpu::TextureSampleType::UnfilterableFloat,
- static_cast<uint32_t>(pair.sampler.group),
- static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
- }
-
- return {};
- }
-
- // ShaderModuleBase
-
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag)
- : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
- ASSERT(descriptor->nextInChain != nullptr);
- const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
- FindInChain(descriptor->nextInChain, &spirvDesc);
- const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
- FindInChain(descriptor->nextInChain, &wgslDesc);
- ASSERT(spirvDesc || wgslDesc);
-
- if (spirvDesc) {
- mType = Type::Spirv;
- mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
- } else if (wgslDesc) {
- mType = Type::Wgsl;
- mWgsl = std::string(wgslDesc->source);
- }
- }
-
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
- TrackInDevice();
- }
-
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
- : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mType(Type::Undefined) {
- }
-
- ShaderModuleBase::~ShaderModuleBase() = default;
-
- void ShaderModuleBase::DestroyImpl() {
- if (IsCachedReference()) {
- // Do not uncache the actual cached object if we are a blueprint.
- GetDevice()->UncacheShaderModule(this);
- }
- }
-
- // static
- Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
- return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
- }
-
- ObjectType ShaderModuleBase::GetType() const {
- return ObjectType::ShaderModule;
- }
-
- bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
- return mEntryPoints.count(entryPoint) > 0;
- }
-
- const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
- ASSERT(HasEntryPoint(entryPoint));
- return *mEntryPoints.at(entryPoint);
- }
-
- size_t ShaderModuleBase::ComputeContentHash() {
- ObjectContentHasher recorder;
- recorder.Record(mType);
- recorder.Record(mOriginalSpirv);
- recorder.Record(mWgsl);
- return recorder.GetContentHash();
- }
-
- bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
- const ShaderModuleBase* b) const {
- return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv &&
- a->mWgsl == b->mWgsl;
- }
-
- const tint::Program* ShaderModuleBase::GetTintProgram() const {
- ASSERT(mTintProgram);
- return mTintProgram.get();
- }
-
- void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
- void* userdata) {
- if (callback == nullptr) {
- return;
- }
-
- callback(WGPUCompilationInfoRequestStatus_Success,
- mCompilationMessages->GetCompilationInfo(), userdata);
- }
-
- void ShaderModuleBase::InjectCompilationMessages(
- std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
- // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
- // module returned from cache.
- // InjectCompilationMessages should be called only once for a shader module, after it is
- // created. However currently InjectCompilationMessages may be called on a shader module
- // returned from cache rather than newly created, and violate the rule. We just skip the
- // injection in this case for now, but a proper solution including ensure the cache goes
- // before the validation is required.
- if (mCompilationMessages != nullptr) {
- return;
- }
- // Move the compilationMessages into the shader module and emit the tint errors and warnings
- mCompilationMessages = std::move(compilationMessages);
-
- // Emit the formatted Tint errors and warnings within the moved compilationMessages
- const std::vector<std::string>& formattedTintMessages =
- mCompilationMessages->GetFormattedTintMessages();
- if (formattedTintMessages.empty()) {
- return;
- }
- std::ostringstream t;
- for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
- pMessage++) {
- if (pMessage != formattedTintMessages.begin()) {
- t << std::endl;
- }
- t << *pMessage;
- }
- this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
- }
-
- OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
- return mCompilationMessages.get();
- }
-
- MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
- mTintProgram = std::move(parseResult->tintProgram);
- mTintSource = std::move(parseResult->tintSource);
-
- DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
- return {};
- }
-
- size_t PipelineLayoutEntryPointPairHashFunc::operator()(
- const PipelineLayoutEntryPointPair& pair) const {
- size_t hash = 0;
- HashCombine(&hash, pair.first, pair.second);
- return hash;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
deleted file mode 100644
index 4625ae888bd..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SHADERMODULE_H_
-#define DAWNNATIVE_SHADERMODULE_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/CachedObject.h"
-#include "dawn_native/CompilationMessages.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/PerStage.h"
-#include "dawn_native/VertexFormat.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <bitset>
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-namespace tint {
-
- class Program;
-
- namespace transform {
- class DataMap;
- class Transform;
- class VertexPulling;
- } // namespace transform
-
-} // namespace tint
-
-namespace dawn_native {
-
- struct EntryPointMetadata;
-
- // Base component type of an inter-stage variable
- enum class InterStageComponentType {
- Sint,
- Uint,
- Float,
- };
-
- enum class InterpolationType {
- Perspective,
- Linear,
- Flat,
- };
-
- enum class InterpolationSampling {
- None,
- Center,
- Centroid,
- Sample,
- };
-
- using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
- struct PipelineLayoutEntryPointPairHashFunc {
- size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
- };
-
- // A map from name to EntryPointMetadata.
- using EntryPointMetadataTable =
- std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
-
- // Source for a tint program
- class TintSource;
-
- struct ShaderModuleParseResult {
- ShaderModuleParseResult();
- ~ShaderModuleParseResult();
- ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
- ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
-
- bool HasParsedShader() const;
-
- std::unique_ptr<tint::Program> tintProgram;
- std::unique_ptr<TintSource> tintSource;
- };
-
- MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult,
- OwnedCompilationMessages* outMessages);
- MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
- const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout);
-
- RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
- const PipelineLayoutBase* layout);
- ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- const tint::Program* program,
- const tint::transform::DataMap& inputs,
- tint::transform::DataMap* outputs,
- OwnedCompilationMessages* messages);
-
- /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
- void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
- const std::string& entryPoint,
- BindGroupIndex pullingBufferBindingSet,
- tint::transform::DataMap* transformInputs);
-
- // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
- // for isComparison instead of a wgpu::SamplerBindingType enum.
- struct ShaderSamplerBindingInfo {
- bool isComparison;
- };
-
- // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
- // instead of a single enum.
- struct ShaderTextureBindingInfo {
- SampleTypeBit compatibleSampleTypes;
- wgpu::TextureViewDimension viewDimension;
- bool multisampled;
- };
-
- // Per-binding shader metadata contains some SPIRV specific information in addition to
- // most of the frontend per-binding information.
- struct ShaderBindingInfo {
- // The SPIRV ID of the resource.
- uint32_t id;
- uint32_t base_type_id;
-
- BindingNumber binding;
- BindingInfoType bindingType;
-
- BufferBindingLayout buffer;
- ShaderSamplerBindingInfo sampler;
- ShaderTextureBindingInfo texture;
- StorageTextureBindingLayout storageTexture;
- };
-
- using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
- using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
-
- // The WebGPU overridable constants only support these scalar types
- union OverridableConstantScalar {
- // Use int32_t for boolean to initialize the full 32bit
- int32_t b;
- float f32;
- int32_t i32;
- uint32_t u32;
- };
-
- // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
- // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
- // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
- // ShaderModuleBase.
- struct EntryPointMetadata {
- // bindings[G][B] is the reflection data for the binding defined with
- // [[group=G, binding=B]] in WGSL / SPIRV.
- BindingInfoArray bindings;
-
- struct SamplerTexturePair {
- BindingSlot sampler;
- BindingSlot texture;
- };
- std::vector<SamplerTexturePair> samplerTexturePairs;
-
- // The set of vertex attributes this entryPoint uses.
- ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
- vertexInputBaseTypes;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
-
- // An array to record the basic types (float, int and uint) of the fragment shader outputs.
- struct FragmentOutputVariableInfo {
- wgpu::TextureComponentType baseType;
- uint8_t componentCount;
- };
- ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
- fragmentOutputVariables;
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
-
- struct InterStageVariableInfo {
- InterStageComponentType baseType;
- uint32_t componentCount;
- InterpolationType interpolationType;
- InterpolationSampling interpolationSampling;
- };
- // Now that we only support vertex and fragment stages, there can't be both inter-stage
- // inputs and outputs in one shader stage.
- std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
- std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
-
- // The local workgroup size declared for a compute entry point (or 0s otehrwise).
- Origin3D localWorkgroupSize;
-
- // The shader stage for this binding.
- SingleShaderStage stage;
-
- struct OverridableConstant {
- uint32_t id;
- // Match tint::inspector::OverridableConstant::Type
- // Bool is defined as a macro on linux X11 and cannot compile
- enum class Type { Boolean, Float32, Uint32, Int32 } type;
-
- // If the constant doesn't not have an initializer in the shader
- // Then it is required for the pipeline stage to have a constant record to initialize a
- // value
- bool isInitialized;
-
- // Store the default initialized value in shader
- // This is used by metal backend as the function_constant does not have dafault values
- // Initialized when isInitialized == true
- OverridableConstantScalar defaultValue;
- };
-
- using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
-
- // Map identifier to overridable constant
- // Identifier is unique: either the variable name or the numeric ID if specified
- OverridableConstantsMap overridableConstants;
-
- // Overridable constants that are not initialized in shaders
- // They need value initialization from pipeline stage or it is a validation error
- std::unordered_set<std::string> uninitializedOverridableConstants;
-
- // Store constants with shader initialized values as well
- // This is used by metal backend to set values with default initializers that are not
- // overridden
- std::unordered_set<std::string> initializedOverridableConstants;
-
- bool usesNumWorkgroups = false;
- };
-
- class ShaderModuleBase : public ApiObjectBase, public CachedObject {
- public:
- ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- ApiObjectBase::UntrackedByDeviceTag tag);
- ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModuleBase() override;
-
- static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Return true iff the program has an entrypoint called `entryPoint`.
- bool HasEntryPoint(const std::string& entryPoint) const;
-
- // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
- // must be true.
- const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
-
- // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
- size_t ComputeContentHash() override;
-
- struct EqualityFunc {
- bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
- };
-
- const tint::Program* GetTintProgram() const;
-
- void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
-
- void InjectCompilationMessages(
- std::unique_ptr<OwnedCompilationMessages> compilationMessages);
-
- OwnedCompilationMessages* GetCompilationMessages() const;
-
- protected:
- // Constructor used only for mocking and testing.
- ShaderModuleBase(DeviceBase* device);
- void DestroyImpl() override;
-
- MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
-
- private:
- ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- // The original data in the descriptor for caching.
- enum class Type { Undefined, Spirv, Wgsl };
- Type mType;
- std::vector<uint32_t> mOriginalSpirv;
- std::string mWgsl;
-
- EntryPointMetadataTable mEntryPoints;
- std::unique_ptr<tint::Program> mTintProgram;
- std::unique_ptr<TintSource> mTintSource; // Keep the tint::Source::File alive
-
- std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SHADERMODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/SpirvValidation.cpp b/chromium/third_party/dawn/src/dawn_native/SpirvValidation.cpp
deleted file mode 100644
index b7844c19fff..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/SpirvValidation.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/SpirvValidation.h"
-
-#include "dawn_native/Device.h"
-
-#include <spirv-tools/libspirv.hpp>
-#include <sstream>
-
-namespace dawn_native {
-
- MaybeError ValidateSpirv(DeviceBase* device,
- const std::vector<uint32_t>& spirv,
- bool dumpSpirv) {
- spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
- spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
- const spv_position_t& position,
- const char* message) {
- WGPULoggingType wgpuLogLevel;
- switch (level) {
- case SPV_MSG_FATAL:
- case SPV_MSG_INTERNAL_ERROR:
- case SPV_MSG_ERROR:
- wgpuLogLevel = WGPULoggingType_Error;
- break;
- case SPV_MSG_WARNING:
- wgpuLogLevel = WGPULoggingType_Warning;
- break;
- case SPV_MSG_INFO:
- wgpuLogLevel = WGPULoggingType_Info;
- break;
- default:
- wgpuLogLevel = WGPULoggingType_Error;
- break;
- }
-
- std::ostringstream ss;
- ss << "SPIRV line " << position.index << ": " << message << std::endl;
- device->EmitLog(wgpuLogLevel, ss.str().c_str());
- });
-
- const bool valid = spirvTools.Validate(spirv);
- if (dumpSpirv || !valid) {
- std::ostringstream dumpedMsg;
- std::string disassembly;
- if (spirvTools.Disassemble(
- spirv, &disassembly,
- SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
- dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
- } else {
- dumpedMsg << "/* Failed to disassemble generated SPIRV */";
- }
- device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- DAWN_INVALID_IF(!valid,
- "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/SpirvValidation.h b/chromium/third_party/dawn/src/dawn_native/SpirvValidation.h
deleted file mode 100644
index b22fd06e0b3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/SpirvValidation.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Error.h"
-
-#include <vector>
-
-namespace dawn_native {
-
- class DeviceBase;
-
- MaybeError ValidateSpirv(DeviceBase* device,
- const std::vector<uint32_t>& spirv,
- bool dumpSpirv);
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp
deleted file mode 100644
index 63dd65e9bf7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/StagingBuffer.h"
-
-namespace dawn_native {
-
- StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {
- }
-
- size_t StagingBufferBase::GetSize() const {
- return mBufferSize;
- }
-
- void* StagingBufferBase::GetMappedPointer() const {
- return mMappedPointer;
- }
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h
deleted file mode 100644
index 4d195488bc9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_STAGINGBUFFER_H_
-#define DAWNNATIVE_STAGINGBUFFER_H_
-
-#include "dawn_native/Error.h"
-
-namespace dawn_native {
-
- class StagingBufferBase {
- public:
- StagingBufferBase(size_t size);
- virtual ~StagingBufferBase() = default;
-
- virtual MaybeError Initialize() = 0;
-
- void* GetMappedPointer() const;
- size_t GetSize() const;
-
- protected:
- void* mMappedPointer = nullptr;
-
- private:
- const size_t mBufferSize;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_STAGINGBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Subresource.cpp b/chromium/third_party/dawn/src/dawn_native/Subresource.cpp
deleted file mode 100644
index ff0bd53fd18..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Subresource.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Subresource.h"
-
-#include "common/Assert.h"
-#include "dawn_native/Format.h"
-
-namespace dawn_native {
-
- Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
- Aspect aspectMask = ConvertAspect(format, aspect);
- ASSERT(HasOneBit(aspectMask));
- return aspectMask;
- }
-
- Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
- Aspect aspectMask = SelectFormatAspects(format, aspect);
- ASSERT(aspectMask != Aspect::None);
- return aspectMask;
- }
-
- Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
- // Color view |format| must be treated as the same plane |aspect|.
- if (format.aspects == Aspect::Color) {
- switch (aspect) {
- case wgpu::TextureAspect::Plane0Only:
- return Aspect::Plane0;
- case wgpu::TextureAspect::Plane1Only:
- return Aspect::Plane1;
- default:
- break;
- }
- }
- return ConvertAspect(format, aspect);
- }
-
- Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
- switch (aspect) {
- case wgpu::TextureAspect::All:
- return format.aspects;
- case wgpu::TextureAspect::DepthOnly:
- return format.aspects & Aspect::Depth;
- case wgpu::TextureAspect::StencilOnly:
- return format.aspects & Aspect::Stencil;
- case wgpu::TextureAspect::Plane0Only:
- return format.aspects & Aspect::Plane0;
- case wgpu::TextureAspect::Plane1Only:
- return format.aspects & Aspect::Plane1;
- }
- UNREACHABLE();
- }
-
- uint8_t GetAspectIndex(Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- switch (aspect) {
- case Aspect::Color:
- case Aspect::Depth:
- case Aspect::Plane0:
- case Aspect::CombinedDepthStencil:
- return 0;
- case Aspect::Plane1:
- case Aspect::Stencil:
- return 1;
- default:
- UNREACHABLE();
- }
- }
-
- uint8_t GetAspectCount(Aspect aspects) {
- // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
- // Note that we can't do a switch because compilers complain that Depth | Stencil is not
- // a valid enum value.
- if (aspects == Aspect::Color || aspects == Aspect::Depth ||
- aspects == Aspect::CombinedDepthStencil) {
- return 1;
- } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
- return 2;
- } else {
- ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
- return 2;
- }
- }
-
- SubresourceRange::SubresourceRange(Aspect aspects,
- FirstAndCountRange<uint32_t> arrayLayerParam,
- FirstAndCountRange<uint32_t> mipLevelParams)
- : aspects(aspects),
- baseArrayLayer(arrayLayerParam.first),
- layerCount(arrayLayerParam.count),
- baseMipLevel(mipLevelParams.first),
- levelCount(mipLevelParams.count) {
- }
-
- SubresourceRange::SubresourceRange()
- : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {
- }
-
- // static
- SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
- uint32_t baseArrayLayer,
- Aspect aspects) {
- return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
- }
-
- // static
- SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
- uint32_t baseArrayLayer,
- uint32_t baseMipLevel) {
- ASSERT(HasOneBit(aspect));
- return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
- }
-
- // static
- SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
- uint32_t layerCount,
- uint32_t levelCount) {
- return {aspects, {0, layerCount}, {0, levelCount}};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Subresource.h b/chromium/third_party/dawn/src/dawn_native/Subresource.h
deleted file mode 100644
index 454e17c031b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Subresource.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SUBRESOURCE_H_
-#define DAWNNATIVE_SUBRESOURCE_H_
-
-#include "dawn_native/EnumClassBitmasks.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- // Note: Subresource indices are computed by iterating the aspects in increasing order.
- // D3D12 uses these directly, so the order much match D3D12's indices.
- // - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
- enum class Aspect : uint8_t {
- None = 0x0,
- Color = 0x1,
- Depth = 0x2,
- Stencil = 0x4,
-
- // Aspects used to select individual planes in a multi-planar format.
- Plane0 = 0x8,
- Plane1 = 0x10,
-
- // An aspect for that represents the combination of both the depth and stencil aspects. It
- // can be ignored outside of the Vulkan backend.
- CombinedDepthStencil = 0x20,
- };
-
- template <>
- struct EnumBitmaskSize<Aspect> {
- static constexpr unsigned value = 6;
- };
-
- // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
- // does not exist in the format.
- // Also ASSERTs if "All" is selected and results in more than one aspect.
- Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
- // does not exist in the format.
- Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
- // Note that this can return Aspect::None if the Format doesn't have any of the
- // selected aspects.
- Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
-
- // Convert TextureAspect to the aspect which corresponds to the view format. This
- // special cases per plane view formats before calling ConvertAspect.
- Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
-
- // Helper struct to make it clear that what the parameters of a range mean.
- template <typename T>
- struct FirstAndCountRange {
- T first;
- T count;
- };
-
- struct SubresourceRange {
- SubresourceRange(Aspect aspects,
- FirstAndCountRange<uint32_t> arrayLayerParam,
- FirstAndCountRange<uint32_t> mipLevelParams);
- SubresourceRange();
-
- Aspect aspects;
- uint32_t baseArrayLayer;
- uint32_t layerCount;
- uint32_t baseMipLevel;
- uint32_t levelCount;
-
- static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
- uint32_t baseArrayLayer,
- Aspect aspects);
- static SubresourceRange MakeSingle(Aspect aspect,
- uint32_t baseArrayLayer,
- uint32_t baseMipLevel);
-
- static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
- };
-
- // Helper function to use aspects as linear indices in arrays.
- uint8_t GetAspectIndex(Aspect aspect);
- uint8_t GetAspectCount(Aspect aspects);
-
- // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
- // the per plane index does not exceed the known maximum plane count.
- static constexpr uint32_t kMaxPlanesPerFormat = 3;
-
-} // namespace dawn_native
-
-namespace dawn {
-
- template <>
- struct IsDawnBitmask<dawn_native::Aspect> {
- static constexpr bool enable = true;
- };
-
-} // namespace dawn
-
-#endif // DAWNNATIVE_SUBRESOURCE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/SubresourceStorage.h b/chromium/third_party/dawn/src/dawn_native/SubresourceStorage.h
deleted file mode 100644
index 16feffcea21..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/SubresourceStorage.h
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SUBRESOURCESTORAGE_H_
-#define DAWNNATIVE_SUBRESOURCESTORAGE_H_
-
-#include "common/Assert.h"
-#include "common/TypeTraits.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Subresource.h"
-
-#include <array>
-#include <limits>
-#include <memory>
-#include <vector>
-
-namespace dawn_native {
-
- // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
- // value of type T except that it tries to compress similar subresources so that algorithms
- // can act on a whole range of subresources at once if they have the same state.
- //
- // For example a very common case to optimize for is the tracking of the usage of texture
- // subresources inside a render pass: the vast majority of texture views will select the whole
- // texture while a small minority will select a sub-range. We want to optimize the common case
- // by setting and checking a single "usage" value when a full subresource is used but at the
- // same time allow per-subresource data when needed.
- //
- // Another example is barrier tracking per-subresource in the backends: it will often happen
- // that during texture upload each mip level will have a different "barrier state". However
- // when the texture is fully uploaded and after it is used for sampling (with a full view) for
- // the first time, the barrier state will likely be the same across all the subresources.
- // That's why some form of "recompression" of subresource state must be possibe.
- //
- // In order to keep the implementation details private and to avoid iterator-hell, this
- // container uses a more functional approach of calling a closure on the interesting ranges.
- // This is for example how to look at the state of all subresources.
- //
- // subresources.Iterate([](const SubresourceRange& range, const T& data) {
- // // Do something with the knowledge that all the subresources in `range` have value
- // // `data`.
- // });
- //
- // SubresourceStorage internally tracks compression state per aspect and then per layer of each
- // aspect. This means that a 2-aspect texture can have the following compression state:
- //
- // - Aspect 0 is fully compressed.
- // - Aspect 1 is partially compressed:
- // - Aspect 1 layer 3 is decompressed.
- // - Aspect 1 layer 0-2 and 4-42 are compressed.
- //
- // A useful model to reason about SubresourceStorage is to represent is as a tree:
- //
- // - SubresourceStorage is the root.
- // |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
- // any children because the data is constant across all of the subtree.
- // |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
- // its node doesn't have any children because the data is constant across all of the
- // subtree.
- // |-> Nodes 3 deep represent individial mip levels (for uncompressed layers).
- //
- // The concept of recompression is the removal of all child nodes of a non-leaf node when the
- // data is constant across them. Decompression is the addition of child nodes to a leaf node
- // and copying of its data to all its children.
- //
- // The choice of having secondary compression for array layers is to optimize for the cases
- // where transfer operations are used to update specific layers of texture with render or
- // transfer operations, while the rest is untouched. It seems much less likely that there
- // would be operations that touch all Nth mips of a 2D array texture without touching the
- // others.
- //
- // There are several hot code paths that create new SubresourceStorage like the tracking of
- // resource usage per-pass. We don't want to allocate a container for the decompressed data
- // unless we have to because it would dramatically lower performance. Instead
- // SubresourceStorage contains an inline array that contains the per-aspect compressed data
- // and only allocates a per-subresource on aspect decompression.
- //
- // T must be a copyable type that supports equality comparison with ==.
- //
- // The implementation of functions in this file can have a lot of control flow and corner cases
- // so each modification should come with extensive tests and ensure 100% code coverage of the
- // modified functions. See instructions at
- // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
- // to run the test with code coverage. A command line that worked in the past (with the right
- // GN args for the out/coverage directory in a Chromium checkout) is:
- //
- /*
- python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
- "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
- third_party/dawn/src/dawn_native
- */
- //
- // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
- // if recompression can happen or not in Update() and Merge()
- template <typename T>
- class SubresourceStorage {
- public:
- static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
- static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
-
- // Creates the storage with the given "dimensions" and all subresources starting with the
- // initial value.
- SubresourceStorage(Aspect aspects,
- uint32_t arrayLayerCount,
- uint32_t mipLevelCount,
- T initialValue = {});
-
- // Returns the data for a single subresource. Note that the reference returned might be the
- // same for multiple subresources.
- const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
-
- // Given an iterateFunc that's a function or function-like objet that can be called with
- // arguments of type (const SubresourceRange& range, const T& data) and returns void,
- // calls it with aggregate ranges if possible, such that each subresource is part of
- // exactly one of the ranges iterateFunc is called with (and obviously data is the value
- // stored for that subresource). For example:
- //
- // subresources.Iterate([&](const SubresourceRange& range, const T& data) {
- // // Do something with range and data.
- // });
- template <typename F>
- void Iterate(F&& iterateFunc) const;
-
- // Given an updateFunc that's a function or function-like objet that can be called with
- // arguments of type (const SubresourceRange& range, T* data) and returns void,
- // calls it with ranges that in aggregate form `range` and pass for each of the
- // sub-ranges a pointer to modify the value for that sub-range. For example:
- //
- // subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
- // *data |= wgpu::TextureUsage::Stuff;
- // });
- //
- // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
- // your code is likely to break when compression happens. Range should only be used for
- // side effects like using it to compute a Vulkan pipeline barrier.
- template <typename F>
- void Update(const SubresourceRange& range, F&& updateFunc);
-
- // Given a mergeFunc that's a function or a function-like object that can be called with
- // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
- // returns void, calls it with ranges that in aggregate form the full resources and pass
- // for each of the sub-ranges a pointer to modify the value for that sub-range and the
- // corresponding value from other for that sub-range. For example:
- //
- // subresources.Merge(otherUsages,
- // [](const SubresourceRange&, T* data, const T& otherData) {
- // *data |= otherData;
- // });
- //
- // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
- // your code is likely to break when compression happens. Range should only be used for
- // side effects like using it to compute a Vulkan pipeline barrier.
- template <typename U, typename F>
- void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
-
- // Other operations to consider:
- //
- // - UpdateTo(Range, T) that updates the range to a constant value.
-
- // Methods to query the internal state of SubresourceStorage for testing.
- Aspect GetAspectsForTesting() const;
- uint32_t GetArrayLayerCountForTesting() const;
- uint32_t GetMipLevelCountForTesting() const;
- bool IsAspectCompressedForTesting(Aspect aspect) const;
- bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
-
- private:
- template <typename U>
- friend class SubresourceStorage;
-
- void DecompressAspect(uint32_t aspectIndex);
- void RecompressAspect(uint32_t aspectIndex);
-
- void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
- void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
-
- SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
-
- // LayerCompressed should never be called when the aspect is compressed otherwise it would
- // need to check that mLayerCompressed is not null before indexing it.
- bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
- bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
-
- // Return references to the data for a compressed plane / layer or subresource.
- // Each variant should be called exactly under the correct compression level.
- T& DataInline(uint32_t aspectIndex);
- T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
- const T& DataInline(uint32_t aspectIndex) const;
- const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
-
- Aspect mAspects;
- uint8_t mMipLevelCount;
- uint16_t mArrayLayerCount;
-
- // Invariant: if an aspect is marked compressed, then all it's layers are marked as
- // compressed.
- static constexpr size_t kMaxAspects = 2;
- std::array<bool, kMaxAspects> mAspectCompressed;
- std::array<T, kMaxAspects> mInlineAspectData;
-
- // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
- std::unique_ptr<bool[]> mLayerCompressed;
-
- // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
- // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
- // the data for a compressed layer of aspect if in the slot for (aspect, layer, 0).
- std::unique_ptr<T[]> mData;
- };
-
- template <typename T>
- SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
- uint32_t arrayLayerCount,
- uint32_t mipLevelCount,
- T initialValue)
- : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
- ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
- ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
-
- uint32_t aspectCount = GetAspectCount(aspects);
- ASSERT(aspectCount <= kMaxAspects);
-
- for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
- mAspectCompressed[aspectIndex] = true;
- DataInline(aspectIndex) = initialValue;
- }
- }
-
- template <typename T>
- template <typename F>
- void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
- bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
- bool fullAspects =
- range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
-
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // Call the updateFunc once for the whole aspect if possible or decompress and fallback
- // to per-layer handling.
- if (mAspectCompressed[aspectIndex]) {
- if (fullAspects) {
- SubresourceRange updateRange =
- SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
- updateFunc(updateRange, &DataInline(aspectIndex));
- continue;
- }
- DecompressAspect(aspectIndex);
- }
-
- uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
- for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
- // Call the updateFunc once for the whole layer if possible or decompress and
- // fallback to per-level handling.
- if (LayerCompressed(aspectIndex, layer)) {
- if (fullLayers) {
- SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
- updateFunc(updateRange, &Data(aspectIndex, layer));
- continue;
- }
- DecompressLayer(aspectIndex, layer);
- }
-
- // Worst case: call updateFunc per level.
- uint32_t levelEnd = range.baseMipLevel + range.levelCount;
- for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
- SubresourceRange updateRange =
- SubresourceRange::MakeSingle(aspect, layer, level);
- updateFunc(updateRange, &Data(aspectIndex, layer, level));
- }
-
- // If the range has fullLayers then it is likely we can recompress after the calls
- // to updateFunc (this branch is skipped if updateFunc was called for the whole
- // layer).
- if (fullLayers) {
- RecompressLayer(aspectIndex, layer);
- }
- }
-
- // If the range has fullAspects then it is likely we can recompress after the calls to
- // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
- if (fullAspects) {
- RecompressAspect(aspectIndex);
- }
- }
- }
-
- template <typename T>
- template <typename U, typename F>
- void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
- ASSERT(mAspects == other.mAspects);
- ASSERT(mArrayLayerCount == other.mArrayLayerCount);
- ASSERT(mMipLevelCount == other.mMipLevelCount);
-
- for (Aspect aspect : IterateEnumMask(mAspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // If the other storage's aspect is compressed we don't need to decompress anything
- // in `this` and can just iterate through it, merging with `other`'s constant value for
- // the aspect. For code simplicity this can be done with a call to Update().
- if (other.mAspectCompressed[aspectIndex]) {
- const U& otherData = other.DataInline(aspectIndex);
- Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
- [&](const SubresourceRange& subrange, T* data) {
- mergeFunc(subrange, data, otherData);
- });
- continue;
- }
-
- // Other doesn't have the aspect compressed so we must do at least per-layer merging.
- if (mAspectCompressed[aspectIndex]) {
- DecompressAspect(aspectIndex);
- }
-
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- // Similarly to above, use a fast path if other's layer is compressed.
- if (other.LayerCompressed(aspectIndex, layer)) {
- const U& otherData = other.Data(aspectIndex, layer);
- Update(GetFullLayerRange(aspect, layer),
- [&](const SubresourceRange& subrange, T* data) {
- mergeFunc(subrange, data, otherData);
- });
- continue;
- }
-
- // Sad case, other is decompressed for this layer, do per-level merging.
- if (LayerCompressed(aspectIndex, layer)) {
- DecompressLayer(aspectIndex, layer);
- }
-
- for (uint32_t level = 0; level < mMipLevelCount; level++) {
- SubresourceRange updateRange =
- SubresourceRange::MakeSingle(aspect, layer, level);
- mergeFunc(updateRange, &Data(aspectIndex, layer, level),
- other.Data(aspectIndex, layer, level));
- }
-
- RecompressLayer(aspectIndex, layer);
- }
-
- RecompressAspect(aspectIndex);
- }
- }
-
- template <typename T>
- template <typename F>
- void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
- for (Aspect aspect : IterateEnumMask(mAspects)) {
- uint32_t aspectIndex = GetAspectIndex(aspect);
-
- // Fastest path, call iterateFunc on the whole aspect at once.
- if (mAspectCompressed[aspectIndex]) {
- SubresourceRange range =
- SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
- iterateFunc(range, DataInline(aspectIndex));
- continue;
- }
-
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- // Fast path, call iterateFunc on the whole array layer at once.
- if (LayerCompressed(aspectIndex, layer)) {
- SubresourceRange range = GetFullLayerRange(aspect, layer);
- iterateFunc(range, Data(aspectIndex, layer));
- continue;
- }
-
- // Slow path, call iterateFunc for each mip level.
- for (uint32_t level = 0; level < mMipLevelCount; level++) {
- SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
- iterateFunc(range, Data(aspectIndex, layer, level));
- }
- }
- }
- }
-
- template <typename T>
- const T& SubresourceStorage<T>::Get(Aspect aspect,
- uint32_t arrayLayer,
- uint32_t mipLevel) const {
- uint32_t aspectIndex = GetAspectIndex(aspect);
- ASSERT(aspectIndex < GetAspectCount(mAspects));
- ASSERT(arrayLayer < mArrayLayerCount);
- ASSERT(mipLevel < mMipLevelCount);
-
- // Fastest path, the aspect is compressed!
- if (mAspectCompressed[aspectIndex]) {
- return DataInline(aspectIndex);
- }
-
- // Fast path, the array layer is compressed.
- if (LayerCompressed(aspectIndex, arrayLayer)) {
- return Data(aspectIndex, arrayLayer);
- }
-
- return Data(aspectIndex, arrayLayer, mipLevel);
- }
-
- template <typename T>
- Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
- return mAspects;
- }
-
- template <typename T>
- uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
- return mArrayLayerCount;
- }
-
- template <typename T>
- uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
- return mMipLevelCount;
- }
-
- template <typename T>
- bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
- return mAspectCompressed[GetAspectIndex(aspect)];
- }
-
- template <typename T>
- bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
- return mAspectCompressed[GetAspectIndex(aspect)] ||
- mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
- }
-
- template <typename T>
- void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
- ASSERT(mAspectCompressed[aspectIndex]);
- const T& aspectData = DataInline(aspectIndex);
- mAspectCompressed[aspectIndex] = false;
-
- // Extra allocations are only needed when aspects are decompressed. Create them lazily.
- if (mData == nullptr) {
- ASSERT(mLayerCompressed == nullptr);
-
- uint32_t aspectCount = GetAspectCount(mAspects);
- mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
- mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
-
- for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
- layerIndex++) {
- mLayerCompressed[layerIndex] = true;
- }
- }
-
- ASSERT(LayerCompressed(aspectIndex, 0));
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- Data(aspectIndex, layer) = aspectData;
- ASSERT(LayerCompressed(aspectIndex, layer));
- }
- }
-
- template <typename T>
- void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
- ASSERT(!mAspectCompressed[aspectIndex]);
- // All layers of the aspect must be compressed for the aspect to possibly recompress.
- for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
- if (!LayerCompressed(aspectIndex, layer)) {
- return;
- }
- }
-
- T layer0Data = Data(aspectIndex, 0);
- for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
- if (!(Data(aspectIndex, layer) == layer0Data)) {
- return;
- }
- }
-
- mAspectCompressed[aspectIndex] = true;
- DataInline(aspectIndex) = layer0Data;
- }
-
- template <typename T>
- void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- const T& layerData = Data(aspectIndex, layer);
- LayerCompressed(aspectIndex, layer) = false;
-
- // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
- // allows starting the iteration at level 1.
- for (uint32_t level = 1; level < mMipLevelCount; level++) {
- Data(aspectIndex, layer, level) = layerData;
- }
- }
-
- template <typename T>
- void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(!LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- const T& level0Data = Data(aspectIndex, layer, 0);
-
- for (uint32_t level = 1; level < mMipLevelCount; level++) {
- if (!(Data(aspectIndex, layer, level) == level0Data)) {
- return;
- }
- }
-
- LayerCompressed(aspectIndex, layer) = true;
- }
-
- template <typename T>
- SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
- return {aspect, {layer, 1}, {0, mMipLevelCount}};
- }
-
- template <typename T>
- bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
- }
-
- template <typename T>
- bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
- }
-
- template <typename T>
- T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
- ASSERT(mAspectCompressed[aspectIndex]);
- return mInlineAspectData[aspectIndex];
- }
- template <typename T>
- T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
- ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
- }
- template <typename T>
- const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
- ASSERT(mAspectCompressed[aspectIndex]);
- return mInlineAspectData[aspectIndex];
- }
- template <typename T>
- const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
- uint32_t layer,
- uint32_t level) const {
- ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
- ASSERT(!mAspectCompressed[aspectIndex]);
- return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
- }
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SUBRESOURCESTORAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.cpp b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
deleted file mode 100644
index 505fc609293..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Surface.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2020 the Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Surface.h"
-
-#include "common/Platform.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/SwapChain.h"
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <windows.ui.core.h>
-# include <windows.ui.xaml.controls.h>
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
-# include "common/xlib_with_undefs.h"
-#endif // defined(DAWN_USE_X11)
-
-namespace dawn_native {
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- Surface::Type value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s) {
- switch (value) {
- case Surface::Type::MetalLayer:
- s->Append("MetalLayer");
- break;
- case Surface::Type::WindowsHWND:
- s->Append("WindowsHWND");
- break;
- case Surface::Type::WindowsCoreWindow:
- s->Append("WindowsCoreWindow");
- break;
- case Surface::Type::WindowsSwapChainPanel:
- s->Append("WindowsSwapChainPanel");
- break;
- case Surface::Type::Xlib:
- s->Append("Xlib");
- break;
- }
- return {true};
- }
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- bool InheritsFromCAMetalLayer(void* obj);
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
- MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
- const SurfaceDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
- "Surface cannot be created with %s. nextInChain is not specified.",
- descriptor);
-
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::SurfaceDescriptorFromMetalLayer,
- wgpu::SType::SurfaceDescriptorFromWindowsHWND,
- wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
- wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
- wgpu::SType::SurfaceDescriptorFromXlib));
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
- FindInChain(descriptor->nextInChain, &metalDesc);
- if (metalDesc) {
- // Check that the layer is a CAMetalLayer (or a derived class).
- DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
- "Layer must be a CAMetalLayer");
- return {};
- }
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# if defined(DAWN_PLATFORM_WIN32)
- const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
- FindInChain(descriptor->nextInChain, &hwndDesc);
- if (hwndDesc) {
- DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
- return {};
- }
-# endif // defined(DAWN_PLATFORM_WIN32)
- const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
- FindInChain(descriptor->nextInChain, &coreWindowDesc);
- if (coreWindowDesc) {
- // Validate the coreWindow by query for ICoreWindow interface
- ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
- DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
- FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
- ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
- "Invalid CoreWindow");
- return {};
- }
- const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
- FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
- if (swapChainPanelDesc) {
- // Validate the swapChainPanel by querying for ISwapChainPanel interface
- ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
- DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
- FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
- ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
- "Invalid SwapChainPanel");
- return {};
- }
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
- const SurfaceDescriptorFromXlib* xDesc = nullptr;
- FindInChain(descriptor->nextInChain, &xDesc);
- if (xDesc) {
- // Check the validity of the window by calling a getter function on the window that
- // returns a status code. If the window is bad the call return a status of zero. We
- // need to set a temporary X11 error handler while doing this because the default
- // X11 error handler exits the program on any error.
- XErrorHandler oldErrorHandler =
- XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
- XWindowAttributes attributes;
- int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
- xDesc->window, &attributes);
- XSetErrorHandler(oldErrorHandler);
-
- DAWN_INVALID_IF(status == 0, "Invalid X Window");
- return {};
- }
-#endif // defined(DAWN_USE_X11)
-
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)",
- descriptor->nextInChain->sType);
- }
-
- Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
- : mInstance(instance) {
- ASSERT(descriptor->nextInChain != nullptr);
- const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
- const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
- const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
- const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
- const SurfaceDescriptorFromXlib* xDesc = nullptr;
- FindInChain(descriptor->nextInChain, &metalDesc);
- FindInChain(descriptor->nextInChain, &hwndDesc);
- FindInChain(descriptor->nextInChain, &coreWindowDesc);
- FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
- FindInChain(descriptor->nextInChain, &xDesc);
- ASSERT(metalDesc || hwndDesc || xDesc);
- if (metalDesc) {
- mType = Type::MetalLayer;
- mMetalLayer = metalDesc->layer;
- } else if (hwndDesc) {
- mType = Type::WindowsHWND;
- mHInstance = hwndDesc->hinstance;
- mHWND = hwndDesc->hwnd;
- } else if (coreWindowDesc) {
-#if defined(DAWN_PLATFORM_WINDOWS)
- mType = Type::WindowsCoreWindow;
- mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
-#endif // defined(DAWN_PLATFORM_WINDOWS)
- } else if (swapChainPanelDesc) {
-#if defined(DAWN_PLATFORM_WINDOWS)
- mType = Type::WindowsSwapChainPanel;
- mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
-#endif // defined(DAWN_PLATFORM_WINDOWS)
- } else if (xDesc) {
- mType = Type::Xlib;
- mXDisplay = xDesc->display;
- mXWindow = xDesc->window;
- } else {
- UNREACHABLE();
- }
- }
-
- Surface::~Surface() {
- if (mSwapChain != nullptr) {
- mSwapChain->DetachFromSurface();
- mSwapChain = nullptr;
- }
- }
-
- NewSwapChainBase* Surface::GetAttachedSwapChain() {
- return mSwapChain.Get();
- }
-
- void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
- mSwapChain = swapChain;
- }
-
- InstanceBase* Surface::GetInstance() {
- return mInstance.Get();
- }
-
- Surface::Type Surface::GetType() const {
- return mType;
- }
-
- void* Surface::GetMetalLayer() const {
- ASSERT(mType == Type::MetalLayer);
- return mMetalLayer;
- }
-
- void* Surface::GetHInstance() const {
- ASSERT(mType == Type::WindowsHWND);
- return mHInstance;
- }
- void* Surface::GetHWND() const {
- ASSERT(mType == Type::WindowsHWND);
- return mHWND;
- }
-
- IUnknown* Surface::GetCoreWindow() const {
- ASSERT(mType == Type::WindowsCoreWindow);
-#if defined(DAWN_PLATFORM_WINDOWS)
- return mCoreWindow.Get();
-#else
- return nullptr;
-#endif
- }
-
- IUnknown* Surface::GetSwapChainPanel() const {
- ASSERT(mType == Type::WindowsSwapChainPanel);
-#if defined(DAWN_PLATFORM_WINDOWS)
- return mSwapChainPanel.Get();
-#else
- return nullptr;
-#endif
- }
-
- void* Surface::GetXDisplay() const {
- ASSERT(mType == Type::Xlib);
- return mXDisplay;
- }
- uint32_t Surface::GetXWindow() const {
- ASSERT(mType == Type::Xlib);
- return mXWindow;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.h b/chromium/third_party/dawn/src/dawn_native/Surface.h
deleted file mode 100644
index f9b47d34428..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Surface.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SURFACE_H_
-#define DAWNNATIVE_SURFACE_H_
-
-#include "common/RefCounted.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include "common/Platform.h"
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "dawn_native/d3d12/d3d12_platform.h"
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-// Forward declare IUnknown
-// GetCoreWindow needs to return an IUnknown pointer
-// non-windows platforms don't have this type
-struct IUnknown;
-
-namespace dawn_native {
-
- MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
- const SurfaceDescriptor* descriptor);
-
- // A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
- // aren't used because they would cause compilation errors on other OSes (or require
- // ObjectiveC).
- // The surface is also used to store the current swapchain so that we can detach it when it is
- // replaced.
- class Surface final : public RefCounted {
- public:
- Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
-
- void SetAttachedSwapChain(NewSwapChainBase* swapChain);
- NewSwapChainBase* GetAttachedSwapChain();
-
- // These are valid to call on all Surfaces.
- enum class Type { MetalLayer, WindowsHWND, WindowsCoreWindow, WindowsSwapChainPanel, Xlib };
- Type GetType() const;
- InstanceBase* GetInstance();
-
- // Valid to call if the type is MetalLayer
- void* GetMetalLayer() const;
-
- // Valid to call if the type is WindowsHWND
- void* GetHInstance() const;
- void* GetHWND() const;
-
- // Valid to call if the type is WindowsCoreWindow
- IUnknown* GetCoreWindow() const;
-
- // Valid to call if the type is WindowsSwapChainPanel
- IUnknown* GetSwapChainPanel() const;
-
- // Valid to call if the type is WindowsXlib
- void* GetXDisplay() const;
- uint32_t GetXWindow() const;
-
- private:
- ~Surface() override;
-
- Ref<InstanceBase> mInstance;
- Type mType;
-
- // The swapchain will set this to null when it is destroyed.
- Ref<NewSwapChainBase> mSwapChain;
-
- // MetalLayer
- void* mMetalLayer = nullptr;
-
- // WindowsHwnd
- void* mHInstance = nullptr;
- void* mHWND = nullptr;
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- // WindowsCoreWindow
- ComPtr<IUnknown> mCoreWindow;
-
- // WindowsSwapChainPanel
- ComPtr<IUnknown> mSwapChainPanel;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
- // Xlib
- void* mXDisplay = nullptr;
- uint32_t mXWindow = 0;
- };
-
- absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
- Surface::Type value,
- const absl::FormatConversionSpec& spec,
- absl::FormatSink* s);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SURFACE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm b/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm
deleted file mode 100644
index 9989674fe6a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2020 the Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
-
-#if !defined(DAWN_ENABLE_BACKEND_METAL)
-# error "Surface_metal.mm requires the Metal backend to be enabled."
-#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
-
-#import <QuartzCore/CAMetalLayer.h>
-
-namespace dawn_native {
-
- bool InheritsFromCAMetalLayer(void* obj) {
- id<NSObject> object = static_cast<id>(obj);
- return [object isKindOfClass:[CAMetalLayer class]];
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
deleted file mode 100644
index 20ee991b842..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/SwapChain.h"
-
-#include "common/Constants.h"
-#include "dawn_native/Adapter.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/Surface.h"
-#include "dawn_native/Texture.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-namespace dawn_native {
-
- namespace {
-
- class ErrorSwapChain final : public SwapChainBase {
- public:
- ErrorSwapChain(DeviceBase* device) : SwapChainBase(device, ObjectBase::kError) {
- }
-
- private:
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- }
-
- TextureViewBase* APIGetCurrentTextureView() override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- return TextureViewBase::MakeError(GetDevice());
- }
-
- void APIPresent() override {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
- }
- };
-
- } // anonymous namespace
-
- MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
- const Surface* surface,
- const SwapChainDescriptor* descriptor) {
- if (descriptor->implementation != 0) {
- DAWN_INVALID_IF(surface != nullptr,
- "Exactly one of surface or implementation must be set");
-
- DawnSwapChainImplementation* impl =
- reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
-
- DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
- !impl->GetNextTexture || !impl->Present,
- "Implementation is incomplete");
-
- } else {
- DAWN_INVALID_IF(surface == nullptr,
- "At least one of surface or implementation must be set");
-
- DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
-
- // TODO(crbug.com/dawn/160): Lift this restriction once
- // wgpu::Instance::GetPreferredSurfaceFormat is implemented.
- DAWN_INVALID_IF(descriptor->format != wgpu::TextureFormat::BGRA8Unorm,
- "Format (%s) is not %s, which is (currently) the only accepted format.",
- descriptor->format, wgpu::TextureFormat::BGRA8Unorm);
-
- DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
- "Usage (%s) is not %s, which is (currently) the only accepted usage.",
- descriptor->usage, wgpu::TextureUsage::RenderAttachment);
-
- DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
- "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
- descriptor->height);
-
- DAWN_INVALID_IF(
- descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
- descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
- "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
- "size (width: %u, height: %u).",
- descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
- device->GetLimits().v1.maxTextureDimension2D);
- }
-
- return {};
- }
-
- TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
- TextureDescriptor desc;
- desc.usage = swapChain->GetUsage();
- desc.dimension = wgpu::TextureDimension::e2D;
- desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
- desc.format = swapChain->GetFormat();
- desc.mipLevelCount = 1;
- desc.sampleCount = 1;
-
- return desc;
- }
-
- // SwapChainBase
-
- SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
- TrackInDevice();
- }
-
- SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag) {
- }
-
- SwapChainBase::~SwapChainBase() {
- }
-
- void SwapChainBase::DestroyImpl() {
- }
-
- // static
- SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
- return new ErrorSwapChain(device);
- }
-
- ObjectType SwapChainBase::GetType() const {
- return ObjectType::SwapChain;
- }
-
- // OldSwapChainBase
-
- OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device),
- mImplementation(
- *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
- }
-
- OldSwapChainBase::~OldSwapChainBase() {
- if (!IsError()) {
- const auto& im = GetImplementation();
- im.Destroy(im.userData);
- }
- }
-
- void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
- if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
- return;
- }
- ASSERT(!IsError());
-
- allowedUsage |= wgpu::TextureUsage::Present;
-
- mFormat = format;
- mAllowedUsage = allowedUsage;
- mWidth = width;
- mHeight = height;
- mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
- static_cast<WGPUTextureUsage>(allowedUsage), width, height);
- }
-
- TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
- if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
- return TextureViewBase::MakeError(GetDevice());
- }
- ASSERT(!IsError());
-
- // Return the same current texture view until Present is called.
- if (mCurrentTextureView != nullptr) {
- // Calling GetCurrentTextureView always returns a new reference so add it even when
- // reuse the existing texture view.
- mCurrentTextureView->Reference();
- return mCurrentTextureView.Get();
- }
-
- // Create the backing texture and the view.
- TextureDescriptor descriptor;
- descriptor.dimension = wgpu::TextureDimension::e2D;
- descriptor.size.width = mWidth;
- descriptor.size.height = mHeight;
- descriptor.size.depthOrArrayLayers = 1;
- descriptor.sampleCount = 1;
- descriptor.format = mFormat;
- descriptor.mipLevelCount = 1;
- descriptor.usage = mAllowedUsage;
-
- // Get the texture but remove the external refcount because it is never passed outside
- // of dawn_native
- mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
-
- mCurrentTextureView = mCurrentTexture->APICreateView();
- return mCurrentTextureView.Get();
- }
-
- void OldSwapChainBase::APIPresent() {
- if (GetDevice()->ConsumedError(ValidatePresent())) {
- return;
- }
- ASSERT(!IsError());
-
- if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
- return;
- }
-
- mImplementation.Present(mImplementation.userData);
-
- mCurrentTexture = nullptr;
- mCurrentTextureView = nullptr;
- }
-
- const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
- ASSERT(!IsError());
- return mImplementation;
- }
-
- MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_TRY(ValidateTextureUsage(allowedUsage));
- DAWN_TRY(ValidateTextureFormat(format));
-
- DAWN_INVALID_IF(width == 0 || height == 0,
- "Configuration size (width: %u, height: %u) for %s is empty.", width,
- height, this);
-
- return {};
- }
-
- MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- // If width is 0, it implies swap chain has never been configured
- DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.",
- this);
-
- return {};
- }
-
- MaybeError OldSwapChainBase::ValidatePresent() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(
- mCurrentTextureView == nullptr,
- "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
- this);
-
- return {};
- }
-
- // Implementation of NewSwapChainBase
-
- NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
- Surface* surface,
- const SwapChainDescriptor* descriptor)
- : SwapChainBase(device),
- mAttached(false),
- mWidth(descriptor->width),
- mHeight(descriptor->height),
- mFormat(descriptor->format),
- mUsage(descriptor->usage),
- mPresentMode(descriptor->presentMode),
- mSurface(surface) {
- }
-
- NewSwapChainBase::~NewSwapChainBase() {
- if (mCurrentTextureView != nullptr) {
- ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
- TextureBase::TextureState::Destroyed);
- }
-
- ASSERT(!mAttached);
- }
-
- void NewSwapChainBase::DetachFromSurface() {
- if (mAttached) {
- DetachFromSurfaceImpl();
- mSurface = nullptr;
- mAttached = false;
- }
- }
-
- void NewSwapChainBase::SetIsAttached() {
- mAttached = true;
- }
-
- void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
- GetDevice()->ConsumedError(
- DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
- }
-
- TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
- if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
- return TextureViewBase::MakeError(GetDevice());
- }
-
- if (mCurrentTextureView != nullptr) {
- // Calling GetCurrentTextureView always returns a new reference so add it even when
- // reusing the existing texture view.
- mCurrentTextureView->Reference();
- return mCurrentTextureView.Get();
- }
-
- TextureViewBase* view = nullptr;
- if (GetDevice()->ConsumedError(GetCurrentTextureViewImpl(), &view)) {
- return TextureViewBase::MakeError(GetDevice());
- }
-
- // Check that the return texture view matches exactly what was given for this descriptor.
- ASSERT(view->GetTexture()->GetFormat().format == mFormat);
- ASSERT(IsSubset(mUsage, view->GetTexture()->GetUsage()));
- ASSERT(view->GetLevelCount() == 1);
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetDimension() == wgpu::TextureViewDimension::e2D);
- ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).width == mWidth);
- ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).height ==
- mHeight);
-
- mCurrentTextureView = view;
- return view;
- }
-
- void NewSwapChainBase::APIPresent() {
- if (GetDevice()->ConsumedError(ValidatePresent())) {
- return;
- }
-
- if (GetDevice()->ConsumedError(PresentImpl())) {
- return;
- }
-
- ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
- TextureBase::TextureState::Destroyed);
- mCurrentTextureView = nullptr;
- }
-
- uint32_t NewSwapChainBase::GetWidth() const {
- return mWidth;
- }
-
- uint32_t NewSwapChainBase::GetHeight() const {
- return mHeight;
- }
-
- wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
- return mFormat;
- }
-
- wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
- return mUsage;
- }
-
- wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
- return mPresentMode;
- }
-
- Surface* NewSwapChainBase::GetSurface() const {
- return mSurface;
- }
-
- bool NewSwapChainBase::IsAttached() const {
- return mAttached;
- }
-
- wgpu::BackendType NewSwapChainBase::GetBackendType() const {
- return GetDevice()->GetAdapter()->GetBackendType();
- }
-
- MaybeError NewSwapChainBase::ValidatePresent() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
-
- DAWN_INVALID_IF(
- mCurrentTextureView == nullptr,
- "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
- this);
-
- return {};
- }
-
- MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
-
- return {};
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
deleted file mode 100644
index bb880450623..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_SWAPCHAIN_H_
-#define DAWNNATIVE_SWAPCHAIN_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-
-#include "dawn/dawn_wsi.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
- const Surface* surface,
- const SwapChainDescriptor* descriptor);
-
- TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
-
- class SwapChainBase : public ApiObjectBase {
- public:
- SwapChainBase(DeviceBase* device);
-
- static SwapChainBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- // Dawn API
- virtual void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) = 0;
- virtual TextureViewBase* APIGetCurrentTextureView() = 0;
- virtual void APIPresent() = 0;
-
- protected:
- SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- ~SwapChainBase() override;
- void DestroyImpl() override;
- };
-
- // The base class for implementation-based SwapChains that are deprecated.
- class OldSwapChainBase : public SwapChainBase {
- public:
- OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
-
- // Dawn API
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* APIGetCurrentTextureView() override;
- void APIPresent() override;
-
- protected:
- ~OldSwapChainBase() override;
- const DawnSwapChainImplementation& GetImplementation();
- virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
- virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
-
- private:
- MaybeError ValidateConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) const;
- MaybeError ValidateGetCurrentTextureView() const;
- MaybeError ValidatePresent() const;
-
- DawnSwapChainImplementation mImplementation = {};
- wgpu::TextureFormat mFormat = {};
- wgpu::TextureUsage mAllowedUsage;
- uint32_t mWidth = 0;
- uint32_t mHeight = 0;
- Ref<TextureBase> mCurrentTexture;
- Ref<TextureViewBase> mCurrentTextureView;
- };
-
- // The base class for surface-based SwapChains that aren't ready yet.
- class NewSwapChainBase : public SwapChainBase {
- public:
- NewSwapChainBase(DeviceBase* device,
- Surface* surface,
- const SwapChainDescriptor* descriptor);
-
- // This is called when the swapchain is detached when one of the following happens:
- //
- // - The surface it is attached to is being destroyed.
- // - The swapchain is being replaced by another one on the surface.
- //
- // Note that the surface has a Ref on the last swapchain that was used on it so the
- // SwapChain destructor will only be called after one of the things above happens.
- //
- // The call for the detaching previous swapchain should be called inside the backend
- // implementation of SwapChains. This is to allow them to acquire any resources before
- // calling detach to make a seamless transition from the previous swapchain.
- //
- // Likewise the call for the swapchain being destroyed must be done in the backend's
- // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
- // destructor.
- void DetachFromSurface();
-
- void SetIsAttached();
-
- // Dawn API
- void APIConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* APIGetCurrentTextureView() override;
- void APIPresent() override;
-
- uint32_t GetWidth() const;
- uint32_t GetHeight() const;
- wgpu::TextureFormat GetFormat() const;
- wgpu::TextureUsage GetUsage() const;
- wgpu::PresentMode GetPresentMode() const;
- Surface* GetSurface() const;
- bool IsAttached() const;
- wgpu::BackendType GetBackendType() const;
-
- protected:
- ~NewSwapChainBase() override;
-
- private:
- bool mAttached;
- uint32_t mWidth;
- uint32_t mHeight;
- wgpu::TextureFormat mFormat;
- wgpu::TextureUsage mUsage;
- wgpu::PresentMode mPresentMode;
-
- // This is a weak reference to the surface. If the surface is destroyed it will call
- // DetachFromSurface and mSurface will be updated to nullptr.
- Surface* mSurface = nullptr;
- Ref<TextureViewBase> mCurrentTextureView;
-
- MaybeError ValidatePresent() const;
- MaybeError ValidateGetCurrentTextureView() const;
-
- // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
- // manner, starting with GetCurrentTextureViewImpl.
-
- // The returned texture view must match the swapchain descriptor exactly.
- virtual ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() = 0;
- // The call to present must destroy the current view's texture so further access to it are
- // invalid.
- virtual MaybeError PresentImpl() = 0;
-
- // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
- // called no other virtual method can be called.
- virtual void DetachFromSurfaceImpl() = 0;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_SWAPCHAIN_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
deleted file mode 100644
index 97a6a9dd6c5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ /dev/null
@@ -1,777 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/Texture.h"
-
-#include <algorithm>
-
-#include "common/Assert.h"
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/Adapter.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/ObjectType_autogen.h"
-#include "dawn_native/PassResourceUsage.h"
-#include "dawn_native/ValidationUtils_autogen.h"
-
-namespace dawn_native {
- namespace {
- // WebGPU currently does not have texture format reinterpretation. If it does, the
- // code to check for it might go here.
- MaybeError ValidateTextureViewFormatCompatibility(const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- if (texture->GetFormat().format != descriptor->format) {
- if (descriptor->aspect != wgpu::TextureAspect::All &&
- texture->GetFormat().GetAspectInfo(descriptor->aspect).format ==
- descriptor->format) {
- return {};
- }
-
- return DAWN_VALIDATION_ERROR(
- "The format of texture view is not compatible to the original texture");
- }
-
- return {};
- }
-
- // TODO(crbug.com/dawn/814): Implement for 1D texture.
- bool IsTextureViewDimensionCompatibleWithTextureDimension(
- wgpu::TextureViewDimension textureViewDimension,
- wgpu::TextureDimension textureDimension) {
- switch (textureViewDimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return textureDimension == wgpu::TextureDimension::e2D;
-
- case wgpu::TextureViewDimension::e3D:
- return textureDimension == wgpu::TextureDimension::e3D;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- // TODO(crbug.com/dawn/814): Implement for 1D texture.
- bool IsArrayLayerValidForTextureViewDimension(
- wgpu::TextureViewDimension textureViewDimension,
- uint32_t textureViewArrayLayer) {
- switch (textureViewDimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e3D:
- return textureViewArrayLayer == 1u;
- case wgpu::TextureViewDimension::e2DArray:
- return true;
- case wgpu::TextureViewDimension::Cube:
- return textureViewArrayLayer == 6u;
- case wgpu::TextureViewDimension::CubeArray:
- return textureViewArrayLayer % 6 == 0;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
- wgpu::TextureUsage usage,
- const Format* format) {
- DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
- "The sample count (%u) of the texture is not supported.",
- descriptor->sampleCount);
-
- if (descriptor->sampleCount > 1) {
- DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
- "The mip level count (%u) of a multisampled texture is not 1.",
- descriptor->mipLevelCount);
-
- // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
- // Multisampled 2D array texture is not supported because on Metal it requires the
- // version of macOS be greater than 10.14.
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "The dimension (%s) of a multisampled texture is not 2D.",
- descriptor->dimension);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
- "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
- descriptor->size.depthOrArrayLayers);
-
- // If a format can support multisample, it must be renderable. Because Vulkan
- // requires that if the format is not color-renderable or depth/stencil renderable,
- // sampleCount must be 1.
- DAWN_INVALID_IF(!format->isRenderable,
- "The texture format (%s) does not support multisampling.",
- format->format);
-
- // Compressed formats are not renderable. They cannot support multisample.
- ASSERT(!format->isCompressed);
-
- DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
- "The sample count (%u) of a storage textures is not 1.",
- descriptor->sampleCount);
- }
-
- return {};
- }
-
- MaybeError ValidateTextureViewDimensionCompatibility(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- DAWN_INVALID_IF(
- !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
- descriptor->arrayLayerCount),
- "The dimension (%s) of the texture view is not compatible with the layer count "
- "(%u) of %s.",
- descriptor->dimension, descriptor->arrayLayerCount, texture);
-
- DAWN_INVALID_IF(
- !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
- texture->GetDimension()),
- "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
- "of %s.",
- descriptor->dimension, texture->GetDimension(), texture);
-
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- DAWN_INVALID_IF(
- texture->GetSize().width != texture->GetSize().height,
- "A %s texture view is not compatible with %s because the texture's width "
- "(%u) and height (%u) are not equal.",
- descriptor->dimension, texture, texture->GetSize().width,
- texture->GetSize().height);
- break;
-
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::e3D:
- break;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
-
- MaybeError ValidateTextureSize(const DeviceBase* device,
- const TextureDescriptor* descriptor,
- const Format* format) {
- ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
- descriptor->size.depthOrArrayLayers != 0);
- const CombinedLimits& limits = device->GetLimits();
- Extent3D maxExtent;
- switch (descriptor->dimension) {
- case wgpu::TextureDimension::e2D:
- maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
- limits.v1.maxTextureArrayLayers};
- break;
- case wgpu::TextureDimension::e3D:
- maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
- limits.v1.maxTextureDimension3D};
- break;
- case wgpu::TextureDimension::e1D:
- default:
- UNREACHABLE();
- }
- DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
- descriptor->size.height > maxExtent.height ||
- descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
- "Texture size (%s) exceeded maximum texture size (%s).",
- &descriptor->size, &maxExtent);
-
- uint32_t maxMippedDimension = descriptor->size.width;
- if (descriptor->dimension != wgpu::TextureDimension::e1D) {
- maxMippedDimension = std::max(maxMippedDimension, descriptor->size.height);
- }
- if (descriptor->dimension == wgpu::TextureDimension::e3D) {
- maxMippedDimension =
- std::max(maxMippedDimension, descriptor->size.depthOrArrayLayers);
- }
- DAWN_INVALID_IF(
- Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
- "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
- descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
-
- if (format->isCompressed) {
- const TexelBlockInfo& blockInfo =
- format->GetAspectInfo(wgpu::TextureAspect::All).block;
- DAWN_INVALID_IF(
- descriptor->size.width % blockInfo.width != 0 ||
- descriptor->size.height % blockInfo.height != 0,
- "The size (%s) of the texture is not a multiple of the block width (%u) and "
- "height (%u) of the texture format (%s).",
- &descriptor->size, blockInfo.width, blockInfo.height, format->format);
- }
-
- return {};
- }
-
- MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
- wgpu::TextureUsage usage,
- const Format* format) {
- DAWN_TRY(dawn_native::ValidateTextureUsage(usage));
-
- constexpr wgpu::TextureUsage kValidCompressedUsages =
- wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
- wgpu::TextureUsage::CopyDst;
- DAWN_INVALID_IF(
- format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
- "The texture usage (%s) is incompatible with the compressed texture format (%s).",
- usage, format->format);
-
- DAWN_INVALID_IF(
- !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
- "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
- "format (%s).",
- usage, wgpu::TextureUsage::RenderAttachment, format->format);
-
- DAWN_INVALID_IF(
- !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
- "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
- usage, wgpu::TextureUsage::StorageBinding, format->format);
-
- constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
- wgpu::TextureUsage::TextureBinding;
- DAWN_INVALID_IF(
- format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
- "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
- format->format);
-
- return {};
- }
-
- } // anonymous namespace
-
- MaybeError ValidateTextureDescriptor(const DeviceBase* device,
- const TextureDescriptor* descriptor) {
- DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
- wgpu::SType::DawnTextureInternalUsageDescriptor));
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
- DAWN_INVALID_IF(descriptor->dimension == wgpu::TextureDimension::e1D,
- "1D textures aren't supported (yet).");
-
- DAWN_INVALID_IF(
- internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
- "The dawn-internal-usages feature is not enabled");
-
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
-
- wgpu::TextureUsage usage = descriptor->usage;
- if (internalUsageDesc != nullptr) {
- usage |= internalUsageDesc->internalUsage;
- }
-
- DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
- DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
- DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
-
- DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
- descriptor->size.depthOrArrayLayers == 0 ||
- descriptor->mipLevelCount == 0,
- "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(
- descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
- "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
- descriptor->dimension, format->format);
-
- // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
- // doesn't support depth/stencil formats on 3D textures.
- DAWN_INVALID_IF(
- descriptor->dimension != wgpu::TextureDimension::e2D &&
- (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
- "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
- descriptor->dimension, format->format);
-
- DAWN_TRY(ValidateTextureSize(device, descriptor, format));
-
- // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
- // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
- descriptor->mipLevelCount > 1 &&
- device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
- "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
- "disabled on Metal.");
-
- DAWN_INVALID_IF(
- device->IsToggleEnabled(Toggle::DisableR8RG8Mipmaps) && descriptor->mipLevelCount > 1 &&
- (descriptor->format == wgpu::TextureFormat::R8Unorm ||
- descriptor->format == wgpu::TextureFormat::RG8Unorm),
- "https://crbug.com/dawn/1071: r8unorm and rg8unorm textures with more than one mip "
- "level are disabled on Metal.");
-
- return {};
- }
-
- MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
- // Parent texture should have been already validated.
- ASSERT(texture);
- ASSERT(!texture->IsError());
-
- DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
- DAWN_INVALID_IF(descriptor->dimension == wgpu::TextureViewDimension::e1D,
- "1D texture views aren't supported (yet).");
-
- DAWN_TRY(ValidateTextureFormat(descriptor->format));
-
- DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
- DAWN_INVALID_IF(
- SelectFormatAspects(texture->GetFormat(), descriptor->aspect) == Aspect::None,
- "Texture format (%s) does not have the texture view's selected aspect (%s).",
- texture->GetFormat().format, descriptor->aspect);
-
- DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
- "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
- descriptor->arrayLayerCount, descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(
- uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
- uint64_t(texture->GetArrayLayers()),
- "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
- "texture's array layer count (%u).",
- descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
-
- DAWN_INVALID_IF(
- uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
- uint64_t(texture->GetNumMipLevels()),
- "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
- "texture's mip level count (%u).",
- descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
-
- DAWN_TRY(ValidateTextureViewFormatCompatibility(texture, descriptor));
- DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
-
- return {};
- }
-
- TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- ASSERT(texture);
-
- TextureViewDescriptor desc = {};
- if (descriptor) {
- desc = *descriptor;
- }
-
- // The default value for the view dimension depends on the texture's dimension with a
- // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
- if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e1D:
- desc.dimension = wgpu::TextureViewDimension::e1D;
- break;
-
- case wgpu::TextureDimension::e2D:
- desc.dimension = wgpu::TextureViewDimension::e2D;
- break;
-
- case wgpu::TextureDimension::e3D:
- desc.dimension = wgpu::TextureViewDimension::e3D;
- break;
- }
- }
-
- if (desc.format == wgpu::TextureFormat::Undefined) {
- // TODO(dawn:682): Use GetAspectInfo(aspect).
- desc.format = texture->GetFormat().format;
- }
- if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
- switch (desc.dimension) {
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e3D:
- desc.arrayLayerCount = 1;
- break;
- case wgpu::TextureViewDimension::Cube:
- desc.arrayLayerCount = 6;
- break;
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::CubeArray:
- desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
- break;
- default:
- // We don't put UNREACHABLE() here because we validate enums only after this
- // function sets default values. Otherwise, the UNREACHABLE() will be hit.
- break;
- }
- }
-
- if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
- desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
- }
- return desc;
- }
-
- // WebGPU only supports sample counts of 1 and 4. We could expand to more based on
- // platform support, but it would probably be a feature.
- bool IsValidSampleCount(uint32_t sampleCount) {
- switch (sampleCount) {
- case 1:
- case 4:
- return true;
-
- default:
- return false;
- }
- }
-
- // TextureBase
-
- TextureBase::TextureBase(DeviceBase* device,
- const TextureDescriptor* descriptor,
- TextureState state)
- : ApiObjectBase(device, descriptor->label),
- mDimension(descriptor->dimension),
- mFormat(device->GetValidInternalFormat(descriptor->format)),
- mSize(descriptor->size),
- mMipLevelCount(descriptor->mipLevelCount),
- mSampleCount(descriptor->sampleCount),
- mUsage(descriptor->usage),
- mInternalUsage(mUsage),
- mState(state) {
- uint32_t subresourceCount =
- mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
- mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(descriptor->nextInChain, &internalUsageDesc);
- if (internalUsageDesc != nullptr) {
- mInternalUsage |= internalUsageDesc->internalUsage;
- }
- TrackInDevice();
- }
-
- static Format kUnusedFormat;
-
- TextureBase::TextureBase(DeviceBase* device, TextureState state)
- : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
- TrackInDevice();
- }
-
- TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
- }
-
- void TextureBase::DestroyImpl() {
- mState = TextureState::Destroyed;
- }
-
- // static
- TextureBase* TextureBase::MakeError(DeviceBase* device) {
- return new TextureBase(device, ObjectBase::kError);
- }
-
- ObjectType TextureBase::GetType() const {
- return ObjectType::Texture;
- }
-
- wgpu::TextureDimension TextureBase::GetDimension() const {
- ASSERT(!IsError());
- return mDimension;
- }
-
- const Format& TextureBase::GetFormat() const {
- ASSERT(!IsError());
- return mFormat;
- }
- const Extent3D& TextureBase::GetSize() const {
- ASSERT(!IsError());
- return mSize;
- }
- uint32_t TextureBase::GetWidth() const {
- ASSERT(!IsError());
- return mSize.width;
- }
- uint32_t TextureBase::GetHeight() const {
- ASSERT(!IsError());
- ASSERT(mDimension != wgpu::TextureDimension::e1D);
- return mSize.height;
- }
- uint32_t TextureBase::GetDepth() const {
- ASSERT(!IsError());
- ASSERT(mDimension == wgpu::TextureDimension::e3D);
- return mSize.depthOrArrayLayers;
- }
- uint32_t TextureBase::GetArrayLayers() const {
- ASSERT(!IsError());
- // TODO(crbug.com/dawn/814): Update for 1D textures when they are supported.
- ASSERT(mDimension != wgpu::TextureDimension::e1D);
- if (mDimension == wgpu::TextureDimension::e3D) {
- return 1;
- }
- return mSize.depthOrArrayLayers;
- }
- uint32_t TextureBase::GetNumMipLevels() const {
- ASSERT(!IsError());
- return mMipLevelCount;
- }
- SubresourceRange TextureBase::GetAllSubresources() const {
- ASSERT(!IsError());
- return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
- }
- uint32_t TextureBase::GetSampleCount() const {
- ASSERT(!IsError());
- return mSampleCount;
- }
- uint32_t TextureBase::GetSubresourceCount() const {
- ASSERT(!IsError());
- return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
- }
- wgpu::TextureUsage TextureBase::GetUsage() const {
- ASSERT(!IsError());
- return mUsage;
- }
- wgpu::TextureUsage TextureBase::GetInternalUsage() const {
- ASSERT(!IsError());
- return mInternalUsage;
- }
-
- TextureBase::TextureState TextureBase::GetTextureState() const {
- ASSERT(!IsError());
- return mState;
- }
-
- uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
- uint32_t arraySlice,
- Aspect aspect) const {
- ASSERT(HasOneBit(aspect));
- return mipLevel +
- GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
- }
-
- bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
- ASSERT(!IsError());
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
- return false;
- }
- }
- }
- }
- return true;
- }
-
- void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
- const SubresourceRange& range) {
- ASSERT(!IsError());
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
- }
- }
- }
- }
-
- MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
- ASSERT(!IsError());
- DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
- this);
- return {};
- }
-
- bool TextureBase::IsMultisampledTexture() const {
- ASSERT(!IsError());
- return mSampleCount > 1;
- }
-
- Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
- Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
- if (mDimension == wgpu::TextureDimension::e1D) {
- return extent;
- }
-
- extent.height = std::max(mSize.height >> level, 1u);
- if (mDimension == wgpu::TextureDimension::e2D) {
- return extent;
- }
-
- extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
- return extent;
- }
-
- Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
- Extent3D extent = GetMipLevelVirtualSize(level);
-
- // Compressed Textures will have paddings if their width or height is not a multiple of
- // 4 at non-zero mipmap levels.
- if (mFormat.isCompressed && level != 0) {
- // If |level| is non-zero, then each dimension of |extent| is at most half of
- // the max texture dimension. Computations here which add the block width/height
- // to the extent cannot overflow.
- const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
- extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
- extent.height =
- (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
- }
-
- return extent;
- }
-
- Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
- const Origin3D& origin,
- const Extent3D& extent) const {
- const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
- ASSERT(origin.x <= virtualSizeAtLevel.width);
- ASSERT(origin.y <= virtualSizeAtLevel.height);
- uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
- ? (virtualSizeAtLevel.width - origin.x)
- : extent.width;
- uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
- ? (virtualSizeAtLevel.height - origin.y)
- : extent.height;
- return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
- }
-
- TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
-
- Ref<TextureViewBase> result;
- if (device->ConsumedError(device->CreateTextureView(this, descriptor), &result,
- "calling %s.CreateView(%s).", this, descriptor)) {
- return TextureViewBase::MakeError(device);
- }
- return result.Detach();
- }
-
- void TextureBase::APIDestroy() {
- if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
- return;
- }
- ASSERT(!IsError());
- Destroy();
- }
-
- MaybeError TextureBase::ValidateDestroy() const {
- DAWN_TRY(GetDevice()->ValidateObject(this));
- return {};
- }
-
- // TextureViewBase
-
- TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : ApiObjectBase(texture->GetDevice(), descriptor->label),
- mTexture(texture),
- mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
- mDimension(descriptor->dimension),
- mRange({ConvertViewAspect(mFormat, descriptor->aspect),
- {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
- {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
- TrackInDevice();
- }
-
- TextureViewBase::TextureViewBase(TextureBase* texture)
- : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
- mTexture(texture),
- mFormat(kUnusedFormat) {
- TrackInDevice();
- }
-
- TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
- }
-
- void TextureViewBase::DestroyImpl() {
- }
-
- // static
- TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
- return new TextureViewBase(device, ObjectBase::kError);
- }
-
- ObjectType TextureViewBase::GetType() const {
- return ObjectType::TextureView;
- }
-
- const TextureBase* TextureViewBase::GetTexture() const {
- ASSERT(!IsError());
- return mTexture.Get();
- }
-
- TextureBase* TextureViewBase::GetTexture() {
- ASSERT(!IsError());
- return mTexture.Get();
- }
-
- Aspect TextureViewBase::GetAspects() const {
- ASSERT(!IsError());
- return mRange.aspects;
- }
-
- const Format& TextureViewBase::GetFormat() const {
- ASSERT(!IsError());
- return mFormat;
- }
-
- wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
- ASSERT(!IsError());
- return mDimension;
- }
-
- uint32_t TextureViewBase::GetBaseMipLevel() const {
- ASSERT(!IsError());
- return mRange.baseMipLevel;
- }
-
- uint32_t TextureViewBase::GetLevelCount() const {
- ASSERT(!IsError());
- return mRange.levelCount;
- }
-
- uint32_t TextureViewBase::GetBaseArrayLayer() const {
- ASSERT(!IsError());
- return mRange.baseArrayLayer;
- }
-
- uint32_t TextureViewBase::GetLayerCount() const {
- ASSERT(!IsError());
- return mRange.layerCount;
- }
-
- const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
- ASSERT(!IsError());
- return mRange;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
deleted file mode 100644
index 145d7f3acf5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_TEXTURE_H_
-#define DAWNNATIVE_TEXTURE_H_
-
-#include "common/ityp_array.h"
-#include "common/ityp_bitset.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/Subresource.h"
-
-#include "dawn_native/dawn_platform.h"
-
-#include <vector>
-
-namespace dawn_native {
-
- MaybeError ValidateTextureDescriptor(const DeviceBase* device,
- const TextureDescriptor* descriptor);
- MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor);
- TextureViewDescriptor GetTextureViewDescriptorWithDefaults(
- const TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- bool IsValidSampleCount(uint32_t sampleCount);
-
- static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
- wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
- kReadOnlyRenderAttachment;
-
- class TextureBase : public ApiObjectBase {
- public:
- enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
- enum class ClearValue { Zero, NonZero };
- TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
-
- static TextureBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- wgpu::TextureDimension GetDimension() const;
- const Format& GetFormat() const;
- const Extent3D& GetSize() const;
- uint32_t GetWidth() const;
- uint32_t GetHeight() const;
- uint32_t GetDepth() const;
- uint32_t GetArrayLayers() const;
- uint32_t GetNumMipLevels() const;
- SubresourceRange GetAllSubresources() const;
- uint32_t GetSampleCount() const;
- uint32_t GetSubresourceCount() const;
-
- // |GetUsage| returns the usage with which the texture was created using the base WebGPU
- // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
- // returns the union of base usage and the usages added by the extension.
- wgpu::TextureUsage GetUsage() const;
- wgpu::TextureUsage GetInternalUsage() const;
-
- TextureState GetTextureState() const;
- uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
- bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
- void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
-
- MaybeError ValidateCanUseInSubmitNow() const;
-
- bool IsMultisampledTexture() const;
-
- // For a texture with non-block-compressed texture format, its physical size is always equal
- // to its virtual size. For a texture with block compressed texture format, the physical
- // size is the one with paddings if necessary, which is always a multiple of the block size
- // and used in texture copying. The virtual size is the one without paddings, which is not
- // required to be a multiple of the block size and used in texture sampling.
- Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
- Extent3D GetMipLevelVirtualSize(uint32_t level) const;
- Extent3D ClampToMipLevelVirtualSize(uint32_t level,
- const Origin3D& origin,
- const Extent3D& extent) const;
-
- // Dawn API
- TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
- void APIDestroy();
-
- protected:
- // Constructor used only for mocking and testing.
- TextureBase(DeviceBase* device, TextureState state);
- void DestroyImpl() override;
-
- private:
- TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- MaybeError ValidateDestroy() const;
- wgpu::TextureDimension mDimension;
- const Format& mFormat;
- Extent3D mSize;
- uint32_t mMipLevelCount;
- uint32_t mSampleCount;
- wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
- wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
- TextureState mState;
-
- // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
- std::vector<bool> mIsSubresourceContentInitializedAtIndex;
- };
-
- class TextureViewBase : public ApiObjectBase {
- public:
- TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- static TextureViewBase* MakeError(DeviceBase* device);
-
- ObjectType GetType() const override;
-
- const TextureBase* GetTexture() const;
- TextureBase* GetTexture();
-
- Aspect GetAspects() const;
- const Format& GetFormat() const;
- wgpu::TextureViewDimension GetDimension() const;
- uint32_t GetBaseMipLevel() const;
- uint32_t GetLevelCount() const;
- uint32_t GetBaseArrayLayer() const;
- uint32_t GetLayerCount() const;
- const SubresourceRange& GetSubresourceRange() const;
-
- protected:
- // Constructor used only for mocking and testing.
- TextureViewBase(TextureBase* texture);
- void DestroyImpl() override;
-
- private:
- TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
- Ref<TextureBase> mTexture;
-
- const Format& mFormat;
- wgpu::TextureViewDimension mDimension;
- SubresourceRange mRange;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_TEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp b/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp
deleted file mode 100644
index 7315904fef6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/TintUtils.h"
-#include "dawn_native/Device.h"
-
-#include <tint/tint.h>
-
-namespace dawn_native {
-
- namespace {
-
- thread_local DeviceBase* tlDevice = nullptr;
-
- void TintICEReporter(const tint::diag::List& diagnostics) {
- if (tlDevice) {
- tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
- }
- }
-
- bool InitializeTintErrorReporter() {
- tint::SetInternalCompilerErrorReporter(&TintICEReporter);
- return true;
- }
-
- } // namespace
-
- ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
- // Call tint::SetInternalCompilerErrorReporter() the first time
- // this constructor is called. Static initialization is
- // guaranteed to be thread-safe, and only occur once.
- static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
- (void)init_once_tint_error_reporter;
-
- // Shouldn't have overlapping instances of this handler.
- ASSERT(tlDevice == nullptr);
- tlDevice = device;
- }
-
- ScopedTintICEHandler::~ScopedTintICEHandler() {
- tlDevice = nullptr;
- }
-
-} // namespace dawn_native \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/TintUtils.h b/chromium/third_party/dawn/src/dawn_native/TintUtils.h
deleted file mode 100644
index c3761f69ff3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/TintUtils.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_TINTUTILS_H_
-#define DAWNNATIVE_TINTUTILS_H_
-
-#include "common/NonCopyable.h"
-
-namespace dawn_native {
-
- class DeviceBase;
-
- // Indicates that for the lifetime of this object tint internal compiler errors should be
- // reported to the given device.
- class ScopedTintICEHandler : public NonCopyable {
- public:
- ScopedTintICEHandler(DeviceBase* device);
- ~ScopedTintICEHandler();
-
- private:
- ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_TEXTURE_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/ToBackend.h b/chromium/third_party/dawn/src/dawn_native/ToBackend.h
deleted file mode 100644
index 5b0f049894b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/ToBackend.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_TOBACKEND_H_
-#define DAWNNATIVE_TOBACKEND_H_
-
-#include "dawn_native/Forward.h"
-
-namespace dawn_native {
-
- // ToBackendTraits implements the mapping from base type to member type of BackendTraits
- template <typename T, typename BackendTraits>
- struct ToBackendTraits;
-
- template <typename BackendTraits>
- struct ToBackendTraits<AdapterBase, BackendTraits> {
- using BackendType = typename BackendTraits::AdapterType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BindGroupBase, BackendTraits> {
- using BackendType = typename BackendTraits::BindGroupType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
- using BackendType = typename BackendTraits::BindGroupLayoutType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<BufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::BufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<CommandBufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::CommandBufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
- using BackendType = typename BackendTraits::ComputePipelineType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<DeviceBase, BackendTraits> {
- using BackendType = typename BackendTraits::DeviceType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
- using BackendType = typename BackendTraits::PipelineLayoutType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<QuerySetBase, BackendTraits> {
- using BackendType = typename BackendTraits::QuerySetType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<QueueBase, BackendTraits> {
- using BackendType = typename BackendTraits::QueueType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
- using BackendType = typename BackendTraits::RenderPipelineType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
- using BackendType = typename BackendTraits::ResourceHeapType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<SamplerBase, BackendTraits> {
- using BackendType = typename BackendTraits::SamplerType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
- using BackendType = typename BackendTraits::ShaderModuleType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<StagingBufferBase, BackendTraits> {
- using BackendType = typename BackendTraits::StagingBufferType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<TextureBase, BackendTraits> {
- using BackendType = typename BackendTraits::TextureType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<SwapChainBase, BackendTraits> {
- using BackendType = typename BackendTraits::SwapChainType;
- };
-
- template <typename BackendTraits>
- struct ToBackendTraits<TextureViewBase, BackendTraits> {
- using BackendType = typename BackendTraits::TextureViewType;
- };
-
- // ToBackendBase implements conversion to the given BackendTraits
- // To use it in a backend, use the following:
- // template<typename T>
- // auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
- // return ToBackendBase<MyBackendTraits>(common);
- // }
-
- template <typename BackendTraits, typename T>
- Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
- return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
- common);
- }
-
- template <typename BackendTraits, typename T>
- Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
- return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
- common);
- }
-
- template <typename BackendTraits, typename T>
- const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
- const Ref<T>& common) {
- return reinterpret_cast<
- const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
- }
-
- template <typename BackendTraits, typename T>
- typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
- return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
- }
-
- template <typename BackendTraits, typename T>
- const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
- return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(
- common);
- }
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_TOBACKEND_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
deleted file mode 100644
index b0e2dea250b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <array>
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "dawn_native/Toggles.h"
-
-namespace dawn_native {
- namespace {
-
- struct ToggleEnumAndInfo {
- Toggle toggle;
- ToggleInfo info;
- };
-
- using ToggleEnumAndInfoList =
- std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
-
- static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
- {Toggle::EmulateStoreAndMSAAResolve,
- {"emulate_store_and_msaa_resolve",
- "Emulate storing into multisampled color attachments and doing MSAA resolve "
- "simultaneously. This workaround is enabled by default on the Metal drivers that do "
- "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
- "those platforms, we should do MSAA resolve in another render pass after ending the "
- "previous one.",
- "https://crbug.com/dawn/56"}},
- {Toggle::NonzeroClearResourcesOnCreationForTesting,
- {"nonzero_clear_resources_on_creation_for_testing",
- "Clears texture to full 1 bits as soon as they are created, but doesn't update "
- "the tracking state of the texture. This way we can test the logic of clearing "
- "textures that use recycled memory.",
- "https://crbug.com/dawn/145"}},
- {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
- {"always_resolve_into_zero_level_and_layer",
- "When the resolve target is a texture view that is created on the non-zero level or "
- "layer of a texture, we first resolve into a temporarily 2D texture with only one "
- "mipmap level and one array layer, and copy the result of MSAA resolve into the "
- "true resolve target. This workaround is enabled by default on the Metal drivers "
- "that have bugs when setting non-zero resolveLevel or resolveSlice.",
- "https://crbug.com/dawn/56"}},
- {Toggle::LazyClearResourceOnFirstUse,
- {"lazy_clear_resource_on_first_use",
- "Clears resource to zero on first usage. This initializes the resource "
- "so that no dirty bits from recycled memory is present in the new resource.",
- "https://crbug.com/dawn/145"}},
- {Toggle::TurnOffVsync,
- {"turn_off_vsync",
- "Turn off vsync when rendering. In order to do performance test or run perf tests, "
- "turn off vsync so that the fps can exeed 60.",
- "https://crbug.com/dawn/237"}},
- {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
- {"use_temporary_buffer_in_texture_to_texture_copy",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "when copying between compressed textures that don't have block-aligned sizes. This "
- "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
- "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
- "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
- "https://crbug.com/dawn/42"}},
- {Toggle::UseD3D12ResourceHeapTier2,
- {"use_d3d12_resource_heap_tier2",
- "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
- "texture and buffers in the same heap. This allows better heap re-use and reduces "
- "fragmentation.",
- "https://crbug.com/dawn/27"}},
- {Toggle::UseD3D12RenderPass,
- {"use_d3d12_render_pass",
- "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
- "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
- "will emulate a render pass.",
- "https://crbug.com/dawn/36"}},
- {Toggle::UseD3D12ResidencyManagement,
- {"use_d3d12_residency_management",
- "Enable residency management. This allows page-in and page-out of resource heaps in "
- "GPU memory. This component improves overcommitted performance by keeping the most "
- "recently used resources local to the GPU. Turning this component off can cause "
- "allocation failures when application memory exceeds physical device memory.",
- "https://crbug.com/dawn/193"}},
- {Toggle::SkipValidation,
- {"skip_validation", "Skip expensive validation of Dawn commands.",
- "https://crbug.com/dawn/271"}},
- {Toggle::VulkanUseD32S8,
- {"vulkan_use_d32s8",
- "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
- "backend will use D32S8 (toggle to on) but setting the toggle to off will make it"
- "use the D24S8 format when possible.",
- "https://crbug.com/dawn/286"}},
- {Toggle::MetalDisableSamplerCompare,
- {"metal_disable_sampler_compare",
- "Disables the use of sampler compare on Metal. This is unsupported before A9 "
- "processors.",
- "https://crbug.com/dawn/342"}},
- {Toggle::MetalUseSharedModeForCounterSampleBuffer,
- {"metal_use_shared_mode_for_counter_sample_buffer",
- "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
- "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
- "does not work properly on Intel platforms. The workaround is use shared mode "
- "instead.",
- "https://crbug.com/dawn/434"}},
- {Toggle::DisableBaseVertex,
- {"disable_base_vertex",
- "Disables the use of non-zero base vertex which is unsupported on some platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::DisableBaseInstance,
- {"disable_base_instance",
- "Disables the use of non-zero base instance which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::DisableIndexedDrawBuffers,
- {"disable_indexed_draw_buffers",
- "Disables the use of indexed draw buffer state which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/582"}},
- {Toggle::DisableSnormRead,
- {"disable_snorm_read",
- "Disables reading from Snorm textures which is unsupported on some platforms.",
- "https://crbug.com/dawn/667"}},
- {Toggle::DisableDepthStencilRead,
- {"disable_depth_stencil_read",
- "Disables reading from depth/stencil textures which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/667"}},
- {Toggle::DisableSampleVariables,
- {"disable_sample_variables",
- "Disables gl_SampleMask and related functionality which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/673"}},
- {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
- {"use_d3d12_small_shader_visible_heap",
- "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
- "default. This setting is used to test bindgroup encoding.",
- "https://crbug.com/dawn/155"}},
- {Toggle::UseDXC,
- {"use_dxc",
- "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
- "is available.",
- "https://crbug.com/dawn/402"}},
- {Toggle::DisableRobustness,
- {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
- {Toggle::MetalEnableVertexPulling,
- {"metal_enable_vertex_pulling",
- "Uses vertex pulling to protect out-of-bounds reads on Metal",
- "https://crbug.com/dawn/480"}},
- {Toggle::DisallowUnsafeAPIs,
- {"disallow_unsafe_apis",
- "Produces validation errors on API entry points or parameter combinations that "
- "aren't considered secure yet.",
- "http://crbug.com/1138528"}},
- {Toggle::FlushBeforeClientWaitSync,
- {"flush_before_client_wait_sync",
- "Call glFlush before glClientWaitSync to work around bugs in the latter",
- "https://crbug.com/dawn/633"}},
- {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
- "level",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "under specific situations. This workaround is by default enabled on some Intel "
- "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
- "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
- "level to a smaller mip level on D3D12 backends.",
- "https://crbug.com/1161355"}},
- {Toggle::EmitHLSLDebugSymbols,
- {"emit_hlsl_debug_symbols",
- "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
- "compiling HLSL code. Enables better shader debugging with external graphics "
- "debugging tools.",
- "https://crbug.com/dawn/776"}},
- {Toggle::DisallowSpirv,
- {"disallow_spirv",
- "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules."
- "This is useful to prevent a Chromium renderer process from successfully sending"
- "SPIR-V code to be compiled in the GPU process.",
- "https://crbug.com/1214923"}},
- {Toggle::DumpShaders,
- {"dump_shaders",
- "Dump shaders for debugging purposes. Dumped shaders will be log via "
- "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
- "function.",
- "https://crbug.com/dawn/792"}},
- {Toggle::DEPRECATED_DumpTranslatedShaders,
- {"dump_translated_shaders", "Deprecated. Use dump_shaders",
- "https://crbug.com/dawn/792"}},
- {Toggle::ForceWGSLStep,
- {"force_wgsl_step",
- "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
- "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
- "work when the same translation runs in a WASM module in the page.",
- "https://crbug.com/dawn/960"}},
- {Toggle::DisableWorkgroupInit,
- {"disable_workgroup_init",
- "Disables the workgroup memory zero-initialization for compute shaders.",
- "https://crbug.com/tint/1003"}},
- {Toggle::DisableSymbolRenaming,
- {"disable_symbol_renaming",
- "Disables the WGSL symbol renaming so that names are preserved.",
- "https://crbug.com/dawn/1016"}},
- {Toggle::UseUserDefinedLabelsInBackend,
- {"use_user_defined_labels_in_backend",
- "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
- "objects.",
- "https://crbug.com/dawn/840"}},
- {Toggle::DisableR8RG8Mipmaps,
- {"disable_r8_rg8_mipmaps",
- "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
- "to not clear correctly.",
- "https://crbug.com/dawn/1071"}},
- {Toggle::UseDummyFragmentInVertexOnlyPipeline,
- {"use_dummy_fragment_in_vertex_only_pipeline",
- "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
- "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
- "some Metal devices with Intel GPU to ensure the depth result is correct.",
- "https://crbug.com/dawn/136"}},
- {Toggle::FxcOptimizations,
- {"fxc_optimizations",
- "Enable optimizations when compiling with FXC. Disabled by default because FXC "
- "miscompiles in many cases when optimizations are enabled.",
- "https://crbug.com/dawn/1203"}},
-
- // Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
- }};
- } // anonymous namespace
-
- void TogglesSet::Set(Toggle toggle, bool enabled) {
- if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
- Set(Toggle::DumpShaders, enabled);
- return;
- }
- ASSERT(toggle != Toggle::InvalidEnum);
- const size_t toggleIndex = static_cast<size_t>(toggle);
- toggleBitset.set(toggleIndex, enabled);
- }
-
- bool TogglesSet::Has(Toggle toggle) const {
- if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
- return Has(Toggle::DumpShaders);
- }
- ASSERT(toggle != Toggle::InvalidEnum);
- const size_t toggleIndex = static_cast<size_t>(toggle);
- return toggleBitset.test(toggleIndex);
- }
-
- std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
- std::vector<const char*> togglesNameInUse(toggleBitset.count());
-
- uint32_t index = 0;
- for (uint32_t i : IterateBitSet(toggleBitset)) {
- const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
- togglesNameInUse[index] = toggleName;
- ++index;
- }
-
- return togglesNameInUse;
- }
-
- const char* ToggleEnumToName(Toggle toggle) {
- ASSERT(toggle != Toggle::InvalidEnum);
-
- const ToggleEnumAndInfo& toggleNameAndInfo =
- kToggleNameAndInfoList[static_cast<size_t>(toggle)];
- ASSERT(toggleNameAndInfo.toggle == toggle);
- return toggleNameAndInfo.info.name;
- }
-
- const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
- ASSERT(toggleName);
-
- EnsureToggleNameToEnumMapInitialized();
-
- const auto& iter = mToggleNameToEnumMap.find(toggleName);
- if (iter != mToggleNameToEnumMap.cend()) {
- return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
- }
- return nullptr;
- }
-
- Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
- ASSERT(toggleName);
-
- EnsureToggleNameToEnumMapInitialized();
-
- const auto& iter = mToggleNameToEnumMap.find(toggleName);
- if (iter != mToggleNameToEnumMap.cend()) {
- return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
- }
- return Toggle::InvalidEnum;
- }
-
- void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
- if (mToggleNameToEnumMapInitialized) {
- return;
- }
-
- for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
- const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
- ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
- mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
- }
-
- mToggleNameToEnumMapInitialized = true;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
deleted file mode 100644
index 4682cbd57b5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_TOGGLES_H_
-#define DAWNNATIVE_TOGGLES_H_
-
-#include <bitset>
-#include <unordered_map>
-#include <vector>
-
-#include "dawn_native/DawnNative.h"
-
-namespace dawn_native {
-
- enum class Toggle {
- EmulateStoreAndMSAAResolve,
- NonzeroClearResourcesOnCreationForTesting,
- AlwaysResolveIntoZeroLevelAndLayer,
- LazyClearResourceOnFirstUse,
- TurnOffVsync,
- UseTemporaryBufferInCompressedTextureToTextureCopy,
- UseD3D12ResourceHeapTier2,
- UseD3D12RenderPass,
- UseD3D12ResidencyManagement,
- SkipValidation,
- VulkanUseD32S8,
- MetalDisableSamplerCompare,
- MetalUseSharedModeForCounterSampleBuffer,
- DisableBaseVertex,
- DisableBaseInstance,
- DisableIndexedDrawBuffers,
- DisableSnormRead,
- DisableDepthStencilRead,
- DisableSampleVariables,
- UseD3D12SmallShaderVisibleHeapForTesting,
- UseDXC,
- DisableRobustness,
- MetalEnableVertexPulling,
- DisallowUnsafeAPIs,
- FlushBeforeClientWaitSync,
- UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- EmitHLSLDebugSymbols,
- DisallowSpirv,
- DumpShaders,
- DEPRECATED_DumpTranslatedShaders, // Use DumpShaders
- ForceWGSLStep,
- DisableWorkgroupInit,
- DisableSymbolRenaming,
- UseUserDefinedLabelsInBackend,
- DisableR8RG8Mipmaps,
- UseDummyFragmentInVertexOnlyPipeline,
- FxcOptimizations,
-
- EnumCount,
- InvalidEnum = EnumCount,
- };
-
- // A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
- // convenience to convert the enums of enum class Toggle to the indices of a bitset.
- struct TogglesSet {
- std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
-
- void Set(Toggle toggle, bool enabled);
- bool Has(Toggle toggle) const;
- std::vector<const char*> GetContainedToggleNames() const;
- };
-
- const char* ToggleEnumToName(Toggle toggle);
-
- class TogglesInfo {
- public:
- // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
- // of a toggle supported in Dawn.
- const ToggleInfo* GetToggleInfo(const char* toggleName);
- Toggle ToggleNameToEnum(const char* toggleName);
-
- private:
- void EnsureToggleNameToEnumMapInitialized();
-
- bool mToggleNameToEnumMapInitialized = false;
- std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_TOGGLES_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/VertexFormat.cpp b/chromium/third_party/dawn/src/dawn_native/VertexFormat.cpp
deleted file mode 100644
index b4bdfb7aab4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/VertexFormat.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/VertexFormat.h"
-
-#include "common/Assert.h"
-
-#include <array>
-
-namespace dawn_native {
-
- static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
- //
- {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
-
- {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
-
- {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
-
- {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
- {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
- {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
- {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
- //
- }};
-
- const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
- ASSERT(format != wgpu::VertexFormat::Undefined);
- ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
- ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
- return sVertexFormatTable[static_cast<uint32_t>(format)];
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/VertexFormat.h b/chromium/third_party/dawn/src/dawn_native/VertexFormat.h
deleted file mode 100644
index 582c95d1e7f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/VertexFormat.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VERTEXFORMAT_H_
-#define DAWNNATIVE_VERTEXFORMAT_H_
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
-
- enum class VertexFormatBaseType {
- Float,
- Uint,
- Sint,
- };
-
- struct VertexFormatInfo {
- wgpu::VertexFormat format;
- uint32_t byteSize;
- uint32_t componentCount;
- uint32_t componentByteSize;
- VertexFormatBaseType baseType;
- };
-
- const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_VERTEXFORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.cpp
deleted file mode 100644
index 605d2508243..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/XlibXcbFunctions.h"
-
-namespace dawn_native {
-
- XlibXcbFunctions::XlibXcbFunctions() {
- if (!mLib.Open("libX11-xcb.so.1") ||
- !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
- mLib.Close();
- }
- }
- XlibXcbFunctions::~XlibXcbFunctions() = default;
-
- bool XlibXcbFunctions::IsLoaded() const {
- return xGetXCBConnection != nullptr;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.h b/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.h
deleted file mode 100644
index f295c2aebe2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/XlibXcbFunctions.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_XLIBXCBFUNCTIONS_H_
-#define DAWNNATIVE_XLIBXCBFUNCTIONS_H_
-
-#include "common/DynamicLib.h"
-#include "dawn_native/Error.h"
-
-#include "common/xlib_with_undefs.h"
-
-class DynamicLib;
-
-namespace dawn_native {
-
- // A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
- // (and nothing else). This has to be dynamic because this libraries isn't present on all Linux
- // deployment platforms that Chromium targets.
- class XlibXcbFunctions {
- public:
- XlibXcbFunctions();
- ~XlibXcbFunctions();
-
- bool IsLoaded() const;
-
- // Functions from x11-xcb
- decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
-
- private:
- DynamicLib mLib;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_XLIBXCBFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
deleted file mode 100644
index ca845f3ad24..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/AdapterD3D12.h"
-
-#include "common/Constants.h"
-#include "common/WindowsUtils.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/d3d12/BackendD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-
-#include <sstream>
-
-namespace dawn_native { namespace d3d12 {
-
- Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
- : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
- mHardwareAdapter(hardwareAdapter),
- mBackend(backend) {
- }
-
- Adapter::~Adapter() {
- CleanUpDebugLayerFilters();
- }
-
- bool Adapter::SupportsExternalImages() const {
- // Via dawn_native::d3d12::ExternalImageDXGI::Create
- return true;
- }
-
- const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
- return mDeviceInfo;
- }
-
- IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
- return mHardwareAdapter.Get();
- }
-
- Backend* Adapter::GetBackend() const {
- return mBackend;
- }
-
- ComPtr<ID3D12Device> Adapter::GetDevice() const {
- return mD3d12Device;
- }
-
- const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
- return mDriverVersion;
- }
-
- MaybeError Adapter::InitializeImpl() {
- // D3D12 cannot check for feature support without a device.
- // Create the device to populate the adapter properties then reuse it when needed for actual
- // rendering.
- const PlatformFunctions* functions = GetBackend()->GetFunctions();
- if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
- _uuidof(ID3D12Device), &mD3d12Device))) {
- return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
- }
-
- DAWN_TRY(InitializeDebugLayerFilters());
-
- DXGI_ADAPTER_DESC1 adapterDesc;
- mHardwareAdapter->GetDesc1(&adapterDesc);
-
- mPCIInfo.deviceId = adapterDesc.DeviceId;
- mPCIInfo.vendorId = adapterDesc.VendorId;
- mPCIInfo.name = WCharToUTF8(adapterDesc.Description);
-
- DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
-
- if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
- mAdapterType = wgpu::AdapterType::CPU;
- } else {
- mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
- : wgpu::AdapterType::DiscreteGPU;
- }
-
- // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
- LARGE_INTEGER umdVersion;
- if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
- DXGI_ERROR_UNSUPPORTED) {
- uint64_t encodedVersion = umdVersion.QuadPart;
-
- std::ostringstream o;
- o << "D3D12 driver version ";
- for (size_t i = 0; i < mDriverVersion.size(); ++i) {
- mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
- o << mDriverVersion[i] << ".";
- }
- mDriverDescription = o.str();
- }
-
- return {};
- }
-
- bool Adapter::AreTimestampQueriesSupported() const {
- D3D12_COMMAND_QUEUE_DESC queueDesc = {};
- queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
- queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
- ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
- HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
- if (FAILED(hr)) {
- return false;
- }
-
- // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
- // and vGPU implementations.
- uint64_t timeStampFrequency;
- hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
- if (FAILED(hr)) {
- return false;
- }
-
- return true;
- }
-
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- if (AreTimestampQueriesSupported()) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
- mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
- return {};
- }
-
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
-
- DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
- &featureData, sizeof(featureData)),
- "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
-
- // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
- const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
-
- D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
- featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
- featureLevels.pFeatureLevelsRequested = levelsToQuery;
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
- &featureLevels, sizeof(featureLevels)),
- "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
-
- if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
- featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
- return DAWN_VALIDATION_ERROR(
- "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
- "devices.");
- }
-
- GetDefaultLimits(&limits->v1);
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
-
- // Limits that are the same across D3D feature levels
- limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
- limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
- limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
- limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
- // Slot values can be 0-15, inclusive:
- // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
- limits->v1.maxVertexBuffers = 16;
- limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
-
- // Note: WebGPU requires FL11.1+
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
- // Resource Binding Tier: 1 2 3
-
- // Max(CBV+UAV+SRV) 1M 1M 1M+
- // Max CBV per stage 14 14 full
- // Max SRV per stage 128 full full
- // Max UAV in all stages 64 64 full
- // Max Samplers per stage 16 2048 2048
-
- // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
- // "full" means the full heap can be used. This is tested
- // to work for 1 million descriptors, and 1.1M for tier 3.
- uint32_t maxCBVsPerStage;
- uint32_t maxSRVsPerStage;
- uint32_t maxUAVsAllStages;
- uint32_t maxSamplersPerStage;
- switch (featureData.ResourceBindingTier) {
- case D3D12_RESOURCE_BINDING_TIER_1:
- maxCBVsPerStage = 14;
- maxSRVsPerStage = 128;
- maxUAVsAllStages = 64;
- maxSamplersPerStage = 16;
- break;
- case D3D12_RESOURCE_BINDING_TIER_2:
- maxCBVsPerStage = 14;
- maxSRVsPerStage = 1'000'000;
- maxUAVsAllStages = 64;
- maxSamplersPerStage = 2048;
- break;
- case D3D12_RESOURCE_BINDING_TIER_3:
- default:
- maxCBVsPerStage = 1'100'000;
- maxSRVsPerStage = 1'100'000;
- maxUAVsAllStages = 1'100'000;
- maxSamplersPerStage = 2048;
- break;
- }
-
- ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
- ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
- uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
-
- limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
- // Allocate half of the UAVs to storage buffers, and half to storage textures.
- limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
- limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
- limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
- limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
- // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
- static constexpr uint32_t kMaxRootSignatureSize = 64u;
- // Dawn maps WebGPU's binding model by:
- // - (maxBindGroups)
- // CBVs/UAVs/SRVs for bind group are a root descriptor table
- // - (maxBindGroups)
- // Samplers for each bind group are a root descriptor table
- // - (2 * maxDynamicBuffers)
- // Each dynamic buffer is a root descriptor
- // RESERVED:
- // - 3 = max of:
- // - 2 root constants for the baseVertex/baseInstance constants.
- // - 3 root constants for num workgroups X, Y, Z
- // - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
- // buffer lengths.
- static constexpr uint32_t kReservedSlots = 7;
-
- // Available slots after base limits considered.
- uint32_t availableRootSignatureSlots =
- kMaxRootSignatureSize - kReservedSlots -
- 2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
-
- // Because we need either:
- // - 1 cbv/uav/srv table + 1 sampler table
- // - 2 slots for a root descriptor
- uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
-
- // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
- // Distribute evenly.
- limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout +=
- availableDynamicBufferOrBindGroup / 3;
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
- (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
-
- ASSERT(2 * (limits->v1.maxBindGroups +
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
- kMaxRootSignatureSize - kReservedSlots);
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
- limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
- limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
- limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
- limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
-
-        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
- limits->v1.maxComputeWorkgroupsPerDimension =
- D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
-
- // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
- // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
- // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
- limits->v1.maxComputeWorkgroupStorageSize = 32768;
-
- // Max number of "constants" where each constant is a 16-byte float4
- limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
- // D3D12 has no documented limit on the size of a storage buffer binding.
- limits->v1.maxStorageBufferBindingSize = 4294967295;
-
- // TODO(crbug.com/dawn/685):
- // LIMITS NOT SET:
- // - maxInterStageShaderComponents
- // - maxVertexBufferArrayStride
-
- return {};
- }
-
- MaybeError Adapter::InitializeDebugLayerFilters() {
- if (!GetInstance()->IsBackendValidationEnabled()) {
- return {};
- }
-
- D3D12_MESSAGE_ID denyIds[] = {
-
- //
- // Permanent IDs: list of warnings that are not applicable
- //
-
- // Resource sub-allocation partially maps pre-allocated heaps. This means the
- // entire physical addresses space may have no resources or have many resources
- // assigned the same heap.
- D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
- D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
-
- // The debug layer validates pipeline objects when they are created. Dawn validates
-            // them when they are set. Therefore, since the issue is caught at a later
-            // time, we can silence these warnings.
- D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
-
- // Adding a clear color during resource creation would require heuristics or delayed
- // creation.
- // https://crbug.com/dawn/418
- D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
- D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
-
- // Dawn enforces proper Unmaps at a later time.
- // https://crbug.com/dawn/422
- D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
-
- // WebGPU allows empty scissors without empty viewports.
- D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
-
- //
- // Temporary IDs: list of warnings that should be fixed or promoted
- //
-
- // Remove after warning have been addressed
- // https://crbug.com/dawn/421
- D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
-
- // For small placed resource alignment, we first request the small alignment, which may
-            // get rejected and generate a debug error. Then, we request 0 to get the
-            // allowed alignment.
- D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
- };
-
- // Create a retrieval filter with a deny list to suppress messages.
- // Any messages remaining will be converted to Dawn errors.
- D3D12_INFO_QUEUE_FILTER filter{};
- // Filter out info/message and only create errors from warnings or worse.
- D3D12_MESSAGE_SEVERITY severities[] = {
- D3D12_MESSAGE_SEVERITY_INFO,
- D3D12_MESSAGE_SEVERITY_MESSAGE,
- };
- filter.DenyList.NumSeverities = ARRAYSIZE(severities);
- filter.DenyList.pSeverityList = severities;
- filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
- filter.DenyList.pIDList = denyIds;
-
- ComPtr<ID3D12InfoQueue> infoQueue;
- DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
- "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
-
- // To avoid flooding the console, a storage-filter is also used to
- // prevent messages from getting logged.
- DAWN_TRY(CheckHRESULT(infoQueue->PushStorageFilter(&filter),
- "ID3D12InfoQueue::PushStorageFilter"));
-
- DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
- "ID3D12InfoQueue::PushRetrievalFilter"));
-
- return {};
- }
-
- void Adapter::CleanUpDebugLayerFilters() {
- if (!GetInstance()->IsBackendValidationEnabled()) {
- return;
- }
-
- // The device may not exist if this adapter failed to initialize.
- if (mD3d12Device == nullptr) {
- return;
- }
-
- // If the debug layer is not installed, return immediately to avoid crashing the process.
- ComPtr<ID3D12InfoQueue> infoQueue;
- if (FAILED(mD3d12Device.As(&infoQueue))) {
- return;
- }
-
- infoQueue->PopRetrievalFilter();
- infoQueue->PopStorageFilter();
- }
-
- ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DawnDeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
- }
-
- // Resets the backend device and creates a new one. If any D3D12 objects belonging to the
- // current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
-    // and the subsequent call to CreateDevice will return a handle to the existing device instead of
- // creating a new one.
- MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
- ASSERT(mD3d12Device.Reset() == 0);
- DAWN_TRY(Initialize());
-
- return {};
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
deleted file mode 100644
index 4f404479dc4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_ADAPTERD3D12_H_
-#define DAWNNATIVE_D3D12_ADAPTERD3D12_H_
-
-#include "dawn_native/Adapter.h"
-
-#include "common/GPUInfo.h"
-#include "dawn_native/d3d12/D3D12Info.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Backend;
-
- class Adapter : public AdapterBase {
- public:
- Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
- ~Adapter() override;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
-
- const D3D12DeviceInfo& GetDeviceInfo() const;
- IDXGIAdapter3* GetHardwareAdapter() const;
- Backend* GetBackend() const;
- ComPtr<ID3D12Device> GetDevice() const;
- const gpu_info::D3DDriverVersion& GetDriverVersion() const;
-
- private:
- ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) override;
- MaybeError ResetInternalDeviceForTestingImpl() override;
-
- bool AreTimestampQueriesSupported() const;
-
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
-
- MaybeError InitializeDebugLayerFilters();
- void CleanUpDebugLayerFilters();
-
- ComPtr<IDXGIAdapter3> mHardwareAdapter;
- ComPtr<ID3D12Device> mD3d12Device;
- gpu_info::D3DDriverVersion mDriverVersion;
-
- Backend* mBackend;
- D3D12DeviceInfo mDeviceInfo = {};
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_ADAPTERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
deleted file mode 100644
index 22d29c7bcfa..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/BackendD3D12.h"
-
-#include "dawn_native/D3D12Backend.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/d3d12/AdapterD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
-
- ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
- BackendValidationLevel validationLevel,
- bool beginCaptureOnStartup) {
- ComPtr<IDXGIFactory4> factory;
-
- uint32_t dxgiFactoryFlags = 0;
-
- // Enable the debug layer (requires the Graphics Tools "optional feature").
- {
- if (validationLevel != BackendValidationLevel::Disabled) {
- ComPtr<ID3D12Debug3> debugController;
- if (SUCCEEDED(
- functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
- ASSERT(debugController != nullptr);
- debugController->EnableDebugLayer();
- if (validationLevel == BackendValidationLevel::Full) {
- debugController->SetEnableGPUBasedValidation(true);
- }
-
- // Enable additional debug layers.
- dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
- }
- }
-
- if (beginCaptureOnStartup) {
- ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
- if (functions->dxgiGetDebugInterface1 != nullptr &&
- SUCCEEDED(functions->dxgiGetDebugInterface1(
- 0, IID_PPV_ARGS(&graphicsAnalysis)))) {
- graphicsAnalysis->BeginCapture();
- }
- }
- }
-
- if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
- return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
- }
-
- ASSERT(factory != nullptr);
- return std::move(factory);
- }
-
- ResultOrError<std::unique_ptr<AdapterBase>> CreateAdapterFromIDXGIAdapter(
- Backend* backend,
- ComPtr<IDXGIAdapter> dxgiAdapter) {
- ComPtr<IDXGIAdapter3> dxgiAdapter3;
- DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
- std::unique_ptr<Adapter> adapter =
- std::make_unique<Adapter>(backend, std::move(dxgiAdapter3));
- DAWN_TRY(adapter->Initialize());
-
- return {std::move(adapter)};
- }
-
- } // anonymous namespace
-
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::D3D12) {
- }
-
- MaybeError Backend::Initialize() {
- mFunctions = std::make_unique<PlatformFunctions>();
- DAWN_TRY(mFunctions->LoadFunctions());
-
- const auto instance = GetInstance();
-
- DAWN_TRY_ASSIGN(mFactory,
- CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
- instance->IsBeginCaptureOnStartupEnabled()));
-
- return {};
- }
-
- ComPtr<IDXGIFactory4> Backend::GetFactory() const {
- return mFactory;
- }
-
- MaybeError Backend::EnsureDxcLibrary() {
- if (mDxcLibrary == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
- "DXC create library"));
- ASSERT(mDxcLibrary != nullptr);
- }
- return {};
- }
-
- MaybeError Backend::EnsureDxcCompiler() {
- if (mDxcCompiler == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
- "DXC create compiler"));
- ASSERT(mDxcCompiler != nullptr);
- }
- return {};
- }
-
- MaybeError Backend::EnsureDxcValidator() {
- if (mDxcValidator == nullptr) {
- DAWN_TRY(CheckHRESULT(
- mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
- "DXC create validator"));
- ASSERT(mDxcValidator != nullptr);
- }
- return {};
- }
-
- ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
- ASSERT(mDxcLibrary != nullptr);
- return mDxcLibrary;
- }
-
- ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
- ASSERT(mDxcCompiler != nullptr);
- return mDxcCompiler;
- }
-
- ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
- ASSERT(mDxcValidator != nullptr);
- return mDxcValidator;
- }
-
- const PlatformFunctions* Backend::GetFunctions() const {
- return mFunctions.get();
- }
-
- std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
- }
-
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
- std::vector<std::unique_ptr<AdapterBase>> adapters;
- if (options->dxgiAdapter != nullptr) {
- // |dxgiAdapter| was provided. Discover just that adapter.
- std::unique_ptr<AdapterBase> adapter;
- DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
- adapters.push_back(std::move(adapter));
- return std::move(adapters);
- }
-
- // Enumerate and discover all available adapters.
- for (uint32_t adapterIndex = 0;; ++adapterIndex) {
- ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
- if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
- break; // No more adapters to enumerate.
- }
-
- ASSERT(dxgiAdapter != nullptr);
- ResultOrError<std::unique_ptr<AdapterBase>> adapter =
- CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
- if (adapter.IsError()) {
- GetInstance()->ConsumedError(adapter.AcquireError());
- continue;
- }
-
- adapters.push_back(std::move(adapter.AcquireSuccess()));
- }
-
- return adapters;
- }
-
- BackendConnection* Connect(InstanceBase* instance) {
- Backend* backend = new Backend(instance);
-
- if (instance->ConsumedError(backend->Initialize())) {
- delete backend;
- return nullptr;
- }
-
- return backend;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
deleted file mode 100644
index 17f77ccec35..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_BACKENDD3D12_H_
-#define DAWNNATIVE_D3D12_BACKENDD3D12_H_
-
-#include "dawn_native/BackendConnection.h"
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class PlatformFunctions;
-
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance);
-
- MaybeError Initialize();
-
- ComPtr<IDXGIFactory4> GetFactory() const;
-
- MaybeError EnsureDxcLibrary();
- MaybeError EnsureDxcCompiler();
- MaybeError EnsureDxcValidator();
- ComPtr<IDxcLibrary> GetDxcLibrary() const;
- ComPtr<IDxcCompiler> GetDxcCompiler() const;
- ComPtr<IDxcValidator> GetDxcValidator() const;
-
- const PlatformFunctions* GetFunctions() const;
-
- std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
-
- private:
- // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
- // the D3D12 DLLs are unloaded before we are done using them.
- std::unique_ptr<PlatformFunctions> mFunctions;
- ComPtr<IDXGIFactory4> mFactory;
- ComPtr<IDxcLibrary> mDxcLibrary;
- ComPtr<IDxcCompiler> mDxcCompiler;
- ComPtr<IDxcValidator> mDxcValidator;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_BACKENDD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
deleted file mode 100644
index 20822408f3f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/BindGroupD3D12.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/BufferD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
-#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- // static
- ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
-
- BindGroup::BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation)
- : BindGroupBase(this, device, descriptor) {
- BindGroupLayout* bgl = ToBackend(GetLayout());
-
- mCPUViewAllocation = viewAllocation;
-
- const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
-
- ID3D12Device* d3d12Device = device->GetD3D12Device();
-
- // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
- // This is because they are created as root descriptors which are never heap allocated.
- // Since dynamic buffers are packed in the front, we can skip over these bindings by
- // starting from the dynamic buffer count.
- for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
- bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
-
- // Increment size does not need to be stored and is only used to get a handle
- // local to the allocation with OffsetFrom().
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
-
- ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
- if (resource == nullptr) {
- // The Buffer was destroyed. Skip creating buffer views since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
- }
-
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform: {
- D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
- desc.SizeInBytes =
- Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
- desc.BufferLocation =
- ToBackend(binding.buffer)->GetVA() + binding.offset;
-
- d3d12Device->CreateConstantBufferView(
- &desc, viewAllocation.OffsetFrom(
- viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding: {
- // Since SPIRV-Cross outputs HLSL shaders with RWByteAddressBuffer,
- // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
- // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
- // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
- // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
- // byte aligned. Since binding.size and binding.offset are in bytes,
- // we need to divide by 4 to obtain the element size.
- D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
- desc.Buffer.NumElements = binding.size / 4;
- desc.Format = DXGI_FORMAT_R32_TYPELESS;
- desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
- desc.Buffer.FirstElement = binding.offset / 4;
- desc.Buffer.StructureByteStride = 0;
- desc.Buffer.CounterOffsetInBytes = 0;
- desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
-
- d3d12Device->CreateUnorderedAccessView(
- resource, nullptr, &desc,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::ReadOnlyStorage: {
- // Like StorageBuffer, SPIRV-Cross outputs HLSL shaders for readonly
- // storage buffer with ByteAddressBuffer. So we must use
- // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
- // similar requirement for format, element size, etc.
- D3D12_SHADER_RESOURCE_VIEW_DESC desc;
- desc.Format = DXGI_FORMAT_R32_TYPELESS;
- desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
- desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
- desc.Buffer.FirstElement = binding.offset / 4;
- desc.Buffer.NumElements = binding.size / 4;
- desc.Buffer.StructureByteStride = 0;
- desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
- d3d12Device->CreateShaderResourceView(
- resource, &desc,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- break;
- }
-
- case BindingInfoType::Texture: {
- auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
- auto& srv = view->GetSRVDescriptor();
-
- ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
- if (resource == nullptr) {
- // The Texture was destroyed. Skip creating the SRV since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
- }
-
- d3d12Device->CreateShaderResourceView(
- resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
- ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
- if (resource == nullptr) {
- // The Texture was destroyed. Skip creating the SRV/UAV since there is no
- // resource. This bind group won't be used as it is an error to submit a
- // command buffer that references destroyed resources.
- continue;
- }
-
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly: {
- D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
- d3d12Device->CreateUnorderedAccessView(
- resource, nullptr, &uav,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
-
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
-
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& views =
- GetBindingAsExternalTexture(bindingIndex)->GetTextureViews();
-
- // Only single-plane formats are supported right now, so assert only one view
- // exists.
- ASSERT(views[1].Get() == nullptr);
- ASSERT(views[2].Get() == nullptr);
-
- auto& srv = ToBackend(views[0])->GetSRVDescriptor();
-
- ID3D12Resource* resource =
- ToBackend(views[0]->GetTexture())->GetD3D12Resource();
-
- d3d12Device->CreateShaderResourceView(
- resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- descriptorHeapOffsets[bindingIndex]));
- break;
- }
-
- case BindingInfoType::Sampler: {
- // No-op as samplers will be later initialized by CreateSamplers().
- break;
- }
- }
- }
-
- // Loop through the dynamic storage buffers and build a flat map from the index of the
- // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
- // means that it is the i'th buffer that is both dynamic and storage, in increasing order
- // of BindingNumber.
- mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
- uint32_t dynamicStorageBufferIndex = 0;
- for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
- ++bindingIndex) {
- if (bgl->IsStorageBufferBinding(bindingIndex)) {
- mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
- GetBindingAsBufferBinding(bindingIndex).size;
- }
- }
- }
-
- BindGroup::~BindGroup() = default;
-
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
- ASSERT(!mCPUViewAllocation.IsValid());
- }
-
- bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
- const BindGroupLayout* bgl = ToBackend(GetLayout());
-
- const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
- if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
- return true;
- }
-
- // Attempt to allocate descriptors for the currently bound shader-visible heaps.
- // If either failed, return early to re-allocate and switch the heaps.
- Device* device = ToBackend(GetDevice());
-
- D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
- if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
- device->GetPendingCommandSerial(),
- &baseCPUDescriptor, &mGPUViewAllocation)) {
- return false;
- }
-
- // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
- // simple copies per bindgroup, a single non-simple copy could be issued.
- // TODO(dawn:155): Consider doing this optimization.
- device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
- mCPUViewAllocation.GetBaseDescriptor(),
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
-
- return true;
- }
-
- D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
- return mGPUViewAllocation.GetBaseDescriptor();
- }
-
- D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
- ASSERT(mSamplerAllocationEntry != nullptr);
- return mSamplerAllocationEntry->GetBaseDescriptor();
- }
-
- bool BindGroup::PopulateSamplers(Device* device,
- ShaderVisibleDescriptorAllocator* samplerAllocator) {
- if (mSamplerAllocationEntry == nullptr) {
- return true;
- }
- return mSamplerAllocationEntry->Populate(device, samplerAllocator);
- }
-
- void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
- mSamplerAllocationEntry = std::move(entry);
- }
-
- const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths()
- const {
- return mDynamicStorageBufferLengths;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
deleted file mode 100644
index b34b0d4d090..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
-#define DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
-
-#include "common/PlacementAllocated.h"
-#include "common/ityp_span.h"
-#include "common/ityp_stack_vec.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
- class SamplerHeapCacheEntry;
- class ShaderVisibleDescriptorAllocator;
-
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static ResultOrError<Ref<BindGroup>> Create(Device* device,
- const BindGroupDescriptor* descriptor);
-
- BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- uint32_t viewSizeIncrement,
- const CPUDescriptorHeapAllocation& viewAllocation);
-
- // Returns true if the BindGroup was successfully populated.
- bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
- bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
-
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
-
- void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
-
- using DynamicStorageBufferLengths =
- ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
- const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
-
- private:
- ~BindGroup() override;
-
- void DestroyImpl() override;
-
- Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
-
- GPUDescriptorHeapAllocation mGPUViewAllocation;
- CPUDescriptorHeapAllocation mCPUViewAllocation;
-
- DynamicStorageBufferLengths mDynamicStorageBufferLengths;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
deleted file mode 100644
index 761b8f74a7a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/d3d12/BindGroupD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
- namespace {
- D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
- const BindingInfo& bindingInfo) {
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- case BindingInfoType::Sampler:
- return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
-
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
- return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
-
- case BindingInfoType::StorageTexture:
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
- }
- }
- } // anonymous namespace
-
- // static
- Ref<BindGroupLayout> BindGroupLayout::Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
- }
-
- BindGroupLayout::BindGroupLayout(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mDescriptorHeapOffsets(GetBindingCount()),
- mShaderRegisters(GetBindingCount()),
- mCbvUavSrvDescriptorCount(0),
- mSamplerDescriptorCount(0),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-
- D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
- WGPUBindingInfoToDescriptorRangeType(bindingInfo);
-
- // TODO(dawn:728) In the future, special handling will be needed for external textures
- // here because they encompass multiple views.
- mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
-
- if (bindingIndex < GetDynamicBufferCount()) {
- continue;
- }
-
- // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
- // need to allocate the descriptor from descriptor heap or create descriptor ranges.
- ASSERT(!bindingInfo.buffer.hasDynamicOffset);
-
- // TODO(dawn:728) In the future, special handling will be needed for external textures
- // here because they encompass multiple views.
- mDescriptorHeapOffsets[bindingIndex] =
- descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
- ? mSamplerDescriptorCount++
- : mCbvUavSrvDescriptorCount++;
-
- D3D12_DESCRIPTOR_RANGE range;
- range.RangeType = descriptorRangeType;
- range.NumDescriptors = 1;
- range.BaseShaderRegister = GetShaderRegister(bindingIndex);
- range.RegisterSpace = kRegisterSpacePlaceholder;
- range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
-
- std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
- descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
- ? mSamplerDescriptorRanges
- : mCbvUavSrvDescriptorRanges;
-
- // Try to join this range with the previous one, if the current range is a continuation
- // of the previous. This is possible because the binding infos in the base type are
- // sorted.
- if (descriptorRanges.size() >= 2) {
- D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
- if (previous.RangeType == range.RangeType &&
- previous.BaseShaderRegister + previous.NumDescriptors ==
- range.BaseShaderRegister) {
- previous.NumDescriptors += range.NumDescriptors;
- continue;
- }
- }
-
- descriptorRanges.push_back(range);
- }
-
- mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
- mSamplerAllocator =
- device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
- }
-
- ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
- Device* device,
- const BindGroupDescriptor* descriptor) {
- uint32_t viewSizeIncrement = 0;
- CPUDescriptorHeapAllocation viewAllocation;
- if (GetCbvUavSrvDescriptorCount() > 0) {
- DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
- viewSizeIncrement = mViewAllocator->GetSizeIncrement();
- }
-
- Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
- mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
-
- if (GetSamplerDescriptorCount() > 0) {
- Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
- DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
- bindGroup.Get(), mSamplerAllocator));
- bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
- }
-
- return bindGroup;
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
- CPUDescriptorHeapAllocation* viewAllocation) {
- if (viewAllocation->IsValid()) {
- mViewAllocator->Deallocate(viewAllocation);
- }
-
- mBindGroupAllocator.Deallocate(bindGroup);
- }
-
- ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
- return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
- }
-
- uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
- return mShaderRegisters[bindingIndex];
- }
-
- uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
- return mCbvUavSrvDescriptorCount;
- }
-
- uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
- return mSamplerDescriptorCount;
- }
-
- const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
- const {
- return mCbvUavSrvDescriptorRanges;
- }
-
- const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
- return mSamplerDescriptorRanges;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
deleted file mode 100644
index abf67021a03..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
-#define DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
-
-#include "dawn_native/BindGroupLayout.h"
-
-#include "common/SlabAllocator.h"
-#include "common/ityp_stack_vec.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class BindGroup;
- class CPUDescriptorHeapAllocation;
- class Device;
- class StagingDescriptorAllocator;
-
- // A purposefully invalid register space.
- //
- // We use the bind group index as the register space, but don't know the bind group index until
- // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
- static constexpr uint32_t kRegisterSpacePlaceholder =
- D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static Ref<BindGroupLayout> Create(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
-
- // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
- // dynamic binding indexes.
- ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
-
- // The D3D shader register that the Dawn binding index is mapped to by this bind group
- // layout.
- uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
-
- // Counts of descriptors in the descriptor tables.
- uint32_t GetCbvUavSrvDescriptorCount() const;
- uint32_t GetSamplerDescriptorCount() const;
-
- const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
- const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
-
- private:
- BindGroupLayout(Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayout() override = default;
-
- // Contains the offset into the descriptor heap for the given resource view. Samplers and
- // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
- // within each group and tightly packed.
- //
- // Dynamic resources are not used here since their descriptors are placed directly in root
- // parameters.
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
-
- // Contains the shader register this binding is mapped to.
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
-
- uint32_t mCbvUavSrvDescriptorCount;
- uint32_t mSamplerDescriptorCount;
-
- std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
- std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
-
- StagingDescriptorAllocator* mSamplerAllocator = nullptr;
- StagingDescriptorAllocator* mViewAllocator = nullptr;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_BINDGROUPLAYOUTD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
deleted file mode 100644
index a05eec07112..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/BufferD3D12.h"
-
-#include "common/Assert.h"
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
- D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
-
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
- }
-
- return flags;
- }
-
- D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
- D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
-
- if (usage & wgpu::BufferUsage::CopySrc) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
- if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
- resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
- }
- if (usage & wgpu::BufferUsage::Index) {
- resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
- }
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- }
- if (usage & kReadOnlyStorageBuffer) {
- resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
-
- return resourceState;
- }
-
- D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
- if (allowedUsage & wgpu::BufferUsage::MapRead) {
- return D3D12_HEAP_TYPE_READBACK;
- } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
- return D3D12_HEAP_TYPE_UPLOAD;
- } else {
- return D3D12_HEAP_TYPE_DEFAULT;
- }
- }
-
- size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
- if ((usage & wgpu::BufferUsage::Uniform) != 0) {
- // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
- // forbids binding a CBV to an unaligned size. To prevent, one can always safely
- // align the buffer size to the CBV data alignment as other buffer usages
- // ignore it (no size check). The validation will still enforce bound checks with
- // the unaligned size returned by GetSize().
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
- return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
- }
- return 1;
- }
- } // namespace
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return buffer;
- }
-
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
- }
-
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- uint64_t size = std::max(GetSize(), uint64_t(4u));
- size_t alignment = D3D12BufferSizeAlignment(GetUsage());
- if (size > std::numeric_limits<uint64_t>::max() - alignment) {
- // Alignment would overlow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- mAllocatedSize = Align(size, alignment);
-
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
- resourceDescriptor.Alignment = 0;
- resourceDescriptor.Width = mAllocatedSize;
- resourceDescriptor.Height = 1;
- resourceDescriptor.DepthOrArraySize = 1;
- resourceDescriptor.MipLevels = 1;
- resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
- resourceDescriptor.SampleDesc.Count = 1;
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
- // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
- // and robust resource initialization.
- resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
-
- auto heapType = D3D12HeapType(GetUsage());
- auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
-
- // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
- // state
- if (heapType == D3D12_HEAP_TYPE_READBACK) {
- bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
- mFixedResourceState = true;
- mLastUsage = wgpu::BufferUsage::CopyDst;
- }
-
- // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
- // state
- if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
- bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
- mFixedResourceState = true;
- mLastUsage = wgpu::BufferUsage::CopySrc;
- }
-
- DAWN_TRY_ASSIGN(
- mResourceAllocation,
- ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
-
- SetLabelImpl();
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
- CommandRecordingContext* commandRecordingContext;
- DAWN_TRY_ASSIGN(commandRecordingContext,
- ToBackend(GetDevice())->GetPendingCommandContext());
-
- DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
- }
-
- // Initialize the padding bytes to zero.
- if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
- !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- CommandRecordingContext* commandRecordingContext;
- DAWN_TRY_ASSIGN(commandRecordingContext,
- ToBackend(GetDevice())->GetPendingCommandContext());
-
- uint32_t clearSize = paddingBytes;
- uint64_t clearOffset = GetSize();
- DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
- }
- }
-
- return {};
- }
-
- Buffer::~Buffer() = default;
-
- ID3D12Resource* Buffer::GetD3D12Resource() const {
- return mResourceAllocation.GetD3D12Resource();
- }
-
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
-
- // Return the resource barrier.
- return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
- }
-
- void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::BufferUsage newUsage) {
- D3D12_RESOURCE_BARRIER barrier;
-
- if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
- }
- }
-
- // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
- // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
- // cause subsequent errors.
- bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage) {
- // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
- if (mFixedResourceState) {
- ASSERT(mLastUsage == newUsage);
- return false;
- }
-
- D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
- D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
-
- // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
- // If one of the usages isn't UAV, then other barriers are used.
- bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
- newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
-
- if (needsUAVBarrier) {
- barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
- barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->UAV.pResource = GetD3D12Resource();
-
- mLastUsage = newUsage;
- return true;
- }
-
- // We can skip transitions to already current usages.
- if (IsSubset(newUsage, mLastUsage)) {
- return false;
- }
-
- mLastUsage = newUsage;
-
- // The COMMON state represents a state where no write operations can be pending, which makes
- // it possible to transition to and from some states without synchronizaton (i.e. without an
- // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
- // state, or 2) multiple read states. A buffer that is accessed within a command list will
- // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
- // completes - this is because all buffer writes are guaranteed to be completed before the
- // next ExecuteCommandLists call executes.
- // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
- // To track implicit decays, we must record the pending serial on which a transition will
- // occur. When that buffer is used again, the previously recorded serial must be compared to
- // the last completed serial to determine if the buffer has implicity decayed to the common
- // state.
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
- if (pendingCommandSerial > mLastUsedSerial) {
- lastState = D3D12_RESOURCE_STATE_COMMON;
- mLastUsedSerial = pendingCommandSerial;
- }
-
- // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
- // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
- // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
- // destination state cannot be 1) more than one write state, or 2) both a read and write
- // state. This goes unchecked here because it should not be allowed through render/compute
- // pass validation.
- if (lastState == D3D12_RESOURCE_STATE_COMMON) {
- return false;
- }
-
- // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
- // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
- // barrier state.
- if (lastState == newState) {
- return false;
- }
-
- barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
- barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->Transition.pResource = GetD3D12Resource();
- barrier->Transition.StateBefore = lastState;
- barrier->Transition.StateAfter = newState;
- barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
-
- return true;
- }
-
- D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
- return mResourceAllocation.GetGPUPointer();
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
- // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
- // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
- // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
- // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
- // was created. With a staging buffer, the data on the CPU side will first upload to the
- // staging buffer, and copied from the staging buffer to the GPU memory of the current
- // buffer in the unmap() call.
- // TODO(enga): Handle CPU-visible memory on UMA
- return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
- }
-
- MaybeError Buffer::MapInternal(bool isWrite,
- size_t offset,
- size_t size,
- const char* contextInfo) {
- // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
- // evicted. This buffer should already have been made resident when it was created.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
-
- D3D12_RANGE range = {offset, offset + size};
- // mMappedData is the pointer to the start of the resource, irrespective of offset.
- // MSDN says (note the weird use of "never"):
- //
- // When ppData is not NULL, the pointer returned is never offset by any values in
- // pReadRange.
- //
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
- DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
-
- if (isWrite) {
- mWrittenMappedRange = range;
- }
-
- return {};
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- // We will use a staging buffer for MapRead buffers instead so we just clear the staging
- // buffer and initialize the original buffer by copying the staging buffer to the original
- // buffer one the first time Unmap() is called.
- ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
-
- return {};
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
- DAWN_TRY(EnsureDataInitialized(commandContext));
-
- return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
- }
-
- void Buffer::UnmapImpl() {
- GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
- mMappedData = nullptr;
- mWrittenMappedRange = {0, 0};
-
- // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
- // them when they are unmapped.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
- }
-
- void* Buffer::GetMappedPointerImpl() {
- // The frontend asks that the pointer returned is from the start of the resource
- // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
- return mMappedData;
- }
-
- void Buffer::DestroyImpl() {
- if (mMappedData != nullptr) {
- // If the buffer is currently mapped, unmap without flushing the writes to the GPU
- // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
- // which parts to flush, so we set it to an empty range to prevent flushes.
- mWrittenMappedRange = {0, 0};
- }
- BufferBase::DestroyImpl();
-
- ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
- }
-
- bool Buffer::CheckIsResidentForTesting() const {
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- return heap->IsInList() || heap->IsResidencyLocked();
- }
-
- bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
- return mResourceAllocation.GetInfo().mMethod == allocationMethod;
- }
-
- MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
- if (!NeedsInitialization()) {
- return {};
- }
-
- DAWN_TRY(InitializeToZero(commandContext));
- return {};
- }
-
- ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
- CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return {false};
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return {false};
- }
-
- DAWN_TRY(InitializeToZero(commandContext));
- return {true};
- }
-
- MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return {};
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- } else {
- DAWN_TRY(InitializeToZero(commandContext));
- }
-
- return {};
- }
-
- void Buffer::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
- GetLabel());
- }
-
- MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
- ASSERT(NeedsInitialization());
-
- // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
- // that has already been zero initialized.
- DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
- SetIsDataInitialized();
- GetDevice()->IncrementLazyClearCountForTesting();
-
- return {};
- }
-
- MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset,
- uint64_t size) {
- Device* device = ToBackend(GetDevice());
- size = size > 0 ? size : GetAllocatedSize();
-
- // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
- // changed away, so we can only clear such buffer with buffer mapping.
- if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
- DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
- "D3D12 map at clear buffer"));
- memset(mMappedData, clearValue, size);
- UnmapImpl();
- } else if (clearValue == 0u) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
- } else {
- // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
- // includes STORAGE.
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
-
- memset(uploadHandle.mappedBuffer, clearValue, size);
-
- device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
- uploadHandle.startOffset, this, offset, size);
- }
-
- return {};
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
deleted file mode 100644
index 8e9e01e7b38..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_BUFFERD3D12_H_
-#define DAWNNATIVE_D3D12_BUFFERD3D12_H_
-
-#include "dawn_native/Buffer.h"
-
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
-
- ID3D12Resource* GetD3D12Resource() const;
- D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
-
- bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::BufferUsage newUsage);
-
- bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
- bool CheckIsResidentForTesting() const;
-
- MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
- ResultOrError<bool> EnsureDataInitializedAsDestination(
- CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size);
- MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- Buffer(Device* device, const BufferDescriptor* descriptor);
- ~Buffer() override;
-
- MaybeError Initialize(bool mappedAtCreation);
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- virtual MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
-
- bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage);
-
- MaybeError InitializeToZero(CommandRecordingContext* commandContext);
- MaybeError ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- ResourceHeapAllocation mResourceAllocation;
- bool mFixedResourceState = false;
- wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
- ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
-
- D3D12_RANGE mWrittenMappedRange = {0, 0};
- void* mMappedData = nullptr;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_BUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
deleted file mode 100644
index d92398584d9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace d3d12 {
-
- CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
- uint32_t heapIndex)
- : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
- }
-
- D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
- ASSERT(IsValid());
- return mBaseDescriptor;
- }
-
- D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
- uint32_t sizeIncrementInBytes,
- uint32_t offsetInDescriptorCount) const {
- ASSERT(IsValid());
- D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
- cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
- return cpuHandle;
- }
-
- uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
- ASSERT(mHeapIndex >= 0);
- return mHeapIndex;
- }
-
- bool CPUDescriptorHeapAllocation::IsValid() const {
- return mBaseDescriptor.ptr != 0;
- }
-
- void CPUDescriptorHeapAllocation::Invalidate() {
- mBaseDescriptor = {0};
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h
deleted file mode 100644
index 51ae2fdb61f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
-#define DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
-
-#include <cstdint>
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- // Wrapper for a handle into a CPU-only descriptor heap.
- class CPUDescriptorHeapAllocation {
- public:
- CPUDescriptorHeapAllocation() = default;
- CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
-
- D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
-
- D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
- uint32_t offsetInDescriptorCount) const;
- uint32_t GetHeapIndex() const;
-
- bool IsValid() const;
-
- void Invalidate();
-
- private:
- D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
- uint32_t mHeapIndex = -1;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp
deleted file mode 100644
index c011feba602..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/CommandAllocatorManager.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-
-namespace dawn_native { namespace d3d12 {
-
- CommandAllocatorManager::CommandAllocatorManager(Device* device)
- : device(device), mAllocatorCount(0) {
- mFreeAllocators.set();
- }
-
- ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
- // If there are no free allocators, get the oldest serial in flight and wait on it
- if (mFreeAllocators.none()) {
- const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
- DAWN_TRY(device->WaitForSerial(firstSerial));
- DAWN_TRY(Tick(firstSerial));
- }
-
- ASSERT(mFreeAllocators.any());
-
- // Get the index of the first free allocator from the bitset
- unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
-
- if (firstFreeIndex >= mAllocatorCount) {
- ASSERT(firstFreeIndex == mAllocatorCount);
- mAllocatorCount++;
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
- D3D12_COMMAND_LIST_TYPE_DIRECT,
- IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
- "D3D12 create command allocator"));
- }
-
- // Mark the command allocator as used
- mFreeAllocators.reset(firstFreeIndex);
-
- // Enqueue the command allocator. It will be scheduled for reset after the next
- // ExecuteCommandLists
- mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
- device->GetPendingCommandSerial());
- return mCommandAllocators[firstFreeIndex].Get();
- }
-
- MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
- // Reset all command allocators that are no longer in flight
- for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
- DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
- mFreeAllocators.set(it.index);
- }
- mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
- return {};
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h
deleted file mode 100644
index 3c123954c17..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandAllocatorManager.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
-#define DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include "common/SerialQueue.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/IntegerTypes.h"
-
-#include <bitset>
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class CommandAllocatorManager {
- public:
- CommandAllocatorManager(Device* device);
-
- // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
- // otherwise its commands may be reset before execution has completed on the GPU
- ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
- MaybeError Tick(ExecutionSerial lastCompletedSerial);
-
- private:
- Device* device;
-
- // This must be at least 2 because the Device and Queue use separate command allocators
- static constexpr unsigned int kMaxCommandAllocators = 32;
- unsigned int mAllocatorCount;
-
- struct IndexedCommandAllocator {
- ComPtr<ID3D12CommandAllocator> commandAllocator;
- unsigned int index;
- };
-
- ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
- std::bitset<kMaxCommandAllocators> mFreeAllocators;
- SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
deleted file mode 100644
index 5ae6149f4b3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ /dev/null
@@ -1,1651 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/CommandBufferD3D12.h"
-
-#include "dawn_native/BindGroupTracker.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/d3d12/BindGroupD3D12.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/ComputePipelineD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-#include "dawn_native/d3d12/QuerySetD3D12.h"
-#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
-#include "dawn_native/d3d12/RenderPipelineD3D12.h"
-#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/StagingBufferD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
-
- DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Undefined:
- return DXGI_FORMAT_UNKNOWN;
- case wgpu::IndexFormat::Uint16:
- return DXGI_FORMAT_R16_UINT;
- case wgpu::IndexFormat::Uint32:
- return DXGI_FORMAT_R32_UINT;
- }
- }
-
- D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return D3D12_QUERY_TYPE_TIMESTAMP;
- }
- }
-
- bool CanUseCopyResource(const TextureCopy& src,
- const TextureCopy& dst,
- const Extent3D& copySize) {
- // Checked by validation
- ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
- ASSERT(src.texture->GetFormat().format == dst.texture->GetFormat().format);
- ASSERT(src.aspect == dst.aspect);
-
- const Extent3D& srcSize = src.texture->GetSize();
- const Extent3D& dstSize = dst.texture->GetSize();
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
- // In order to use D3D12's copy resource, the textures must be the same dimensions, and
- // the copy must be of the entire resource.
- // TODO(dawn:129): Support 1D textures.
- return src.aspect == src.texture->GetFormat().aspects &&
- src.texture->GetDimension() == dst.texture->GetDimension() && //
- dst.texture->GetNumMipLevels() == 1 && //
- src.texture->GetNumMipLevels() == 1 && // A copy command is of a single mip, so
- // if a resource has more than one, we
- // definitely cannot use CopyResource.
- copySize.width == dstSize.width && //
- copySize.width == srcSize.width && //
- copySize.height == dstSize.height && //
- copySize.height == srcSize.height && //
- copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers && //
- copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
- }
-
- void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
- WriteTimestampCmd* cmd) {
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
- commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
- cmd->queryIndex);
- }
-
- void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
- Device* device,
- QuerySet* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- Buffer* destination,
- uint64_t destinationOffset) {
- const std::vector<bool>& availability = querySet->GetQueryAvailability();
-
- auto currentIt = availability.begin() + firstQuery;
- auto lastIt = availability.begin() + firstQuery + queryCount;
-
- // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No available query found for resolving
- if (firstTrueIt == lastIt) {
- break;
- }
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
-
- // The query index of firstTrueIt where the resolving starts
- uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
- // The queries count between firstTrueIt and nextFalseIt need to be resolved
- uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
-
- // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
- uint32_t resolveDestinationOffset =
- destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
-
- // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
- commandList->ResolveQueryData(
- querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
- resolveQueryIndex, resolveQueryCount, destination->GetD3D12Resource(),
- resolveDestinationOffset);
-
- // Set current iterator to next false
- currentIt = nextFalseIt;
- }
- }
-
- void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
- RenderPipeline* pipeline,
- uint32_t firstVertex,
- uint32_t firstInstance) {
- const FirstOffsetInfo& firstOffsetInfo = pipeline->GetFirstOffsetInfo();
- if (!firstOffsetInfo.usesVertexIndex && !firstOffsetInfo.usesInstanceIndex) {
- return;
- }
- std::array<uint32_t, 2> offsets{};
- uint32_t count = 0;
- if (firstOffsetInfo.usesVertexIndex) {
- offsets[firstOffsetInfo.vertexIndexOffset / sizeof(uint32_t)] = firstVertex;
- ++count;
- }
- if (firstOffsetInfo.usesInstanceIndex) {
- offsets[firstOffsetInfo.instanceIndexOffset / sizeof(uint32_t)] = firstInstance;
- ++count;
- }
- PipelineLayout* layout = ToBackend(pipeline->GetLayout());
- commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
- count, offsets.data(), 0);
- }
-
- bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy) {
- // Currently we only need the workaround for an Intel D3D12 driver issue.
- if (device->IsToggleEnabled(
- Toggle::
- UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
- bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
- ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
-
- // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
- // sizes of depth stencil formats are always no less than 4 bytes.
- bool isSmallColorFormat =
- HasOneBit(srcCopy.aspect) &&
- srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
- if (copyToLesserLevel && isSmallColorFormat) {
- return true;
- }
- }
-
- return false;
- }
-
- MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
- ASSERT(srcCopy.aspect == dstCopy.aspect);
- dawn_native::Format format = srcCopy.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
- ASSERT(copySize.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
- // Create tempBuffer
- uint32_t bytesPerRow =
- Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
- uint32_t rowsPerImage = heightInBlocks;
-
- // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
- // need to set mappedAtCreation to be true.
- auto tempBufferSize =
- ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
-
- BufferDescriptor tempBufferDescriptor;
- tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
- tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
- Device* device = ToBackend(srcCopy.texture->GetDevice());
- Ref<BufferBase> tempBufferBase;
- DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
- Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
-
- // Copy from source texture into tempBuffer
- Texture* srcTexture = ToBackend(srcCopy.texture).Get();
- tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
- BufferCopy bufferCopy;
- bufferCopy.buffer = tempBuffer;
- bufferCopy.offset = 0;
- bufferCopy.bytesPerRow = bytesPerRow;
- bufferCopy.rowsPerImage = rowsPerImage;
- RecordCopyTextureToBuffer(recordingContext->GetCommandList(), srcCopy, bufferCopy,
- srcTexture, tempBuffer.Get(), copySize);
-
- // Copy from tempBuffer into destination texture
- tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
- Texture* dstTexture = ToBackend(dstCopy.texture).Get();
- RecordCopyBufferToTexture(recordingContext, dstCopy, tempBuffer->GetD3D12Resource(), 0,
- bytesPerRow, rowsPerImage, copySize, dstTexture,
- dstCopy.aspect);
-
- // Save tempBuffer into recordingContext
- recordingContext->AddToTempBuffers(std::move(tempBuffer));
-
- return {};
- }
-
- void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
- ComputePipeline* pipeline,
- DispatchCmd* dispatch) {
- if (!pipeline->UsesNumWorkgroups()) {
- return;
- }
-
- PipelineLayout* layout = ToBackend(pipeline->GetLayout());
- commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3,
- dispatch, 0);
- }
-
- // Records the necessary barriers for a synchronization scope using the resource usage
- // data pre-computed in the frontend. Also performs lazy initialization if required.
- // Returns whether any UAV are used in the synchronization scope.
- bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
- const SyncScopeResourceUsage& usages) {
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
-
- for (size_t i = 0; i < usages.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(usages.buffers[i]);
-
- // TODO(crbug.com/dawn/852): clear storage buffers with
- // ClearUnorderedAccessView*().
- buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
-
- D3D12_RESOURCE_BARRIER barrier;
- if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.bufferUsages[i])) {
- barriers.push_back(barrier);
- }
- bufferUsages |= usages.bufferUsages[i];
- }
-
- wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
-
- for (size_t i = 0; i < usages.textures.size(); ++i) {
- Texture* texture = ToBackend(usages.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- usages.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- textureUsages |= usage;
- });
-
- ToBackend(usages.textures[i])
- ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
- usages.textureUsages[i]);
- }
-
- if (barriers.size()) {
- commandList->ResourceBarrier(barriers.size(), barriers.data());
- }
-
- return (bufferUsages & wgpu::BufferUsage::Storage ||
- textureUsages & wgpu::TextureUsage::StorageBinding);
- }
-
- } // anonymous namespace
-
- class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
- using Base = BindGroupTrackerBase;
-
- public:
- BindGroupStateTracker(Device* device)
- : BindGroupTrackerBase(),
- mDevice(device),
- mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
- mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
- }
-
- void SetInComputePass(bool inCompute_) {
- mInCompute = inCompute_;
- }
-
- MaybeError Apply(CommandRecordingContext* commandContext) {
- BeforeApply();
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- UpdateRootSignatureIfNecessary(commandList);
-
- // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
- // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
- // at any given time. This means that when we switch heaps, all other currently bound
- // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
- // the signal to change the bounded heaps.
- // Re-populating all bindgroups after the last one fails causes duplicated allocations
- // to occur on overflow.
- bool didCreateBindGroupViews = true;
- bool didCreateBindGroupSamplers = true;
- for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
- BindGroup* group = ToBackend(mBindGroups[index]);
- didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
- didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
- if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
- break;
- }
- }
-
- if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
- if (!didCreateBindGroupViews) {
- DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
- }
-
- if (!didCreateBindGroupSamplers) {
- DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
- }
-
- mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
- mDirtyBindGroups |= mBindGroupLayoutsMask;
-
- // Must be called before applying the bindgroups.
- SetID3D12DescriptorHeaps(commandList);
-
- for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
- BindGroup* group = ToBackend(mBindGroups[index]);
- didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
- didCreateBindGroupSamplers =
- group->PopulateSamplers(mDevice, mSamplerAllocator);
- ASSERT(didCreateBindGroupViews);
- ASSERT(didCreateBindGroupSamplers);
- }
- }
-
- for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- BindGroup* group = ToBackend(mBindGroups[index]);
- ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
- mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
- }
-
- AfterApply();
-
- return {};
- }
-
- void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
- ASSERT(commandList != nullptr);
- std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
- mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
- ASSERT(descriptorHeaps[0] != nullptr);
- ASSERT(descriptorHeaps[1] != nullptr);
- commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
- }
-
- private:
- void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
- if (mLastAppliedPipelineLayout != mPipelineLayout) {
- if (mInCompute) {
- commandList->SetComputeRootSignature(
- ToBackend(mPipelineLayout)->GetRootSignature());
- } else {
- commandList->SetGraphicsRootSignature(
- ToBackend(mPipelineLayout)->GetRootSignature());
- }
- // Invalidate the root sampler tables previously set in the root signature.
- mBoundRootSamplerTables = {};
- }
- }
-
- void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
- const PipelineLayout* pipelineLayout,
- BindGroupIndex index,
- BindGroup* group,
- uint32_t dynamicOffsetCountIn,
- const uint64_t* dynamicOffsetsIn) {
- ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
- dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
- ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
-
- // Usually, the application won't set the same offsets many times,
- // so always try to apply dynamic offsets even if the offsets stay the same
- if (dynamicOffsets.size() != BindingIndex(0)) {
- // Update dynamic offsets.
- // Dynamic buffer bindings are packed at the beginning of the layout.
- for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
- ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
- if (bindingInfo.visibility == wgpu::ShaderStage::None) {
- // Skip dynamic buffers that are not visible. D3D12 does not have None
- // visibility.
- continue;
- }
-
- uint32_t parameterIndex =
- pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
-
- // Calculate buffer locations that root descriptors links to. The location
- // is (base buffer location + initial offset + dynamic offset)
- uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
- uint64_t offset = binding.offset + dynamicOffset;
- D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
- ToBackend(binding.buffer)->GetVA() + offset;
-
- ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (mInCompute) {
- commandList->SetComputeRootConstantBufferView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootConstantBufferView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- if (mInCompute) {
- commandList->SetComputeRootUnorderedAccessView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (mInCompute) {
- commandList->SetComputeRootShaderResourceView(parameterIndex,
- bufferLocation);
- } else {
- commandList->SetGraphicsRootShaderResourceView(parameterIndex,
- bufferLocation);
- }
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- }
- }
-
- // It's not necessary to update descriptor tables if only the dynamic offset changed.
- if (!mDirtyBindGroups[index]) {
- return;
- }
-
- const uint32_t cbvUavSrvCount =
- ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
- const uint32_t samplerCount =
- ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
-
- if (cbvUavSrvCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
- const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
- } else {
- commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
- }
- }
-
- if (samplerCount > 0) {
- uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
- const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
- group->GetBaseSamplerDescriptor();
- // Check if the group requires its sampler table to be set in the pipeline.
- // This because sampler heap allocations could be cached and use the same table.
- if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
- } else {
- commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
- }
-
- mBoundRootSamplerTables[index] = baseDescriptor;
- }
- }
-
- const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
- if (dynamicStorageBufferLengths.size() != 0) {
- uint32_t parameterIndex =
- pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
- uint32_t firstRegisterOffset =
- pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
-
- if (mInCompute) {
- commandList->SetComputeRoot32BitConstants(
- parameterIndex, dynamicStorageBufferLengths.size(),
- dynamicStorageBufferLengths.data(), firstRegisterOffset);
- } else {
- commandList->SetGraphicsRoot32BitConstants(
- parameterIndex, dynamicStorageBufferLengths.size(),
- dynamicStorageBufferLengths.data(), firstRegisterOffset);
- }
- }
- }
-
- Device* mDevice;
-
- bool mInCompute = false;
-
- ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
- mBoundRootSamplerTables = {};
-
- ShaderVisibleDescriptorAllocator* mViewAllocator;
- ShaderVisibleDescriptorAllocator* mSamplerAllocator;
- };
-
- namespace {
- class VertexBufferTracker {
- public:
- void OnSetVertexBuffer(VertexBufferSlot slot,
- Buffer* buffer,
- uint64_t offset,
- uint64_t size) {
- mStartSlot = std::min(mStartSlot, slot);
- mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
-
- auto* d3d12BufferView = &mD3D12BufferViews[slot];
- d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
- d3d12BufferView->SizeInBytes = size;
- // The bufferView stride is set based on the vertex state before a draw.
- }
-
- void Apply(ID3D12GraphicsCommandList* commandList,
- const RenderPipeline* renderPipeline) {
- ASSERT(renderPipeline != nullptr);
-
- VertexBufferSlot startSlot = mStartSlot;
- VertexBufferSlot endSlot = mEndSlot;
-
- // If the vertex state has changed, we need to update the StrideInBytes
- // for the D3D12 buffer views. We also need to extend the dirty range to
- // touch all these slots because the stride may have changed.
- if (mLastAppliedRenderPipeline != renderPipeline) {
- mLastAppliedRenderPipeline = renderPipeline;
-
- for (VertexBufferSlot slot :
- IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
- startSlot = std::min(startSlot, slot);
- endSlot =
- std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
- mD3D12BufferViews[slot].StrideInBytes =
- renderPipeline->GetVertexBuffer(slot).arrayStride;
- }
- }
-
- if (endSlot <= startSlot) {
- return;
- }
-
- // mD3D12BufferViews is kept up to date with the most recent data passed
- // to SetVertexBuffer. This makes it correct to only track the start
- // and end of the dirty range. When Apply is called,
- // we will at worst set non-dirty vertex buffers in duplicate.
- commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
- static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
- &mD3D12BufferViews[startSlot]);
-
- mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
- mEndSlot = VertexBufferSlot(uint8_t(0));
- }
-
- private:
- // startSlot and endSlot indicate the range of dirty vertex buffers.
- // If there are multiple calls to SetVertexBuffer, the start and end
- // represent the union of the dirty ranges (the union may have non-dirty
- // data in the middle of the range).
- const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
- VertexBufferSlot mStartSlot{kMaxVertexBuffers};
- VertexBufferSlot mEndSlot{uint8_t(0)};
- ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers>
- mD3D12BufferViews = {};
- };
-
- void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass) {
- ASSERT(renderPass != nullptr);
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- TextureViewBase* resolveTarget =
- renderPass->colorAttachments[i].resolveTarget.Get();
- if (resolveTarget == nullptr) {
- continue;
- }
-
- TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
- Texture* colorTexture = ToBackend(colorView->GetTexture());
- Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
-
- // Transition the usages of the color attachment and resolve target.
- colorTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
- colorView->GetSubresourceRange());
- resolveTexture->TrackUsageAndTransitionNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_DEST,
- resolveTarget->GetSubresourceRange());
-
- // Do MSAA resolve with ResolveSubResource().
- ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
- ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
- const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
- resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
- Aspect::Color);
- constexpr uint32_t kColorTextureSubresourceIndex = 0;
- commandContext->GetCommandList()->ResolveSubresource(
- resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
- kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
- }
- }
-
- } // anonymous namespace
-
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
-
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
-
- MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
- Device* device = ToBackend(GetDevice());
- BindGroupStateTracker bindingTracker(device);
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- // Make sure we use the correct descriptors for this command list. Could be done once per
- // actual command list but here is ok because there should be few command buffers.
- bindingTracker.SetID3D12DescriptorHeaps(commandList);
-
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
-
- bindingTracker.SetInComputePass(true);
- DAWN_TRY(RecordComputePass(
- commandContext, &bindingTracker,
- GetResourceUsages().computePasses[nextComputePassNumber]));
-
- nextComputePassNumber++;
- break;
- }
-
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* beginRenderPassCmd =
- mCommands.NextCommand<BeginRenderPassCmd>();
-
- const bool passHasUAV = TransitionAndClearForSyncScope(
- commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
- bindingTracker.SetInComputePass(false);
-
- LazyClearRenderPassAttachments(beginRenderPassCmd);
- DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
- passHasUAV));
-
- nextRenderPassNumber++;
- break;
- }
-
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
- Buffer* srcBuffer = ToBackend(copy->source.Get());
- Buffer* dstBuffer = ToBackend(copy->destination.Get());
-
- DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
- bool cleared;
- DAWN_TRY_ASSIGN(cleared,
- dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, copy->destinationOffset, copy->size));
- DAWN_UNUSED(cleared);
-
- srcBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopySrc);
- dstBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopyDst);
-
- commandList->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), copy->destinationOffset,
- srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
- break;
- }
-
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Buffer* buffer = ToBackend(copy->source.buffer.Get());
- Texture* texture = ToBackend(copy->destination.texture.Get());
-
- DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
- if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
- copy->destination.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, subresources);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
- }
-
- buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
- subresources);
-
- RecordCopyBufferToTexture(commandContext, copy->destination,
- buffer->GetD3D12Resource(), copy->source.offset,
- copy->source.bytesPerRow, copy->source.rowsPerImage,
- copy->copySize, texture, subresources.aspects);
-
- break;
- }
-
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* texture = ToBackend(copy->source.texture.Get());
- Buffer* buffer = ToBackend(copy->destination.buffer.Get());
-
- DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
-
- texture->EnsureSubresourceContentInitialized(commandContext, subresources);
-
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
- subresources);
- buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
- RecordCopyTextureToBuffer(commandList, copy->source, copy->destination, texture,
- buffer, copy->copySize);
-
- break;
- }
-
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* source = ToBackend(copy->source.texture.Get());
- Texture* destination = ToBackend(copy->destination.texture.Get());
-
- SubresourceRange srcRange =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
- SubresourceRange dstRange =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
- source->EnsureSubresourceContentInitialized(commandContext, srcRange);
- if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
- copy->destination.mipLevel)) {
- destination->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
- }
-
- if (copy->source.texture.Get() == copy->destination.texture.Get() &&
- copy->source.mipLevel == copy->destination.mipLevel) {
- // When there are overlapped subresources, the layout of the overlapped
- // subresources should all be COMMON instead of what we set now. Currently
- // it is not allowed to copy with overlapped subresources, but we still
- // add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
- copy->copySize.depthOrArrayLayers));
- }
- source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
- srcRange);
- destination->TrackUsageAndTransitionNow(commandContext,
- wgpu::TextureUsage::CopyDst, dstRange);
-
- ASSERT(srcRange.aspects == dstRange.aspects);
- if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
- copy->destination)) {
- DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
- commandContext, copy->source, copy->destination, copy->copySize));
- break;
- }
-
- if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
- commandList->CopyResource(destination->GetD3D12Resource(),
- source->GetD3D12Resource());
- } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
- destination->GetDimension() == wgpu::TextureDimension::e3D) {
- for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
- D3D12_TEXTURE_COPY_LOCATION srcLocation =
- ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
- 0, aspect);
- D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(
- destination, copy->destination.mipLevel, 0, aspect);
-
- D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
- copy->source.origin, copy->copySize);
-
- commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
- copy->destination.origin.y,
- copy->destination.origin.z, &srcLocation,
- &sourceRegion);
- }
- } else {
- // TODO(crbug.com/dawn/814): support copying with 1D.
- ASSERT(source->GetDimension() != wgpu::TextureDimension::e1D &&
- destination->GetDimension() != wgpu::TextureDimension::e1D);
- const dawn_native::Extent3D copyExtentOneSlice = {
- copy->copySize.width, copy->copySize.height, 1u};
-
- for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
- for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
- uint32_t sourceLayer = 0;
- uint32_t sourceZ = 0;
- switch (source->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- sourceLayer = copy->source.origin.z + z;
- break;
- case wgpu::TextureDimension::e3D:
- sourceZ = copy->source.origin.z + z;
- break;
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
-
- uint32_t destinationLayer = 0;
- uint32_t destinationZ = 0;
- switch (destination->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- destinationLayer = copy->destination.origin.z + z;
- break;
- case wgpu::TextureDimension::e3D:
- destinationZ = copy->destination.origin.z + z;
- break;
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
- D3D12_TEXTURE_COPY_LOCATION srcLocation =
- ComputeTextureCopyLocationForTexture(
- source, copy->source.mipLevel, sourceLayer, aspect);
-
- D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(destination,
- copy->destination.mipLevel,
- destinationLayer, aspect);
-
- Origin3D sourceOriginInSubresource = copy->source.origin;
- sourceOriginInSubresource.z = sourceZ;
- D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
- sourceOriginInSubresource, copyExtentOneSlice);
-
- commandList->CopyTextureRegion(
- &dstLocation, copy->destination.origin.x,
- copy->destination.origin.y, destinationZ, &srcLocation,
- &sourceRegion);
- }
- }
- }
- break;
- }
-
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
- }
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
- bool clearedToZero;
- DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, cmd->offset, cmd->size));
-
- if (!clearedToZero) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
- cmd->offset, cmd->size));
- }
-
- break;
- }
-
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- uint32_t firstQuery = cmd->firstQuery;
- uint32_t queryCount = cmd->queryCount;
- Buffer* destination = ToBackend(cmd->destination.Get());
- uint64_t destinationOffset = cmd->destinationOffset;
-
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, destination->EnsureDataInitializedAsDestination(
- commandContext, destinationOffset,
- queryCount * sizeof(uint64_t)));
- DAWN_UNUSED(cleared);
-
- // Resolving unavailable queries is undefined behaviour on D3D12, we only can
- // resolve the available part of sparse queries. In order to resolve the
- // unavailables as 0s, we need to clear the resolving region of the destination
- // buffer to 0s.
- auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
- auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
- bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
- if (hasUnavailableQueries) {
- DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
- destinationOffset,
- queryCount * sizeof(uint64_t)));
- }
-
- destination->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::QueryResolve);
-
- RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
- destination, destinationOffset);
-
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- RecordWriteTimestampCmd(commandList, cmd);
- break;
- }
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, offset, size));
- DAWN_UNUSED(cleared);
- dstBuffer->TrackUsageAndTransitionNow(commandContext,
- wgpu::BufferUsage::CopyDst);
- commandList->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), offset,
- ToBackend(uploadHandle.stagingBuffer)->GetResource(),
- uploadHandle.startOffset, size);
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- return {};
- }
-
- MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- const ComputePassResourceUsage& resourceUsages) {
- uint64_t currentDispatch = 0;
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- Command type;
- ComputePipeline* lastPipeline = nullptr;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
- // Skip noop dispatches, it can cause D3D12 warning from validation layers and
- // leads to device lost.
- if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
- break;
- }
-
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- DAWN_TRY(bindingTracker->Apply(commandContext));
-
- RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
- commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
- currentDispatch++;
- break;
- }
-
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
-
- TransitionAndClearForSyncScope(commandContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- DAWN_TRY(bindingTracker->Apply(commandContext));
-
- ComPtr<ID3D12CommandSignature> signature =
- lastPipeline->GetDispatchIndirectCommandSignature();
- commandList->ExecuteIndirect(
- signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
- dispatch->indirectOffset, nullptr, 0);
- currentDispatch++;
- break;
- }
-
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
-
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
- commandList->SetPipelineState(pipeline->GetPipelineState());
-
- bindingTracker->OnSetPipeline(pipeline);
- lastPipeline = pipeline;
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- BindGroup* group = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
-
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- RecordWriteTimestampCmd(commandList, cmd);
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- return {};
- }
-
- MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass,
- RenderPassBuilder* renderPassBuilder) {
- Device* device = ToBackend(GetDevice());
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
-
- // Set view attachment.
- CPUDescriptorHeapAllocation rtvAllocation;
- DAWN_TRY_ASSIGN(
- rtvAllocation,
- device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
-
- const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = rtvAllocation.GetBaseDescriptor();
-
- device->GetD3D12Device()->CreateRenderTargetView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
-
- renderPassBuilder->SetRenderTargetView(i, baseDescriptor);
-
- // Set color load operation.
- renderPassBuilder->SetRenderTargetBeginningAccess(
- i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
-
- // Set color store operation.
- if (attachmentInfo.resolveTarget != nullptr) {
- TextureView* resolveDestinationView = ToBackend(attachmentInfo.resolveTarget.Get());
- Texture* resolveDestinationTexture =
- ToBackend(resolveDestinationView->GetTexture());
-
- resolveDestinationTexture->TrackUsageAndTransitionNow(
- commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
- resolveDestinationView->GetSubresourceRange());
-
- renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
- view, resolveDestinationView);
- } else {
- renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
- }
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- RenderPassDepthStencilAttachmentInfo& attachmentInfo =
- renderPass->depthStencilAttachment;
- TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
-
- // Set depth attachment.
- CPUDescriptorHeapAllocation dsvAllocation;
- DAWN_TRY_ASSIGN(
- dsvAllocation,
- device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
-
- const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc = view->GetDSVDescriptor(
- attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
-
- device->GetD3D12Device()->CreateDepthStencilView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
-
- renderPassBuilder->SetDepthStencilView(baseDescriptor);
-
- const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
- const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
-
- // Set depth/stencil load operations.
- if (hasDepth) {
- renderPassBuilder->SetDepthAccess(
- attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
- attachmentInfo.clearDepth, view->GetD3D12Format());
- } else {
- renderPassBuilder->SetDepthNoAccess();
- }
-
- if (hasStencil) {
- renderPassBuilder->SetStencilAccess(
- attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
- attachmentInfo.clearStencil, view->GetD3D12Format());
- } else {
- renderPassBuilder->SetStencilNoAccess();
- }
-
- } else {
- renderPassBuilder->SetDepthStencilNoAccess();
- }
-
- return {};
- }
-
- void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
- const RenderPassBuilder* renderPassBuilder) const {
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- // Clear framebuffer attachments as needed.
- {
- for (ColorAttachmentIndex i(uint8_t(0));
- i < renderPassBuilder->GetColorAttachmentCount(); i++) {
- // Load op - color
- if (renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
- .BeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- commandList->ClearRenderTargetView(
- renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i].cpuDescriptor,
- renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
- .BeginningAccess.Clear.ClearValue.Color,
- 0, nullptr);
- }
- }
-
- if (renderPassBuilder->HasDepth()) {
- D3D12_CLEAR_FLAGS clearFlags = {};
- float depthClear = 0.0f;
- uint8_t stencilClear = 0u;
-
- if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->DepthBeginningAccess.Type ==
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
- }
- if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->StencilBeginningAccess.Type ==
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- stencilClear =
- renderPassBuilder->GetRenderPassDepthStencilDescriptor()
- ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
- }
-
- if (clearFlags) {
- commandList->ClearDepthStencilView(
- renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
- clearFlags, depthClear, stencilClear, 0, nullptr);
- }
- }
- }
-
- commandList->OMSetRenderTargets(
- static_cast<uint8_t>(renderPassBuilder->GetColorAttachmentCount()),
- renderPassBuilder->GetRenderTargetViews(), FALSE,
- renderPassBuilder->HasDepth()
- ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
- : nullptr);
- }
-
- MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- BeginRenderPassCmd* renderPass,
- const bool passHasUAV) {
- Device* device = ToBackend(GetDevice());
- const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
-
- // renderPassBuilder must be scoped to RecordRenderPass because any underlying
- // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
- // valid until after EndRenderPass() has been called.
- RenderPassBuilder renderPassBuilder(passHasUAV);
-
- DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
-
- // Use D3D12's native render pass API if it's available, otherwise emulate the
- // beginning and ending access operations.
- if (useRenderPass) {
- commandContext->GetCommandList4()->BeginRenderPass(
- static_cast<uint8_t>(renderPassBuilder.GetColorAttachmentCount()),
- renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
- renderPassBuilder.HasDepth()
- ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
- : nullptr,
- renderPassBuilder.GetRenderPassFlags());
- } else {
- EmulateBeginRenderPass(commandContext, &renderPassBuilder);
- }
-
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- // Set up default dynamic state
- {
- uint32_t width = renderPass->width;
- uint32_t height = renderPass->height;
- D3D12_VIEWPORT viewport = {
- 0.f, 0.f, static_cast<float>(width), static_cast<float>(height), 0.f, 1.f};
- D3D12_RECT scissorRect = {0, 0, static_cast<long>(width), static_cast<long>(height)};
- commandList->RSSetViewports(1, &viewport);
- commandList->RSSetScissorRects(1, &scissorRect);
-
- static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
- commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
-
- commandList->OMSetStencilRef(0);
- }
-
- RenderPipeline* lastPipeline = nullptr;
- VertexBufferTracker vertexBufferTracker = {};
-
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
-
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
- RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
- draw->firstInstance);
- commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
- draw->firstVertex, draw->firstInstance);
- break;
- }
-
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
- RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
- draw->firstInstance);
- commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
- draw->firstIndex, draw->baseVertex,
- draw->firstInstance);
- break;
- }
-
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
-
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
-
- // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
- // Zero the index offset values to avoid reusing values from the previous draw
- RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
-
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ComPtr<ID3D12CommandSignature> signature =
- ToBackend(GetDevice())->GetDrawIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
- draw->indirectOffset, nullptr, 0);
- break;
- }
-
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-
- DAWN_TRY(bindingTracker->Apply(commandContext));
- vertexBufferTracker.Apply(commandList, lastPipeline);
-
- // TODO(dawn:548): remove this once builtins are emulated for indirect draws.
- // Zero the index offset values to avoid reusing values from the previous draw
- RecordFirstIndexOffset(commandList, lastPipeline, 0, 0);
-
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
-
- ComPtr<ID3D12CommandSignature> signature =
- ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
- draw->indirectOffset, nullptr, 0);
- break;
- }
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- iter->NextCommand<PopDebugGroupCmd>();
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixEndEventOnCommandList(commandList);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
-
- if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
- // PIX color is 1 byte per channel in ARGB format
- constexpr uint64_t kPIXBlackColor = 0xff000000;
- ToBackend(GetDevice())
- ->GetFunctions()
- ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
- }
- break;
- }
-
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
- commandList->SetPipelineState(pipeline->GetPipelineState());
- commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
-
- bindingTracker->OnSetPipeline(pipeline);
-
- lastPipeline = pipeline;
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- BindGroup* group = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
-
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
-
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-
- D3D12_INDEX_BUFFER_VIEW bufferView;
- bufferView.Format = DXGIIndexFormat(cmd->format);
- bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
- bufferView.SizeInBytes = cmd->size;
-
- commandList->IASetIndexBuffer(&bufferView);
- break;
- }
-
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-
- vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
- cmd->offset, cmd->size);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
- return {};
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- if (useRenderPass) {
- commandContext->GetCommandList4()->EndRenderPass();
- } else if (renderPass->attachmentState->GetSampleCount() > 1) {
- ResolveMultisampledRenderPass(commandContext, renderPass);
- }
- return {};
- }
-
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
-
- commandList->OMSetStencilRef(cmd->reference);
- break;
- }
-
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- D3D12_VIEWPORT viewport;
- viewport.TopLeftX = cmd->x;
- viewport.TopLeftY = cmd->y;
- viewport.Width = cmd->width;
- viewport.Height = cmd->height;
- viewport.MinDepth = cmd->minDepth;
- viewport.MaxDepth = cmd->maxDepth;
-
- commandList->RSSetViewports(1, &viewport);
- break;
- }
-
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- D3D12_RECT rect;
- rect.left = cmd->x;
- rect.top = cmd->y;
- rect.right = cmd->x + cmd->width;
- rect.bottom = cmd->y + cmd->height;
-
- commandList->RSSetScissorRects(1, &rect);
- break;
- }
-
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
- commandList->OMSetBlendFactor(color.data());
- break;
- }
-
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- DAWN_TRY(EncodeRenderBundleCommand(iter, type));
- }
- }
- break;
- }
-
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
- D3D12_QUERY_TYPE_BINARY_OCCLUSION);
- commandList->BeginQuery(querySet->GetQueryHeap(),
- D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
- break;
- }
-
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
- D3D12_QUERY_TYPE_BINARY_OCCLUSION);
- commandList->EndQuery(querySet->GetQueryHeap(),
- D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- RecordWriteTimestampCmd(commandList, cmd);
- break;
- }
-
- default: {
- DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
- break;
- }
- }
- }
- return {};
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
deleted file mode 100644
index 51dc52728bc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
-#define DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
-
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native {
- struct BeginRenderPassCmd;
-} // namespace dawn_native
-
-namespace dawn_native { namespace d3d12 {
-
- class BindGroupStateTracker;
- class CommandRecordingContext;
- class RenderPassBuilder;
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordCommands(CommandRecordingContext* commandContext);
-
- private:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- const ComputePassResourceUsage& resourceUsages);
- MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- BeginRenderPassCmd* renderPass,
- bool passHasUAV);
- MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
- BeginRenderPassCmd* renderPass,
- RenderPassBuilder* renderPassBuilder);
- void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
- const RenderPassBuilder* renderPassBuilder) const;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
deleted file mode 100644
index 8faa46e0369..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/CommandAllocatorManager.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
- ASSERT(IsOpen());
- mSharedTextures.insert(texture);
- }
-
- MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
- CommandAllocatorManager* commandAllocationManager) {
- ASSERT(!IsOpen());
- ID3D12CommandAllocator* commandAllocator;
- DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
- if (mD3d12CommandList != nullptr) {
- MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
- "D3D12 resetting command list");
- if (error.IsError()) {
- mD3d12CommandList.Reset();
- DAWN_TRY(std::move(error));
- }
- } else {
- ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
- DAWN_TRY(CheckHRESULT(
- d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
- nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
- "D3D12 creating direct command list"));
- mD3d12CommandList = std::move(d3d12GraphicsCommandList);
- // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
- // pass APIs introduced in Windows build 1809.
- mD3d12CommandList.As(&mD3d12CommandList4);
- }
-
- mIsOpen = true;
-
- return {};
- }
-
- MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
- if (IsOpen()) {
- // Shared textures must be transitioned to common state after the last usage in order
- // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
- // common state right before command list submission. TransitionUsageNow itself ensures
- // no unnecessary transitions happen if the resources is already in the common state.
- for (Texture* texture : mSharedTextures) {
- texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
- }
-
- MaybeError error =
- CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
- if (error.IsError()) {
- Release();
- DAWN_TRY(std::move(error));
- }
- DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
- mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
-
- ID3D12CommandList* d3d12CommandList = GetCommandList();
- device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
-
- mIsOpen = false;
- mSharedTextures.clear();
- mHeapsPendingUsage.clear();
- }
- return {};
- }
-
- void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
- // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
- // tracking it more than once.
- if (heap->GetLastUsage() < serial) {
- heap->SetLastUsage(serial);
- mHeapsPendingUsage.push_back(heap);
- }
- }
-
- ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
- ASSERT(mD3d12CommandList != nullptr);
- ASSERT(IsOpen());
- return mD3d12CommandList.Get();
- }
-
- // This function will fail on Windows versions prior to 1809. Support must be queried through
- // the device before calling.
- ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
- ASSERT(IsOpen());
- ASSERT(mD3d12CommandList != nullptr);
- return mD3d12CommandList4.Get();
- }
-
- void CommandRecordingContext::Release() {
- mD3d12CommandList.Reset();
- mD3d12CommandList4.Reset();
- mIsOpen = false;
- mSharedTextures.clear();
- mHeapsPendingUsage.clear();
- mTempBuffers.clear();
- }
-
- bool CommandRecordingContext::IsOpen() const {
- return mIsOpen;
- }
-
- void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
- mTempBuffers.emplace_back(tempBuffer);
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
deleted file mode 100644
index 6c6dc37dd0f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
-#define DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/d3d12/BufferD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include <set>
-
-namespace dawn_native { namespace d3d12 {
- class CommandAllocatorManager;
- class Device;
- class Heap;
- class Texture;
-
- class CommandRecordingContext {
- public:
- void AddToSharedTextureList(Texture* texture);
- MaybeError Open(ID3D12Device* d3d12Device,
- CommandAllocatorManager* commandAllocationManager);
-
- ID3D12GraphicsCommandList* GetCommandList() const;
- ID3D12GraphicsCommandList4* GetCommandList4() const;
- void Release();
- bool IsOpen() const;
-
- MaybeError ExecuteCommandList(Device* device);
-
- void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
-
- void AddToTempBuffers(Ref<Buffer> tempBuffer);
-
- private:
- ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
- ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
- bool mIsOpen = false;
- std::set<Texture*> mSharedTextures;
- std::vector<Heap*> mHeapsPendingUsage;
-
- std::vector<Ref<Buffer>> mTempBuffers;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
deleted file mode 100644
index 82579f321aa..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ComputePipelineD3D12.h"
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-#include "dawn_native/d3d12/ShaderModuleD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
-
- MaybeError ComputePipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
- uint32_t compileFlags = 0;
-
- if (!device->IsToggleEnabled(Toggle::UseDXC) &&
- !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
- compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
- }
-
- if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
- compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
- }
-
- // SPRIV-cross does matrix multiplication expecting row major matrices
- compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
-
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- ShaderModule* module = ToBackend(computeStage.module.Get());
-
- D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
- d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
-
- CompiledShader compiledShader;
- DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
- ToBackend(GetLayout()), compileFlags));
- d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
- auto* d3d12Device = device->GetD3D12Device();
- DAWN_TRY(CheckHRESULT(
- d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
- "D3D12 creating pipeline state"));
-
- SetLabelImpl();
-
- return {};
- }
-
- ComputePipeline::~ComputePipeline() = default;
-
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
- }
-
- ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
- return mPipelineState.Get();
- }
-
- void ComputePipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
- GetLabel());
- }
-
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
- bool ComputePipeline::UsesNumWorkgroups() const {
- return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
- }
-
- ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
- if (UsesNumWorkgroups()) {
- return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
- }
- return ToBackend(GetDevice())->GetDispatchIndirectSignature();
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
deleted file mode 100644
index ddf7476ed18..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
-#define DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
-
-#include "dawn_native/ComputePipeline.h"
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- ComputePipeline() = delete;
-
- ID3D12PipelineState* GetPipelineState() const;
-
- MaybeError Initialize() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- bool UsesNumWorkgroups() const;
-
- ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
-
- private:
- ~ComputePipeline() override;
-
- void DestroyImpl() override;
-
- using ComputePipelineBase::ComputePipelineBase;
- ComPtr<ID3D12PipelineState> mPipelineState;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_COMPUTEPIPELINED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp
deleted file mode 100644
index b8c28f6c43e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// D3D12Backend.cpp: contains the definition of symbols exported by D3D12Backend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-#include "dawn_native/d3d12/D3D11on12Util.h"
-
-#include "common/HashUtils.h"
-#include "common/Log.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
- if (d3d11on12Device == nullptr) {
- return;
- }
-
- ComPtr<ID3D11Device> d3d11Device;
- if (FAILED(d3d11on12Device.As(&d3d11Device))) {
- return;
- }
-
- ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
- d3d11Device->GetImmediateContext(&d3d11DeviceContext);
-
- ASSERT(d3d11DeviceContext != nullptr);
-
- // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
- // are not released until work is submitted to the device context and flushed.
- // The most minimal work we can get away with is issuing a TiledResourceBarrier.
-
- // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
- // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
- ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
- if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
- return;
- }
-
- d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
- d3d11DeviceContext2->Flush();
- }
-
- D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
- ComPtr<ID3D11On12Device> d3d11On12Device)
- : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
- }
-
- D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
- ComPtr<ID3D11On12Device> d3d11On12Device)
- : mD3D11on12Device(std::move(d3d11On12Device)) {
- }
-
- D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
- if (mDXGIKeyedMutex == nullptr) {
- return;
- }
-
- ComPtr<ID3D11Resource> d3d11Resource;
- if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
- return;
- }
-
- ASSERT(mD3D11on12Device != nullptr);
-
- ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
- mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
-
- d3d11Resource.Reset();
- mDXGIKeyedMutex.Reset();
-
- Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
- }
-
- ComPtr<IDXGIKeyedMutex> D3D11on12ResourceCacheEntry::GetDXGIKeyedMutex() const {
- ASSERT(mDXGIKeyedMutex != nullptr);
- return mDXGIKeyedMutex;
- }
-
- size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
- const Ref<D3D11on12ResourceCacheEntry> a) const {
- size_t hash = 0;
- HashCombine(&hash, a->mD3D11on12Device.Get());
- return hash;
- }
-
- bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
- const Ref<D3D11on12ResourceCacheEntry> a,
- const Ref<D3D11on12ResourceCacheEntry> b) const {
- return a->mD3D11on12Device == b->mD3D11on12Device;
- }
-
- D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
-
- D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
-
- Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
- WGPUDevice device,
- ID3D12Resource* d3d12Resource) {
- Device* backendDevice = reinterpret_cast<Device*>(device);
- // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
- // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
- // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
- // using the 11on12 device as the cache key.
- ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
- if (d3d11on12Device == nullptr) {
- dawn::ErrorLog() << "Unable to create 11on12 device for external image";
- return nullptr;
- }
-
- D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
- auto iter = mCache.find(&blueprint);
- if (iter != mCache.end()) {
- return *iter;
- }
-
- // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
- // are a viable alternative but are, unfortunately, not available on all versions of Windows
- // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
- // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
- ComPtr<ID3D11Texture2D> d3d11Texture;
- D3D11_RESOURCE_FLAGS resourceFlags;
- resourceFlags.BindFlags = 0;
- resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
- resourceFlags.CPUAccessFlags = 0;
- resourceFlags.StructureByteStride = 0;
- if (FAILED(d3d11on12Device->CreateWrappedResource(
- d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
- D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
- return nullptr;
- }
-
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
- if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
- return nullptr;
- }
-
- // Keep this cache from growing unbounded.
- // TODO(dawn:625): Consider using a replacement policy based cache.
- if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
- mCache.clear();
- }
-
- Ref<D3D11on12ResourceCacheEntry> entry =
- AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
- mCache.insert(entry);
-
- return entry;
- }
-
-}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h
deleted file mode 100644
index 91db081a8e2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D11ON12UTIL_H_
-#define DAWNNATIVE_D3D11ON12UTIL_H_
-
-#include "common/RefCounted.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include <dawn_native/DawnNative.h>
-#include <memory>
-#include <unordered_set>
-
-struct ID3D11On12Device;
-struct IDXGIKeyedMutex;
-
-namespace dawn_native { namespace d3d12 {
-
- // Wraps 11 wrapped resources in a cache.
- class D3D11on12ResourceCacheEntry : public RefCounted {
- public:
- D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
- D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
- ComPtr<ID3D11On12Device> d3d11on12Device);
- ~D3D11on12ResourceCacheEntry();
-
- ComPtr<IDXGIKeyedMutex> GetDXGIKeyedMutex() const;
-
- // Functors necessary for the
- // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
- struct HashFunc {
- size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
- };
-
- struct EqualityFunc {
- bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
- const Ref<D3D11on12ResourceCacheEntry> b) const;
- };
-
- private:
- ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
- ComPtr<ID3D11On12Device> mD3D11on12Device;
- };
-
- // |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
- // Each entry represents a 11 resource that is exclusively accessed by Dawn device.
- // Since each Dawn device creates and stores a 11on12 device, the 11on12 device
- // is used as the key for the cache entry which ensures only the same 11 wrapped
- // resource is re-used and also fully released.
- //
- // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
- // and special release code per ProduceTexture(device).
- class D3D11on12ResourceCache {
- public:
- D3D11on12ResourceCache();
- ~D3D11on12ResourceCache();
-
- Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
- WGPUDevice device,
- ID3D12Resource* d3d12Resource);
-
- private:
- // TODO(dawn:625): Figure out a large enough cache size.
- static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
-
- // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
- // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
- // waiting until Dawn device to shutdown.
- using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
- D3D11on12ResourceCacheEntry::HashFunc,
- D3D11on12ResourceCacheEntry::EqualityFunc>;
-
- Cache mCache;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D11ON12UTIL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
deleted file mode 100644
index 35baa1b9606..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// D3D12Backend.cpp: contains the definition of symbols exported by D3D12Backend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-#include "dawn_native/D3D12Backend.h"
-
-#include "common/Log.h"
-#include "common/Math.h"
-#include "common/SwapChainUtils.h"
-#include "dawn_native/d3d12/D3D11on12Util.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
- return ToBackend(FromAPI(device))->GetD3D12Device();
- }
-
- DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
- impl.textureUsage = WGPUTextureUsage_Present;
-
- return impl;
- }
-
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
-
- ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
- : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
- }
-
- ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
- const WGPUTextureDescriptor* descriptor)
- : mD3D12Resource(std::move(d3d12Resource)),
- mUsage(descriptor->usage),
- mDimension(descriptor->dimension),
- mSize(descriptor->size),
- mFormat(descriptor->format),
- mMipLevelCount(descriptor->mipLevelCount),
- mSampleCount(descriptor->sampleCount) {
- ASSERT(!descriptor->nextInChain ||
- descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
- if (descriptor->nextInChain) {
- mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
- descriptor->nextInChain)
- ->internalUsage;
- }
- mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
- }
-
- ExternalImageDXGI::~ExternalImageDXGI() = default;
-
- WGPUTexture ExternalImageDXGI::ProduceTexture(
- WGPUDevice device,
- const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- // Ensure the texture usage is allowed
- if (!IsSubset(descriptor->usage, mUsage)) {
- dawn::ErrorLog() << "Texture usage is not valid for external image";
- return nullptr;
- }
-
- TextureDescriptor textureDescriptor = {};
- textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
- textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
- textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
- textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
- textureDescriptor.mipLevelCount = mMipLevelCount;
- textureDescriptor.sampleCount = mSampleCount;
-
- DawnTextureInternalUsageDescriptor internalDesc = {};
- if (mUsageInternal) {
- textureDescriptor.nextInChain = &internalDesc;
- internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
- internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
- }
-
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
- mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
- if (d3d11on12Resource == nullptr) {
- dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
- return nullptr;
- }
-
- Ref<TextureBase> texture = backendDevice->CreateExternalTexture(
- &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
- ExternalMutexSerial(descriptor->acquireMutexKey),
- ExternalMutexSerial(descriptor->releaseMutexKey), descriptor->isSwapChainTexture,
- descriptor->isInitialized);
-
- return ToAPI(texture.Detach());
- }
-
- // static
- std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
- WGPUDevice device,
- const ExternalImageDescriptorDXGISharedHandle* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
- if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
- descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
- return nullptr;
- }
-
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- if (backendDevice->ConsumedError(
- ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
- return nullptr;
- }
-
- if (backendDevice->ConsumedError(
- ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
- "validating that a D3D12 external image can be wrapped with %s",
- textureDescriptor)) {
- return nullptr;
- }
-
- if (backendDevice->ConsumedError(
- ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
- return nullptr;
- }
-
- // Shared handle is assumed to support resource sharing capability. The resource
- // shared capability tier must agree to share resources between D3D devices.
- const Format* format =
- backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
- if (format->IsMultiPlanar()) {
- if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
- backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
- return nullptr;
- }
- }
-
- std::unique_ptr<ExternalImageDXGI> result(
- new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
- return result;
- }
-
- uint64_t SetExternalMemoryReservation(WGPUDevice device,
- uint64_t requestedReservationSize,
- MemorySegment memorySegment) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
- memorySegment, requestedReservationSize);
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
- : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
deleted file mode 100644
index efc9fe09bad..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/D3D12Error.h"
-
-#include <iomanip>
-#include <sstream>
-#include <string>
-
-namespace dawn_native { namespace d3d12 {
- MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
- if (DAWN_LIKELY(SUCCEEDED(result))) {
- return {};
- }
-
- std::ostringstream messageStream;
- messageStream << context << " failed with ";
- if (result == E_FAKE_ERROR_FOR_TESTING) {
- messageStream << "E_FAKE_ERROR_FOR_TESTING";
- } else {
- messageStream << "0x" << std::uppercase << std::setfill('0') << std::setw(8) << std::hex
- << result;
- }
-
- if (result == DXGI_ERROR_DEVICE_REMOVED) {
- return DAWN_DEVICE_LOST_ERROR(messageStream.str());
- } else {
- return DAWN_INTERNAL_ERROR(messageStream.str());
- }
- }
-
- MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
- if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
- return DAWN_OUT_OF_MEMORY_ERROR(context);
- }
-
- return CheckHRESULTImpl(result, context);
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
deleted file mode 100644
index ade06fe62e6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_D3D12ERROR_H_
-#define DAWNNATIVE_D3D12_D3D12ERROR_H_
-
-#include <d3d12.h>
-#include "dawn_native/Error.h"
-#include "dawn_native/ErrorInjector.h"
-
-namespace dawn_native { namespace d3d12 {
-
- constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
- constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
- MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
-
- // Returns a success only if result of HResult is success
- MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
-
- // Uses CheckRESULT but returns OOM specific error when recoverable.
- MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
-
-#define CheckHRESULT(resultIn, contextIn) \
- ::dawn_native::d3d12::CheckHRESULTImpl( \
- INJECT_ERROR_OR_RUN(resultIn, E_FAKE_ERROR_FOR_TESTING), contextIn)
-#define CheckOutOfMemoryHRESULT(resultIn, contextIn) \
- ::dawn_native::d3d12::CheckOutOfMemoryHRESULTImpl( \
- INJECT_ERROR_OR_RUN(resultIn, E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING, \
- E_FAKE_ERROR_FOR_TESTING), \
- contextIn)
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
deleted file mode 100644
index d7a0f5e66c4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/D3D12Info.h"
-
-#include "common/GPUInfo.h"
-#include "dawn_native/d3d12/AdapterD3D12.h"
-#include "dawn_native/d3d12/BackendD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-
-namespace dawn_native { namespace d3d12 {
-
- ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
- D3D12DeviceInfo info = {};
-
- // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
- // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
- // for backwards compat.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
- D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
- DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
- &arch, sizeof(arch)),
- "ID3D12Device::CheckFeatureSupport"));
-
- info.isUMA = arch.UMA;
-
- D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
- DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
- &options, sizeof(options)),
- "ID3D12Device::CheckFeatureSupport"));
-
- info.resourceHeapTier = options.ResourceHeapTier;
-
- // Windows builds 1809 and above can use the D3D12 render pass API. If we query
- // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
- // the render pass API.
- info.supportsRenderPass = false;
- D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
- // Performance regressions been observed when using a render pass on Intel graphics
- // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
- // pass on these platforms.
- if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
- !gpu_info::IsIntel(adapter.GetPCIInfo().vendorId)) {
- info.supportsRenderPass = true;
- }
- }
-
- // Used to share resources cross-API. If we query CheckFeatureSupport for
- // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
- info.supportsSharedResourceCapabilityTier1 = false;
- D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
- // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
- // is used by Dawn, check for Tier 1.
- if (featureOptions4.SharedResourceCompatibilityTier >=
- D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
- info.supportsSharedResourceCapabilityTier1 = true;
- }
- }
-
- D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
- {D3D_SHADER_MODEL_6_1},
- {D3D_SHADER_MODEL_6_0},
- {D3D_SHADER_MODEL_5_1}};
- uint32_t driverShaderModel = 0;
- for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
- driverShaderModel = shaderModel.HighestShaderModel;
- break;
- }
- }
-
- if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
- return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
- }
-
- // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
- ASSERT(driverShaderModel <= 0xFF);
- uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
- uint32_t shaderModelMinor = (driverShaderModel & 0xF);
-
- ASSERT(shaderModelMajor < 10);
- ASSERT(shaderModelMinor < 10);
- info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
-
- // Profiles are always <stage>s_<minor>_<major> so we build the s_<minor>_major and add
- // it to each of the stage's suffix.
- std::wstring profileSuffix = L"s_M_n";
- profileSuffix[2] = wchar_t('0' + shaderModelMajor);
- profileSuffix[4] = wchar_t('0' + shaderModelMinor);
-
- info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
- info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
- info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
-
- D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
- if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
- D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
- info.supportsShaderFloat16 = driverShaderModel >= D3D_SHADER_MODEL_6_2 &&
- featureData4.Native16BitShaderOpsSupported;
- }
-
- return std::move(info);
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
deleted file mode 100644
index a7195951adf..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_D3D12INFO_H_
-#define DAWNNATIVE_D3D12_D3D12INFO_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/PerStage.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Adapter;
-
- struct D3D12DeviceInfo {
- bool isUMA;
- uint32_t resourceHeapTier;
- bool supportsRenderPass;
- bool supportsShaderFloat16;
- // shaderModel indicates the maximum supported shader model, for example, the value 62
- // indicates that current driver supports the maximum shader model is shader model 6.2.
- uint32_t shaderModel;
- PerStage<std::wstring> shaderProfiles;
- bool supportsSharedResourceCapabilityTier1;
- };
-
- ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_D3D12INFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
deleted file mode 100644
index 210478934a1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ /dev/null
@@ -1,745 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-#include "common/GPUInfo.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/d3d12/AdapterD3D12.h"
-#include "dawn_native/d3d12/BackendD3D12.h"
-#include "dawn_native/d3d12/BindGroupD3D12.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/CommandAllocatorManager.h"
-#include "dawn_native/d3d12/CommandBufferD3D12.h"
-#include "dawn_native/d3d12/ComputePipelineD3D12.h"
-#include "dawn_native/d3d12/D3D11on12Util.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-#include "dawn_native/d3d12/QuerySetD3D12.h"
-#include "dawn_native/d3d12/QueueD3D12.h"
-#include "dawn_native/d3d12/RenderPipelineD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
-#include "dawn_native/d3d12/SamplerD3D12.h"
-#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
-#include "dawn_native/d3d12/ShaderModuleD3D12.h"
-#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/StagingBufferD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/SwapChainD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-#include <sstream>
-
-namespace dawn_native { namespace d3d12 {
-
- // TODO(dawn:155): Figure out these values.
- static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
- static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
-
- // Value may change in the future to better accomodate large clears.
- static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4; // 4 Mb
-
- static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
-
- // static
- ResultOrError<Device*> Device::Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize());
- return device.Detach();
- }
-
- MaybeError Device::Initialize() {
- InitTogglesFromDriver();
-
- mD3d12Device = ToBackend(GetAdapter())->GetDevice();
-
- ASSERT(mD3d12Device != nullptr);
-
- // Create device-global objects
- D3D12_COMMAND_QUEUE_DESC queueDesc = {};
- queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
- queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
- "D3D12 create command queue"));
-
- if (IsFeatureEnabled(Feature::TimestampQuery)) {
- // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
- // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
- // always support timestamps except where there are bugs in Windows container and vGPU
- // implementations.
- uint64_t frequency;
- DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
- "D3D12 get timestamp frequency"));
- // Calculate the period in nanoseconds by the frequency.
- mTimestampPeriod = static_cast<float>(1e9) / frequency;
- }
-
- // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
- // value.
- mCommandQueue.As(&mD3d12SharingContract);
-
- DAWN_TRY(
- CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
- D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
- "D3D12 create fence"));
-
- mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
- ASSERT(mFenceEvent != nullptr);
-
- // Initialize backend services
- mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
-
- // Zero sized allocator is never requested and does not need to exist.
- for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
- mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
- this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
- }
-
- for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
- mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
- this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
- }
-
- mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
- this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
-
- mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
- this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
-
- mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
-
- mResidencyManager = std::make_unique<ResidencyManager>(this);
- mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
-
- // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
- DAWN_TRY_ASSIGN(
- mSamplerShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
-
- DAWN_TRY_ASSIGN(
- mViewShaderVisibleDescriptorAllocator,
- ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
-
- // Initialize indirect commands
- D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
-
- D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
- programDesc.ByteStride = 3 * sizeof(uint32_t);
- programDesc.NumArgumentDescs = 1;
- programDesc.pArgumentDescs = &argumentDesc;
-
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDispatchIndirectSignature));
-
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
- programDesc.ByteStride = 4 * sizeof(uint32_t);
-
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDrawIndirectSignature));
-
- argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
- programDesc.ByteStride = 5 * sizeof(uint32_t);
-
- GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
- IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
-
- DAWN_TRY(DeviceBase::Initialize(new Queue(this)));
- // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
- // device initialization to call NextSerial
- DAWN_TRY(NextSerial());
-
- // The environment can only use DXC when it's available. Override the decision if it is not
- // applicable.
- DAWN_TRY(ApplyUseDxcToggle());
-
- DAWN_TRY(CreateZeroBuffer());
-
- return {};
- }
-
- Device::~Device() {
- Destroy();
- }
-
- ID3D12Device* Device::GetD3D12Device() const {
- return mD3d12Device.Get();
- }
-
- ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
- return mCommandQueue;
- }
-
- ID3D12SharingContract* Device::GetSharingContract() const {
- return mD3d12SharingContract.Get();
- }
-
- ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
- return mDispatchIndirectSignature;
- }
-
- ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
- return mDrawIndirectSignature;
- }
-
- ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
- return mDrawIndexedIndirectSignature;
- }
-
- ComPtr<IDXGIFactory4> Device::GetFactory() const {
- return ToBackend(GetAdapter())->GetBackend()->GetFactory();
- }
-
- MaybeError Device::ApplyUseDxcToggle() {
- if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
- ForceSetToggle(Toggle::UseDXC, false);
- } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
- // Currently we can only use DXC to compile HLSL shaders using float16.
- ForceSetToggle(Toggle::UseDXC, true);
- }
-
- if (IsToggleEnabled(Toggle::UseDXC)) {
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
- DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
- }
-
- return {};
- }
-
- ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
- }
-
- ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
- }
-
- ComPtr<IDxcValidator> Device::GetDxcValidator() const {
- return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
- }
-
- const PlatformFunctions* Device::GetFunctions() const {
- return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
- }
-
- CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
- return mCommandAllocatorManager.get();
- }
-
- ResidencyManager* Device::GetResidencyManager() const {
- return mResidencyManager.get();
- }
-
- ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
- // Callers of GetPendingCommandList do so to record commands. Only reserve a command
- // allocator when it is needed so we don't submit empty command lists
- if (!mPendingCommands.IsOpen()) {
- DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
- }
- return &mPendingCommands;
- }
-
- MaybeError Device::CreateZeroBuffer() {
- BufferDescriptor zeroBufferDescriptor;
- zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
- zeroBufferDescriptor.size = kZeroBufferSize;
- zeroBufferDescriptor.label = "ZeroBuffer_Internal";
- DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
-
- return {};
- }
-
- MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
- BufferBase* destination,
- uint64_t offset,
- uint64_t size) {
- // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
- // the allocation of the staging buffer causes various end2end tests that monitor heap usage
- // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
- // used to avoid that.
- if (!mZeroBuffer->IsDataInitialized()) {
- DynamicUploader* uploader = GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
-
- memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
-
- CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
- uploadHandle.startOffset, mZeroBuffer.Get(), 0,
- kZeroBufferSize);
-
- mZeroBuffer->SetIsDataInitialized();
- }
-
- Buffer* dstBuffer = ToBackend(destination);
-
- // Necessary to ensure residency of the zero buffer.
- mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
- dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
- while (size > 0) {
- uint64_t copySize = std::min(kZeroBufferSize, size);
- commandContext->GetCommandList()->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0,
- copySize);
-
- offset += copySize;
- size -= copySize;
- }
-
- return {};
- }
-
- MaybeError Device::TickImpl() {
- // Perform cleanup operations to free unused objects
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
-
- mResourceAllocatorManager->Tick(completedSerial);
- DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
- mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
- mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
- mRenderTargetViewAllocator->Tick(completedSerial);
- mDepthStencilViewAllocator->Tick(completedSerial);
- mUsedComObjectRefs.ClearUpTo(completedSerial);
-
- if (mPendingCommands.IsOpen()) {
- DAWN_TRY(ExecutePendingCommandContext());
- DAWN_TRY(NextSerial());
- }
-
- DAWN_TRY(CheckDebugLayerAndGenerateErrors());
-
- return {};
- }
-
- MaybeError Device::NextSerial() {
- IncrementLastSubmittedCommandSerial();
-
- return CheckHRESULT(
- mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
- "D3D12 command queue signal fence");
- }
-
- MaybeError Device::WaitForSerial(ExecutionSerial serial) {
- DAWN_TRY(CheckPassedSerials());
- if (GetCompletedCommandSerial() < serial) {
- DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
- "D3D12 set event on completion"));
- WaitForSingleObject(mFenceEvent, INFINITE);
- DAWN_TRY(CheckPassedSerials());
- }
- return {};
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
- if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
- // GetCompletedValue returns UINT64_MAX if the device was removed.
- // Try to query the failure reason.
- DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
- "ID3D12Device::GetDeviceRemovedReason"));
- // Otherwise, return a generic device lost error.
- return DAWN_DEVICE_LOST_ERROR("Device lost");
- }
-
- if (completedSerial <= GetCompletedCommandSerial()) {
- return ExecutionSerial(0);
- }
-
- return completedSerial;
- }
-
- void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
- mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
- }
-
- MaybeError Device::ExecutePendingCommandContext() {
- return mPendingCommands.ExecuteCommandList(this);
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- CommandRecordingContext* commandRecordingContext;
- DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
-
- Buffer* dstBuffer = ToBackend(destination);
-
- bool cleared;
- DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
- commandRecordingContext, destinationOffset, size));
- DAWN_UNUSED(cleared);
-
- CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
- destinationOffset, size);
-
- return {};
- }
-
- void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
- StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- ASSERT(commandContext != nullptr);
- Buffer* dstBuffer = ToBackend(destination);
- StagingBuffer* srcBuffer = ToBackend(source);
- dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
- commandContext->GetCommandList()->CopyBufferRegion(
- dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
- sourceOffset, size);
- }
-
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
- Texture* texture = ToBackend(dst->texture.Get());
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
- SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
-
- if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
-
- texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
-
- RecordCopyBufferToTexture(commandContext, *dst, ToBackend(source)->GetResource(),
- src.offset, src.bytesPerRow, src.rowsPerImage, copySizePixels,
- texture, range.aspects);
-
- return {};
- }
-
- void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
- mResourceAllocatorManager->DeallocateMemory(allocation);
- }
-
- ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage) {
- return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
- initialUsage);
- }
-
- Ref<TextureBase> Device::CreateExternalTexture(
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized) {
- Ref<Texture> dawnTexture;
- if (ConsumedError(
- Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
- std::move(d3d11on12Resource), acquireMutexKey,
- releaseMutexKey, isSwapChainTexture, isInitialized),
- &dawnTexture)) {
- return nullptr;
- }
- return {dawnTexture};
- }
-
- ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
- if (mD3d11On12Device == nullptr) {
- ComPtr<ID3D11Device> d3d11Device;
- D3D_FEATURE_LEVEL d3dFeatureLevel;
- IUnknown* const iUnknownQueue = mCommandQueue.Get();
- if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
- &iUnknownQueue, 1, 1, &d3d11Device,
- nullptr, &d3dFeatureLevel))) {
- return nullptr;
- }
-
- ComPtr<ID3D11On12Device> d3d11on12Device;
- HRESULT hr = d3d11Device.As(&d3d11on12Device);
- ASSERT(SUCCEEDED(hr));
-
- mD3d11On12Device = std::move(d3d11on12Device);
- }
- return mD3d11On12Device;
- }
-
- const D3D12DeviceInfo& Device::GetDeviceInfo() const {
- return ToBackend(GetAdapter())->GetDeviceInfo();
- }
-
- void Device::InitTogglesFromDriver() {
- const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
- SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
- SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
- SetToggle(Toggle::UseD3D12ResidencyManagement, true);
- SetToggle(Toggle::UseDXC, false);
-
- // Disable optimizations when using FXC
- // See https://crbug.com/dawn/1203
- SetToggle(Toggle::FxcOptimizations, false);
-
- // By default use the maximum shader-visible heap size allowed.
- SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
-
- PCIInfo pciInfo = GetAdapter()->GetPCIInfo();
-
- // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
- // See http://crbug.com/1161355 for more information.
- if (gpu_info::IsIntel(pciInfo.vendorId) &&
- (gpu_info::IsSkylake(pciInfo.deviceId) || gpu_info::IsKabylake(pciInfo.deviceId) ||
- gpu_info::IsCoffeelake(pciInfo.deviceId))) {
- constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
- if (gpu_info::CompareD3DDriverVersion(pciInfo.vendorId,
- ToBackend(GetAdapter())->GetDriverVersion(),
- kFirstDriverVersionWithFix) < 0) {
- SetToggle(
- Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- true);
- }
- }
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- // Immediately forget about all pending commands
- mPendingCommands.Release();
-
- DAWN_TRY(NextSerial());
- // Wait for all in-flight commands to finish executing
- DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
-
- return {};
- }
-
- MaybeError Device::CheckDebugLayerAndGenerateErrors() {
- if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
- return {};
- }
-
- ComPtr<ID3D12InfoQueue> infoQueue;
- DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
- "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
- uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
-
- // Check if any errors have occurred otherwise we would be creating an empty error. Note
- // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
- // because we only convert WARNINGS or higher messages to dawn errors.
- if (totalErrors == 0) {
- return {};
- }
-
- std::ostringstream messages;
- uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
- for (uint64_t i = 0; i < errorsToPrint; ++i) {
- SIZE_T messageLength = 0;
- HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
- if (FAILED(hr)) {
- messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
- continue;
- }
-
- std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
- D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
- hr = infoQueue->GetMessage(i, message, &messageLength);
- if (FAILED(hr)) {
- messages << " ID3D12InfoQueue::GetMessage failed with " << hr << '\n';
- continue;
- }
-
- messages << message->pDescription << " (" << message->ID << ")\n";
- }
- if (errorsToPrint < totalErrors) {
- messages << (totalErrors - errorsToPrint) << " messages silenced\n";
- }
- // We only print up to the first kMaxDebugMessagesToPrint errors
- infoQueue->ClearStoredMessages();
-
- return DAWN_INTERNAL_ERROR(messages.str());
- }
-
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
-
- // Immediately forget about all pending commands for the case where device is lost on its
- // own and WaitForIdleForDestruction isn't called.
- mPendingCommands.Release();
-
- if (mFenceEvent != nullptr) {
- ::CloseHandle(mFenceEvent);
- }
-
- // Release recycled resource heaps.
- if (mResourceAllocatorManager != nullptr) {
- mResourceAllocatorManager->DestroyPool();
- }
-
- // We need to handle clearing up com object refs that were enqeued after TickImpl
- mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
-
- ASSERT(mUsedComObjectRefs.Empty());
- ASSERT(!mPendingCommands.IsOpen());
- }
-
- ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
- return mViewShaderVisibleDescriptorAllocator.get();
- }
-
- ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
- return mSamplerShaderVisibleDescriptorAllocator.get();
- }
-
- StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
- uint32_t descriptorCount) const {
- ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
- // This is Log2 of the next power of two, plus 1.
- uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
- return mViewAllocators[allocatorIndex].get();
- }
-
- StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
- uint32_t descriptorCount) const {
- ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
- // This is Log2 of the next power of two, plus 1.
- uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
- return mSamplerAllocators[allocatorIndex].get();
- }
-
- StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
- return mRenderTargetViewAllocator.get();
- }
-
- StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
- return mDepthStencilViewAllocator.get();
- }
-
- SamplerHeapCache* Device::GetSamplerHeapCache() {
- return mSamplerHeapCache.get();
- }
-
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
- }
-
- // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
- // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
- // Current implementations would try to allocate additional 511 bytes,
- // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
-
- float Device::GetTimestampPeriodInNS() const {
- return mTimestampPeriod;
- }
-
- bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const {
- return ToBackend(computePipeline)->UsesNumWorkgroups();
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
deleted file mode 100644
index e6b72347115..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_DEVICED3D12_H_
-#define DAWNNATIVE_D3D12_DEVICED3D12_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/D3D12Info.h"
-#include "dawn_native/d3d12/Forward.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class CommandAllocatorManager;
- class PlatformFunctions;
- class ResidencyManager;
- class ResourceAllocatorManager;
- class SamplerHeapCache;
- class ShaderVisibleDescriptorAllocator;
- class StagingDescriptorAllocator;
-
-#define ASSERT_SUCCESS(hr) \
- do { \
- HRESULT succeeded = hr; \
- ASSERT(SUCCEEDED(succeeded)); \
- } while (0)
-
- // Definition of backend types
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Device*> Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize();
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- ID3D12Device* GetD3D12Device() const;
- ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
- ID3D12SharingContract* GetSharingContract() const;
-
- ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
- ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
- ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
-
- CommandAllocatorManager* GetCommandAllocatorManager() const;
- ResidencyManager* GetResidencyManager() const;
-
- const PlatformFunctions* GetFunctions() const;
- ComPtr<IDXGIFactory4> GetFactory() const;
- ComPtr<IDxcLibrary> GetDxcLibrary() const;
- ComPtr<IDxcCompiler> GetDxcCompiler() const;
- ComPtr<IDxcValidator> GetDxcValidator() const;
-
- ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
-
- MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
-
- const D3D12DeviceInfo& GetDeviceInfo() const;
-
- MaybeError NextSerial();
- MaybeError WaitForSerial(ExecutionSerial serial);
-
- void ReferenceUntilUnused(ComPtr<IUnknown> object);
-
- MaybeError ExecutePendingCommandContext();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
-
- void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
- StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
-
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- ResultOrError<ResourceHeapAllocation> AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage);
-
- void DeallocateMemory(ResourceHeapAllocation& allocation);
-
- ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
- ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
-
- // Returns nullptr when descriptor count is zero.
- StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(
- uint32_t descriptorCount) const;
-
- StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
- uint32_t descriptorCount) const;
-
- SamplerHeapCache* GetSamplerHeapCache();
-
- StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
-
- StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
-
- Ref<TextureBase> CreateExternalTexture(const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized);
-
- ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
-
- void InitTogglesFromDriver();
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
- ComputePipelineBase* computePipeline) const override;
-
- private:
- using DeviceBase::DeviceBase;
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- MaybeError CheckDebugLayerAndGenerateErrors();
-
- MaybeError ApplyUseDxcToggle();
-
- MaybeError CreateZeroBuffer();
-
- ComPtr<ID3D12Fence> mFence;
- HANDLE mFenceEvent = nullptr;
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
- ComPtr<ID3D12CommandQueue> mCommandQueue;
- ComPtr<ID3D12SharingContract> mD3d12SharingContract;
-
- // 11on12 device corresponding to mCommandQueue
- ComPtr<ID3D11On12Device> mD3d11On12Device;
-
- ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
- ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
- ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
-
- CommandRecordingContext mPendingCommands;
-
- SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
-
- std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
- std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
- std::unique_ptr<ResidencyManager> mResidencyManager;
-
- static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
- 3 * kMaxSamplersPerShaderStage;
- static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
- kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
-
- static constexpr uint32_t kNumSamplerDescriptorAllocators =
- ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
- static constexpr uint32_t kNumViewDescriptorAllocators =
- ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
-
- // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
- // the range [0, kMaxSamplerDescriptorsPerBindGroup].
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
- mViewAllocators;
-
- // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
- // the range [0, kMaxViewDescriptorsPerBindGroup].
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
- mSamplerAllocators;
-
- std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
-
- std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
-
- std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
-
- std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
-
- // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
- // release is called.
- std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
-
- // A buffer filled with zeros that is used to copy into other buffers when they need to be
- // cleared.
- Ref<Buffer> mZeroBuffer;
-
- // The number of nanoseconds required for a timestamp query to be incremented by 1
- float mTimestampPeriod = 1.0f;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_DEVICED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
deleted file mode 100644
index 4e5368b63a5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_FORWARD_H_
-#define DAWNNATIVE_D3D12_FORWARD_H_
-
-#include "dawn_native/ToBackend.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class Heap;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
-
- struct D3D12BackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = Heap;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
- return ToBackendBase<D3D12BackendTraits>(common);
- }
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
deleted file mode 100644
index 939a38c187e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(
- D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
- ExecutionSerial lastUsageSerial,
- HeapVersionID heapSerial)
- : mBaseDescriptor(baseDescriptor),
- mLastUsageSerial(lastUsageSerial),
- mHeapSerial(heapSerial) {
- }
-
- D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
- return mBaseDescriptor;
- }
-
- ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
- return mLastUsageSerial;
- }
-
- HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
- return mHeapSerial;
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h
deleted file mode 100644
index bdab5beee17..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
-#define DAWNNATIVE_D3D12_GPUDESCRIPTORHEAPALLOCATION_H_
-
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/d3d12/IntegerTypes.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- // Wrapper for a handle into a GPU-only descriptor heap.
- class GPUDescriptorHeapAllocation {
- public:
- GPUDescriptorHeapAllocation() = default;
- GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
- ExecutionSerial lastUsageSerial,
- HeapVersionID heapSerial);
-
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
- ExecutionSerial GetLastUsageSerial() const;
- HeapVersionID GetHeapSerial() const;
-
- private:
- D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
- ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
- HeapVersionID mHeapSerial = HeapVersionID(0);
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
deleted file mode 100644
index 2c9f3778e0a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- HeapAllocator::HeapAllocator(Device* device,
- D3D12_HEAP_TYPE heapType,
- D3D12_HEAP_FLAGS heapFlags,
- MemorySegment memorySegment)
- : mDevice(device),
- mHeapType(heapType),
- mHeapFlags(heapFlags),
- mMemorySegment(memorySegment) {
- }
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
- uint64_t size) {
- D3D12_HEAP_DESC heapDesc;
- heapDesc.SizeInBytes = size;
- heapDesc.Properties.Type = mHeapType;
- heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
- heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
- heapDesc.Properties.CreationNodeMask = 0;
- heapDesc.Properties.VisibleNodeMask = 0;
- // It is preferred to use a size that is a multiple of the alignment.
- // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
- // if the heap size is too small, the VMM would fragment.
- // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
- heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
- heapDesc.Flags = mHeapFlags;
-
- // CreateHeap will implicitly make the created heap resident. We must ensure enough free
- // memory exists before allocating to avoid an out-of-memory error when overcommitted.
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
-
- ComPtr<ID3D12Heap> d3d12Heap;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
- "ID3D12Device::CreateHeap"));
-
- std::unique_ptr<ResourceHeapBase> heapBase =
- std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
-
- // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
- // avoid calling MakeResident a second time.
- mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
- return std::move(heapBase);
- }
-
- void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
- mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h
deleted file mode 100644
index 53254c5e9fd..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
-#define DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
-
-#include "dawn_native/D3D12Backend.h"
-#include "dawn_native/ResourceHeapAllocator.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- // Wrapper to allocate a D3D12 heap.
- class HeapAllocator : public ResourceHeapAllocator {
- public:
- HeapAllocator(Device* device,
- D3D12_HEAP_TYPE heapType,
- D3D12_HEAP_FLAGS heapFlags,
- MemorySegment memorySegment);
- ~HeapAllocator() override = default;
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override;
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
-
- private:
- Device* mDevice;
- D3D12_HEAP_TYPE mHeapType;
- D3D12_HEAP_FLAGS mHeapFlags;
- MemorySegment mMemorySegment;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
deleted file mode 100644
index 7bb4e323a67..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/HeapD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
- Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
- : Pageable(std::move(d3d12Pageable), memorySegment, size) {
- mD3d12Pageable.As(&mD3d12Heap);
- }
-
- // This function should only be used when mD3D12Pageable was initialized from a
- // ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
- // ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
- // use GetD3D12Pageable().
- ID3D12Heap* Heap::GetD3D12Heap() const {
- return mD3d12Heap.Get();
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
deleted file mode 100644
index b59c6449dec..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
-#define DAWNNATIVE_D3D12_HEAPD3D12_H_
-
-#include "dawn_native/ResourceHeap.h"
-#include "dawn_native/d3d12/PageableD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
- // representing a directly allocated resource. It inherits from Pageable because each Heap must
- // be represented in the ResidencyManager.
- class Heap : public ResourceHeapBase, public Pageable {
- public:
- Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
-
- ID3D12Heap* GetD3D12Heap() const;
-
- private:
- ComPtr<ID3D12Heap> mD3d12Heap;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_HEAPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/IntegerTypes.h b/chromium/third_party/dawn/src/dawn_native/d3d12/IntegerTypes.h
deleted file mode 100644
index 219f392a194..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/IntegerTypes.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_INTEGERTYPES_H_
-#define DAWNNATIVE_D3D12_INTEGERTYPES_H_
-
-#include "common/Constants.h"
-#include "common/TypedInteger.h"
-
-#include <cstdint>
-
-namespace dawn_native { namespace d3d12 {
-
- // An ID used to desambiguate between multiple uses of the same descriptor heap in the
- // BindGroup allocations.
- using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
-
- // The monotonically increasing serial for external D3D12 mutexes imported in Dawn.
- using ExternalMutexSerial = TypedInteger<struct ExternalMutexSerialT, uint64_t>;
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_INTEGERTYPES_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
deleted file mode 100644
index f4ef1209237..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
-
-#include "common/Assert.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
- DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
- if (allowedUsages & WGPUTextureUsage_TextureBinding) {
- usage |= DXGI_USAGE_SHADER_INPUT;
- }
- if (allowedUsages & WGPUTextureUsage_StorageBinding) {
- usage |= DXGI_USAGE_UNORDERED_ACCESS;
- }
- if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
- usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
- }
- return usage;
- }
-
- static constexpr unsigned int kFrameCount = 3;
- } // anonymous namespace
-
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
- : mWindow(window), mDevice(device), mInterval(1) {
- }
-
- NativeSwapChainImpl::~NativeSwapChainImpl() {
- }
-
- void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
- }
-
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- ASSERT(width > 0);
- ASSERT(height > 0);
- ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
-
- ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
- ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
-
- mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
-
- // Create the D3D12 swapchain, assuming only two buffers for now
- DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
- swapChainDesc.Width = width;
- swapChainDesc.Height = height;
- swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
- swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
- swapChainDesc.BufferCount = kFrameCount;
- swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
- swapChainDesc.SampleDesc.Count = 1;
- swapChainDesc.SampleDesc.Quality = 0;
-
- ComPtr<IDXGISwapChain1> swapChain1;
- ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc,
- nullptr, nullptr, &swapChain1));
-
- ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
-
- // Gather the resources that will be used to present to the swapchain
- mBuffers.resize(kFrameCount);
- for (uint32_t i = 0; i < kFrameCount; ++i) {
- ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
- }
-
- // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
- // used
- mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
- nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
-
- // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
- // with the buffer. Ideally the synchronization should be all done on the GPU.
- ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::Present() {
- // This assumes the texture has already been transition to the PRESENT state.
-
- ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
- // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
- ASSERT(mDevice->NextSerial().IsSuccess());
-
- mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h
deleted file mode 100644
index bb532684711..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
-#define DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include "dawn/dawn_wsi.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <vector>
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextD3D12;
-
- NativeSwapChainImpl(Device* device, HWND window);
- ~NativeSwapChainImpl();
-
- void Init(DawnWSIContextD3D12* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
-
- wgpu::TextureFormat GetPreferredFormat() const;
-
- private:
- HWND mWindow = nullptr;
- Device* mDevice = nullptr;
- UINT mInterval;
-
- ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
- std::vector<ComPtr<ID3D12Resource>> mBuffers;
- std::vector<ExecutionSerial> mBufferSerials;
- uint32_t mCurrentBuffer;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_NATIVESWAPCHAINIMPLD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
deleted file mode 100644
index b171db5a8dc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/PageableD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
- Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
- MemorySegment memorySegment,
- uint64_t size)
- : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
- }
-
- // When a pageable is destroyed, it no longer resides in resident memory, so we must evict
- // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
- // ResidencyManager will attempt to use it after it has been deallocated.
- Pageable::~Pageable() {
- if (IsInResidencyLRUCache()) {
- RemoveFromList();
- }
- }
-
- ID3D12Pageable* Pageable::GetD3D12Pageable() const {
- return mD3d12Pageable.Get();
- }
-
- ExecutionSerial Pageable::GetLastUsage() const {
- return mLastUsage;
- }
-
- void Pageable::SetLastUsage(ExecutionSerial serial) {
- mLastUsage = serial;
- }
-
- ExecutionSerial Pageable::GetLastSubmission() const {
- return mLastSubmission;
- }
-
- void Pageable::SetLastSubmission(ExecutionSerial serial) {
- mLastSubmission = serial;
- }
-
- MemorySegment Pageable::GetMemorySegment() const {
- return mMemorySegment;
- }
-
- uint64_t Pageable::GetSize() const {
- return mSize;
- }
-
- bool Pageable::IsInResidencyLRUCache() const {
- return IsInList();
- }
-
- void Pageable::IncrementResidencyLock() {
- mResidencyLockRefCount++;
- }
-
- void Pageable::DecrementResidencyLock() {
- mResidencyLockRefCount--;
- }
-
- bool Pageable::IsResidencyLocked() const {
- return mResidencyLockRefCount != 0;
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h
deleted file mode 100644
index fb3a6894d69..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_PAGEABLED3D12_H_
-#define DAWNNATIVE_D3D12_PAGEABLED3D12_H_
-
-#include "common/LinkedList.h"
-#include "dawn_native/D3D12Backend.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
- // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
- // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
- // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
- // LRU cache when it is evicted from resident memory due to budget constraints, or when the
- // pageable allocation is released.
- class Pageable : public LinkNode<Pageable> {
- public:
- Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
- ~Pageable();
-
- ID3D12Pageable* GetD3D12Pageable() const;
-
- // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
- // used. We must check this serial against the current serial when recording usages to
- // ensure we do not process residency for this pageable multiple times.
- ExecutionSerial GetLastUsage() const;
- void SetLastUsage(ExecutionSerial serial);
-
- // The residency manager must know the last serial that any portion of the pageable was
- // submitted to be used so that we can ensure this pageable stays resident in memory at
- // least until that serial has completed.
- ExecutionSerial GetLastSubmission() const;
- void SetLastSubmission(ExecutionSerial serial);
-
- MemorySegment GetMemorySegment() const;
-
- uint64_t GetSize() const;
-
- bool IsInResidencyLRUCache() const;
-
- // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
- // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
- // mapped in a single heap, we must track the number of resources currently locked.
- void IncrementResidencyLock();
- void DecrementResidencyLock();
- bool IsResidencyLocked() const;
-
- protected:
- ComPtr<ID3D12Pageable> mD3d12Pageable;
-
- private:
- // mLastUsage denotes the last time this pageable was recorded for use.
- ExecutionSerial mLastUsage = ExecutionSerial(0);
- // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
- // although this variable often contains the same value as mLastUsage, it can differ in some
- // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
- // updated upon the call, but the backend operation is deferred until the next submission
- // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
- // accurately identify when a pageable can be evicted.
- ExecutionSerial mLastSubmission = ExecutionSerial(0);
- MemorySegment mMemorySegment;
- uint32_t mResidencyLockRefCount = 0;
- uint64_t mSize = 0;
- };
-}} // namespace dawn_native::d3d12
-
-#endif
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
deleted file mode 100644
index 7d67ac9dfeb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include <sstream>
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-
-using Microsoft::WRL::ComPtr;
-
-namespace dawn_native { namespace d3d12 {
- namespace {
-
- // Reserve register names for internal use. This registers map to bindings in the shader,
- // but are not directly related to allocation of the root signature.
- // In the root signature, it the index of the root parameter where these registers are
- // used that determines the layout of the root signature.
- static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
- static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
-
- static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
- static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
-
- D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
- ASSERT(visibility != wgpu::ShaderStage::None);
-
- if (visibility == wgpu::ShaderStage::Vertex) {
- return D3D12_SHADER_VISIBILITY_VERTEX;
- }
-
- if (visibility == wgpu::ShaderStage::Fragment) {
- return D3D12_SHADER_VISIBILITY_PIXEL;
- }
-
- // For compute or any two combination of stages, visibility must be ALL
- return D3D12_SHADER_VISIBILITY_ALL;
- }
-
- D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
- switch (type) {
- case wgpu::BufferBindingType::Uniform:
- return D3D12_ROOT_PARAMETER_TYPE_CBV;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- return D3D12_ROOT_PARAMETER_TYPE_UAV;
- case wgpu::BufferBindingType::ReadOnlyStorage:
- return D3D12_ROOT_PARAMETER_TYPE_SRV;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- }
-
- } // anonymous namespace
-
- ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
- DAWN_TRY(layout->Initialize());
- return layout;
- }
-
- MaybeError PipelineLayout::Initialize() {
- Device* device = ToBackend(GetDevice());
- // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
- // descriptor.
- std::vector<D3D12_ROOT_PARAMETER> rootParameters;
-
- size_t rangesCount = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
- rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
- bindGroupLayout->GetSamplerDescriptorRanges().size();
- }
-
- // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
- std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
-
- uint32_t rangeIndex = 0;
-
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
-
- // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
- // bind group index Returns whether or not the parameter was set. A root parameter is
- // not set if the number of ranges is 0
- auto SetRootDescriptorTable =
- [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
- auto rangeCount = descriptorRanges.size();
- if (rangeCount == 0) {
- return false;
- }
-
- D3D12_ROOT_PARAMETER rootParameter = {};
- rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
- rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
- rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
-
- for (auto& range : descriptorRanges) {
- ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
- ranges[rangeIndex] = range;
- ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
- rangeIndex++;
- }
-
- rootParameters.emplace_back(rootParameter);
-
- return true;
- };
-
- if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
- mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
- }
- if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
- mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
- }
-
- // Init root descriptors in root signatures for dynamic buffer bindings.
- // These are packed at the beginning of the layout binding info.
- for (BindingIndex dynamicBindingIndex{0};
- dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
- ++dynamicBindingIndex) {
- const BindingInfo& bindingInfo =
- bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
-
- if (bindingInfo.visibility == wgpu::ShaderStage::None) {
- // Skip dynamic buffers that are not visible. D3D12 does not have None
- // visibility.
- continue;
- }
-
- D3D12_ROOT_PARAMETER rootParameter = {};
-
- // Setup root descriptor.
- D3D12_ROOT_DESCRIPTOR rootDescriptor;
- rootDescriptor.ShaderRegister =
- bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
- rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
-
- // Set root descriptors in root signatures.
- rootParameter.Descriptor = rootDescriptor;
- mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
-
- // Set parameter types according to bind group layout descriptor.
- rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
-
- // Set visibilities according to bind group layout descriptor.
- rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
-
- rootParameters.emplace_back(rootParameter);
- }
- }
-
- // Make sure that we added exactly the number of elements we expected. If we added more,
- // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
- ASSERT(rangeIndex == rangesCount);
-
- D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
- renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
- // Always allocate 3 constants for either:
- // - vertex_index and instance_index
- // - num_workgroups_x, num_workgroups_y and num_workgroups_z
- // NOTE: We should consider delaying root signature creation until we know how many values
- // we need
- renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
- renderOrComputeInternalConstants.Constants.RegisterSpace =
- kRenderOrComputeInternalRegisterSpace;
- renderOrComputeInternalConstants.Constants.ShaderRegister =
- kRenderOrComputeInternalBaseRegister;
- mFirstIndexOffsetParameterIndex = rootParameters.size();
- mNumWorkgroupsParameterIndex = rootParameters.size();
- // NOTE: We should consider moving this entry to earlier in the root signature since offsets
- // would need to be updated often
- rootParameters.emplace_back(renderOrComputeInternalConstants);
-
- // Loops over all of the dynamic storage buffer bindings in the layout and build
- // a mapping from the binding to the next offset into the root constant array where
- // that dynamic storage buffer's binding size will be stored. The next register offset
- // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
- // This data will be used by shader translation to emit a load from the root constant
- // array to use as the binding's size in runtime array calculations.
- // Each bind group's length data is stored contiguously in the root constant array,
- // so the loop also computes the first register offset for each group where the
- // data should start.
- uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
-
- mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
- dynamicStorageBufferLengthsShaderRegisterOffset;
- mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
- bgl->GetBindingCountInfo().dynamicStorageBufferCount);
-
- for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
- ++bindingIndex) {
- if (bgl->IsStorageBufferBinding(bindingIndex)) {
- mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
- {bgl->GetBindingInfo(bindingIndex).binding,
- dynamicStorageBufferLengthsShaderRegisterOffset++});
- }
- }
-
- ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
- bgl->GetBindingCountInfo().dynamicStorageBufferCount);
- }
- ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
- kMaxDynamicStorageBuffersPerPipelineLayout);
-
- D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
- dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
- dynamicStorageBufferLengthConstants.ParameterType =
- D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
- dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
- dynamicStorageBufferLengthsShaderRegisterOffset;
- dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
- kDynamicStorageBufferLengthsRegisterSpace;
- dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
- kDynamicStorageBufferLengthsBaseRegister;
- mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
- rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
-
- D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
- rootSignatureDescriptor.NumParameters = rootParameters.size();
- rootSignatureDescriptor.pParameters = rootParameters.data();
- rootSignatureDescriptor.NumStaticSamplers = 0;
- rootSignatureDescriptor.pStaticSamplers = nullptr;
- rootSignatureDescriptor.Flags =
- D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
-
- ComPtr<ID3DBlob> signature;
- ComPtr<ID3DBlob> error;
- HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
- &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
- if (DAWN_UNLIKELY(FAILED(hr))) {
- std::ostringstream messageStream;
- if (error) {
- messageStream << static_cast<const char*>(error->GetBufferPointer());
-
- // |error| is observed to always end with a \n, but is not
- // specified to do so, so we add an extra newline just in case.
- messageStream << std::endl;
- }
- messageStream << "D3D12 serialize root signature";
- DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
- }
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
- 0, signature->GetBufferPointer(), signature->GetBufferSize(),
- IID_PPV_ARGS(&mRootSignature)),
- "D3D12 create root signature"));
- return {};
- }
-
- uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
- ASSERT(group < kMaxBindGroupsTyped);
- return mCbvUavSrvRootParameterInfo[group];
- }
-
- uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
- ASSERT(group < kMaxBindGroupsTyped);
- return mSamplerRootParameterInfo[group];
- }
-
- ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
- return mRootSignature.Get();
- }
-
- const PipelineLayout::DynamicStorageBufferLengthInfo&
- PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
- return mDynamicStorageBufferLengthInfo;
- }
-
- uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
- BindingIndex bindingIndex) const {
- ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
- wgpu::ShaderStage::None);
- return mDynamicRootParameterIndices[group][bindingIndex];
- }
-
- uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
- return kRenderOrComputeInternalRegisterSpace;
- }
-
- uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
- return kRenderOrComputeInternalBaseRegister;
- }
-
- uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
- return mFirstIndexOffsetParameterIndex;
- }
-
- uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
- return kRenderOrComputeInternalRegisterSpace;
- }
-
- uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
- return kRenderOrComputeInternalBaseRegister;
- }
-
- uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
- return mNumWorkgroupsParameterIndex;
- }
-
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
- return kDynamicStorageBufferLengthsRegisterSpace;
- }
-
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
- return kDynamicStorageBufferLengthsBaseRegister;
- }
-
- uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
- return mDynamicStorageBufferLengthsParameterIndex;
- }
-
- ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
- // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
- if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
- return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
- }
-
- D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
- argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
- argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
- argumentDescs[0].Constant.Num32BitValuesToSet = 3;
- argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
-
- // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
- // command. That command must come last.
- argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
-
- D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
- programDesc.ByteStride = 6 * sizeof(uint32_t);
- programDesc.NumArgumentDescs = 2;
- programDesc.pArgumentDescs = argumentDescs;
-
- // The root signature must be specified if and only if the command signature changes one of
- // the root arguments.
- ToBackend(GetDevice())
- ->GetD3D12Device()
- ->CreateCommandSignature(
- &programDesc, GetRootSignature(),
- IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
- return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
deleted file mode 100644
index 1304f73c2e6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
-#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static ResultOrError<Ref<PipelineLayout>> Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor);
-
- uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
- uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
-
- // Returns the index of the root parameter reserved for a dynamic buffer binding
- uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
- BindingIndex bindingIndex) const;
-
- uint32_t GetFirstIndexOffsetRegisterSpace() const;
- uint32_t GetFirstIndexOffsetShaderRegister() const;
- uint32_t GetFirstIndexOffsetParameterIndex() const;
-
- uint32_t GetNumWorkgroupsRegisterSpace() const;
- uint32_t GetNumWorkgroupsShaderRegister() const;
- uint32_t GetNumWorkgroupsParameterIndex() const;
-
- uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
- uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
- uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
-
- ID3D12RootSignature* GetRootSignature() const;
-
- ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
-
- struct PerBindGroupDynamicStorageBufferLengthInfo {
- // First register offset for a bind group's dynamic storage buffer lengths.
- // This is the index into the array of root constants where this bind group's
- // lengths start.
- uint32_t firstRegisterOffset;
-
- struct BindingAndRegisterOffset {
- BindingNumber binding;
- uint32_t registerOffset;
- };
- // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
- // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
- // into the root constant array which holds the dynamic storage buffer lengths.
- std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
- };
-
- // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
- // Each pair is used in shader translation to
- using DynamicStorageBufferLengthInfo =
- ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
-
- const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
-
- private:
- ~PipelineLayout() override = default;
- using PipelineLayoutBase::PipelineLayoutBase;
- MaybeError Initialize();
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
- ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
- ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
- kMaxBindGroups>
- mDynamicRootParameterIndices;
- DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
- uint32_t mFirstIndexOffsetParameterIndex;
- uint32_t mNumWorkgroupsParameterIndex;
- uint32_t mDynamicStorageBufferLengthsParameterIndex;
- ComPtr<ID3D12RootSignature> mRootSignature;
- ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
deleted file mode 100644
index 27e656486ef..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/PlatformFunctions.h"
-
-#include "common/DynamicLib.h"
-
-#include <comdef.h>
-#include <array>
-#include <sstream>
-
-namespace dawn_native { namespace d3d12 {
- namespace {
- // Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
- uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
- constexpr char kPrefix[] = "10.0.";
- constexpr char kPostfix[] = ".0";
-
- constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
- constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
- const uint32_t directoryNameLen = strlen(directoryName);
-
- if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
- return 0;
- }
-
- // Check if directoryName starts with "10.0.".
- if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
- return 0;
- }
-
- // Check if directoryName ends with ".0".
- if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) !=
- 0) {
- return 0;
- }
-
- // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
- return atoi(directoryName + kPrefixLen);
- }
-
- class ScopedFileHandle final {
- public:
- explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {
- }
- ~ScopedFileHandle() {
- if (mHandle != INVALID_HANDLE_VALUE) {
- ASSERT(FindClose(mHandle));
- }
- }
- HANDLE GetHandle() const {
- return mHandle;
- }
-
- private:
- HANDLE mHandle;
- };
-
- std::string GetWindowsSDKBasePath() {
- const char* kDefaultWindowsSDKPath =
- "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
- WIN32_FIND_DATAA fileData;
- ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
- if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
- return "";
- }
-
- uint32_t highestWindowsSDKVersion = 0;
- do {
- if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
- continue;
- }
-
- highestWindowsSDKVersion =
- std::max(highestWindowsSDKVersion,
- GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
- } while (FindNextFileA(handle.GetHandle(), &fileData));
-
- if (highestWindowsSDKVersion == 0) {
- return "";
- }
-
- // Currently we only support using DXC on x64.
- std::ostringstream ostream;
- ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0."
- << highestWindowsSDKVersion << ".0\\x64\\";
-
- return ostream.str();
- }
- } // anonymous namespace
-
- PlatformFunctions::PlatformFunctions() = default;
- PlatformFunctions::~PlatformFunctions() = default;
-
- MaybeError PlatformFunctions::LoadFunctions() {
- DAWN_TRY(LoadD3D12());
- DAWN_TRY(LoadDXGI());
- LoadDXCLibraries();
- DAWN_TRY(LoadFXCompiler());
- DAWN_TRY(LoadD3D11());
- LoadPIXRuntime();
- return {};
- }
-
- MaybeError PlatformFunctions::LoadD3D12() {
-#if DAWN_PLATFORM_WINUWP
- d3d12CreateDevice = &D3D12CreateDevice;
- d3d12GetDebugInterface = &D3D12GetDebugInterface;
- d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
- d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
- d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
- d3d12CreateVersionedRootSignatureDeserializer =
- &D3D12CreateVersionedRootSignatureDeserializer;
-#else
- std::string error;
- if (!mD3D12Lib.Open("d3d12.dll", &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
- !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
- !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature",
- &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
- "D3D12CreateRootSignatureDeserializer", &error) ||
- !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
- "D3D12SerializeVersionedRootSignature", &error) ||
- !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
- "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
-#endif
-
- return {};
- }
-
- MaybeError PlatformFunctions::LoadD3D11() {
-#if DAWN_PLATFORM_WINUWP
- d3d11on12CreateDevice = &D3D11On12CreateDevice;
-#else
- std::string error;
- if (!mD3D11Lib.Open("d3d11.dll", &error) ||
- !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
-#endif
-
- return {};
- }
-
- MaybeError PlatformFunctions::LoadDXGI() {
-#if DAWN_PLATFORM_WINUWP
-# if defined(_DEBUG)
- // DXGIGetDebugInterface1 is tagged as a development-only capability
- // which implies that linking to this function will cause
- // the application to fail Windows store certification
- // But we need it when debuging using VS Graphics Diagnostics or PIX
- // So we only link to it in debug build
- dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
-# endif
- createDxgiFactory2 = &CreateDXGIFactory2;
-#else
- std::string error;
- if (!mDXGILib.Open("dxgi.dll", &error) ||
- !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
- !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
-#endif
-
- return {};
- }
-
- void PlatformFunctions::LoadDXCLibraries() {
- // TODO(dawn:766)
- // Statically linked with dxcompiler.lib in UWP
- // currently linked with dxcompiler.lib making CoreApp unable to activate
- // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
- // successfully executed.
-
- const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
-
- LoadDXIL(windowsSDKBasePath);
- LoadDXCompiler(windowsSDKBasePath);
- }
-
- void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
- const char* dxilDLLName = "dxil.dll";
- const std::array<std::string, 2> kDxilDLLPaths = {
- {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
-
- for (const std::string& dxilDLLPath : kDxilDLLPaths) {
- if (mDXILLib.Open(dxilDLLPath, nullptr)) {
- return;
- }
- }
- ASSERT(!mDXILLib.Valid());
- }
-
- void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
- // DXIL must be loaded before DXC, otherwise shader signing is unavailable
- if (!mDXILLib.Valid()) {
- return;
- }
-
- const char* dxCompilerDLLName = "dxcompiler.dll";
- const std::array<std::string, 2> kDxCompilerDLLPaths = {
- {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
-
- DynamicLib dxCompilerLib;
- for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
- if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
- break;
- }
- }
-
- if (dxCompilerLib.Valid() &&
- dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
- mDXCompilerLib = std::move(dxCompilerLib);
- } else {
- mDXILLib.Close();
- }
- }
-
- MaybeError PlatformFunctions::LoadFXCompiler() {
-#if DAWN_PLATFORM_WINUWP
- d3dCompile = &D3DCompile;
- d3dDisassemble = &D3DDisassemble;
-#else
- std::string error;
- if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
- !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
- !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
- return DAWN_INTERNAL_ERROR(error.c_str());
- }
-#endif
- return {};
- }
-
- bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
- return mPIXEventRuntimeLib.Valid();
- }
-
- bool PlatformFunctions::IsDXCAvailable() const {
- return mDXILLib.Valid() && mDXCompilerLib.Valid();
- }
-
- void PlatformFunctions::LoadPIXRuntime() {
- // TODO(dawn:766):
- // In UWP PIX should be statically linked WinPixEventRuntime_UAP.lib
- // So maybe we should put WinPixEventRuntime as a third party package
- // Currently PIX is not going to be loaded in UWP since the following
- // mPIXEventRuntimeLib.Open will fail.
- if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
- !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
- "PIXBeginEventOnCommandList") ||
- !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
- !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
- mPIXEventRuntimeLib.Close();
- }
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h
deleted file mode 100644
index cd88217553e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
-#define DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include "common/DynamicLib.h"
-#include "dawn_native/Error.h"
-
-#include <d3dcompiler.h>
-
-namespace dawn_native { namespace d3d12 {
-
- // Loads the functions required from the platform dynamically so that we don't need to rely on
- // them being present in the system. For example linking against d3d12.lib would prevent
- // dawn_native from loading on Windows 7 system where d3d12.dll doesn't exist.
- class PlatformFunctions {
- public:
- PlatformFunctions();
- ~PlatformFunctions();
-
- MaybeError LoadFunctions();
- bool IsPIXEventRuntimeLoaded() const;
- bool IsDXCAvailable() const;
-
- // Functions from d3d12.dll
- PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
- PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
-
- PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
- PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
- PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
- PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
- d3d12CreateVersionedRootSignatureDeserializer = nullptr;
-
- // Functions from dxgi.dll
- using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
- REFIID riid,
- _COM_Outptr_ void** pDebug);
- PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
-
- using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
- REFIID riid,
- _COM_Outptr_ void** ppFactory);
- PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
-
- // Functions from dxcompiler.dll
- using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
- REFIID riid,
- _COM_Outptr_ void** ppCompiler);
- PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
-
- // Functions from d3d3compiler.dll
- pD3DCompile d3dCompile = nullptr;
- pD3DDisassemble d3dDisassemble = nullptr;
-
- // Functions from WinPixEventRuntime.dll
- using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
- HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
-
- PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
-
- using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
- WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
-
- PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
-
- using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(
- WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
-
- PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
-
- // Functions from D3D11.dll
- PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
-
- private:
- MaybeError LoadD3D12();
- MaybeError LoadD3D11();
- MaybeError LoadDXGI();
- void LoadDXCLibraries();
- void LoadDXIL(const std::string& baseWindowsSDKPath);
- void LoadDXCompiler(const std::string& baseWindowsSDKPath);
- MaybeError LoadFXCompiler();
- void LoadPIXRuntime();
-
- DynamicLib mD3D12Lib;
- DynamicLib mD3D11Lib;
- DynamicLib mDXGILib;
- DynamicLib mDXILLib;
- DynamicLib mDXCompilerLib;
- DynamicLib mFXCompilerLib;
- DynamicLib mPIXEventRuntimeLib;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_PLATFORMFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
deleted file mode 100644
index d6a618086d8..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/QuerySetD3D12.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
- }
- }
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(querySet->Initialize());
- return querySet;
- }
-
- MaybeError QuerySet::Initialize() {
- D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
- queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
- queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
-
- ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
- "ID3D12Device::CreateQueryHeap"));
-
- SetLabelImpl();
-
- return {};
- }
-
- ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
- return mQueryHeap.Get();
- }
-
- QuerySet::~QuerySet() = default;
-
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
- mQueryHeap = nullptr;
- }
-
- void QuerySet::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
deleted file mode 100644
index 7058798bd31..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_QUERYSETD3D12_H_
-#define DAWNNATIVE_D3D12_QUERYSETD3D12_H_
-
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class QuerySet : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
-
- ID3D12QueryHeap* GetQueryHeap() const;
-
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
-
- // Dawn API
- void DestroyImpl() override;
- void SetLabelImpl() override;
-
- ComPtr<ID3D12QueryHeap> mQueryHeap;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_QUERYSETD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
deleted file mode 100644
index ca6064de511..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/QueueD3D12.h"
-
-#include "common/Math.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/d3d12/CommandBufferD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-namespace dawn_native { namespace d3d12 {
-
- Queue::Queue(Device* device) : QueueBase(device) {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
-
- DAWN_TRY(device->Tick());
-
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferD3D12::RecordCommands");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferD3D12::RecordCommands");
-
- DAWN_TRY(device->ExecutePendingCommandContext());
-
- DAWN_TRY(device->NextSerial());
- return {};
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
deleted file mode 100644
index 311c6071926..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_QUEUED3D12_H_
-#define DAWNNATIVE_D3D12_QUEUED3D12_H_
-
-#include "dawn_native/Queue.h"
-
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device);
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_QUEUED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
deleted file mode 100644
index 47da954c228..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
-
-#include "dawn_native/Format.h"
-#include "dawn_native/d3d12/CommandBufferD3D12.h"
-#include "dawn_native/d3d12/Forward.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
- switch (loadOp) {
- case wgpu::LoadOp::Clear:
- return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
- case wgpu::LoadOp::Load:
- return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
- }
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
- switch (storeOp) {
- case wgpu::StoreOp::Discard:
- return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
- case wgpu::StoreOp::Store:
- return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
- }
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination) {
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
-
- resolveParameters.Format = resolveDestination->GetD3D12Format();
- resolveParameters.pSrcResource =
- ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
- resolveParameters.pDstResource =
- ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
-
- // Clear or preserve the resolve source.
- if (storeOp == wgpu::StoreOp::Discard) {
- resolveParameters.PreserveResolveSource = false;
- } else if (storeOp == wgpu::StoreOp::Store) {
- resolveParameters.PreserveResolveSource = true;
- }
-
- // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
- // TODO: Investigate and determine how integer format resolves should work in WebGPU.
- switch (resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Sint:
- case wgpu::TextureComponentType::Uint:
- resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_MAX;
- break;
- case wgpu::TextureComponentType::Float:
- resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
- break;
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
-
- resolveParameters.SubresourceCount = 1;
-
- return resolveParameters;
- }
-
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
- D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
- Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
- ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
-
- subresourceParameters.DstX = 0;
- subresourceParameters.DstY = 0;
- subresourceParameters.SrcSubresource = 0;
- subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
- resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
- Aspect::Color);
- // Resolving a specified sub-rect is only valid on hardware that supports sample
- // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
- // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
- // "empty" to resolve the entire region.
- subresourceParameters.SrcRect = {0, 0, 0, 0};
-
- return subresourceParameters;
- }
- } // anonymous namespace
-
- RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
- if (hasUAV) {
- mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
- }
- }
-
- void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
- ASSERT(mColorAttachmentCount < kMaxColorAttachmentsTyped);
- mRenderTargetViews[attachmentIndex] = baseDescriptor;
- mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
- mColorAttachmentCount++;
- }
-
- void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
- mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
- }
-
- ColorAttachmentIndex RenderPassBuilder::GetColorAttachmentCount() const {
- return mColorAttachmentCount;
- }
-
- bool RenderPassBuilder::HasDepth() const {
- return mHasDepth;
- }
-
- ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
- RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
- return {mRenderPassRenderTargetDescriptors.data(), mColorAttachmentCount};
- }
-
- const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
- RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
- return &mRenderPassDepthStencilDesc;
- }
-
- D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
- return mRenderPassFlags;
- }
-
- const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
- return mRenderTargetViews.data();
- }
-
- void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
- wgpu::LoadOp loadOp,
- dawn_native::Color clearColor,
- DXGI_FORMAT format) {
- mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
- D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
- mRenderPassRenderTargetDescriptors[attachment]
- .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
- mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
- format;
- }
- }
-
- void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp) {
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
- D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination) {
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
- D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
-
- mSubresourceParams[attachment] =
- D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
-
- mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
- &mSubresourceParams[attachment];
- }
-
- void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- float clearDepth,
- DXGI_FORMAT format) {
- mHasDepth = true;
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
- clearDepth;
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
- }
- mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- uint8_t clearStencil,
- DXGI_FORMAT format) {
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
- if (loadOp == wgpu::LoadOp::Clear) {
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
- .Stencil = clearStencil;
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
- }
- mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
- }
-
- void RenderPassBuilder::SetDepthNoAccess() {
- mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
- mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
- }
-
- void RenderPassBuilder::SetDepthStencilNoAccess() {
- SetDepthNoAccess();
- SetStencilNoAccess();
- }
-
- void RenderPassBuilder::SetStencilNoAccess() {
- mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
- D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
- mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
- D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
deleted file mode 100644
index 20f70a4190f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
-#define DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "common/ityp_span.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-
-namespace dawn_native { namespace d3d12 {
-
- class TextureView;
-
- // RenderPassBuilder stores parameters related to render pass load and store operations.
- // When the D3D12 render pass API is available, the needed descriptors can be fetched
- // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
- // descriptors are still fetched and any information necessary to emulate the load and store
- // operations is extracted from the descriptors.
- class RenderPassBuilder {
- public:
- RenderPassBuilder(bool hasUAV);
-
- ColorAttachmentIndex GetColorAttachmentCount() const;
-
- // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
- // storage if D3D12 render pass API is unavailable.
- ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
- GetRenderPassRenderTargetDescriptors() const;
- const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
-
- D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
-
- // Returns attachment RTVs to use with OMSetRenderTargets.
- const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
-
- bool HasDepth() const;
-
- // Functions that set the appropriate values in the render pass descriptors.
- void SetDepthAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- float clearDepth,
- DXGI_FORMAT format);
- void SetDepthNoAccess();
- void SetDepthStencilNoAccess();
- void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
- wgpu::LoadOp loadOp,
- dawn_native::Color clearColor,
- DXGI_FORMAT format);
- void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
- void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
- wgpu::StoreOp storeOp,
- TextureView* resolveSource,
- TextureView* resolveDestination);
- void SetStencilAccess(wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- uint8_t clearStencil,
- DXGI_FORMAT format);
- void SetStencilNoAccess();
-
- void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
- D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
- void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
-
- private:
- ColorAttachmentIndex mColorAttachmentCount{uint8_t(0)};
- bool mHasDepth = false;
- D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
- D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
- ityp::
- array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
- mRenderPassRenderTargetDescriptors;
- ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
- mRenderTargetViews;
- ityp::array<ColorAttachmentIndex,
- D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
- kMaxColorAttachments>
- mSubresourceParams;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
deleted file mode 100644
index 410d36f85d6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/RenderPipelineD3D12.h"
-
-#include "common/Assert.h"
-#include "common/Log.h"
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-#include "dawn_native/d3d12/ShaderModuleD3D12.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-#include <d3dcompiler.h>
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return DXGI_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::Uint8x4:
- return DXGI_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Sint8x2:
- return DXGI_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Sint8x4:
- return DXGI_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::Unorm8x2:
- return DXGI_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::Unorm8x4:
- return DXGI_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Snorm8x2:
- return DXGI_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Snorm8x4:
- return DXGI_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::Uint16x2:
- return DXGI_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::Uint16x4:
- return DXGI_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Sint16x2:
- return DXGI_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Sint16x4:
- return DXGI_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::Unorm16x2:
- return DXGI_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::Unorm16x4:
- return DXGI_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Snorm16x2:
- return DXGI_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Snorm16x4:
- return DXGI_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Float16x2:
- return DXGI_FORMAT_R16G16_FLOAT;
- case wgpu::VertexFormat::Float16x4:
- return DXGI_FORMAT_R16G16B16A16_FLOAT;
- case wgpu::VertexFormat::Float32:
- return DXGI_FORMAT_R32_FLOAT;
- case wgpu::VertexFormat::Float32x2:
- return DXGI_FORMAT_R32G32_FLOAT;
- case wgpu::VertexFormat::Float32x3:
- return DXGI_FORMAT_R32G32B32_FLOAT;
- case wgpu::VertexFormat::Float32x4:
- return DXGI_FORMAT_R32G32B32A32_FLOAT;
- case wgpu::VertexFormat::Uint32:
- return DXGI_FORMAT_R32_UINT;
- case wgpu::VertexFormat::Uint32x2:
- return DXGI_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::Uint32x3:
- return DXGI_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::Uint32x4:
- return DXGI_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Sint32:
- return DXGI_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Sint32x2:
- return DXGI_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Sint32x3:
- return DXGI_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Sint32x4:
- return DXGI_FORMAT_R32G32B32A32_SINT;
- default:
- UNREACHABLE();
- }
- }
-
- D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
- case wgpu::VertexStepMode::Instance:
- return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
- }
- }
-
- D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
- case wgpu::PrimitiveTopology::LineList:
- return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
- case wgpu::PrimitiveTopology::LineStrip:
- return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
- }
- }
-
- D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
- wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::LineStrip:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
- case wgpu::PrimitiveTopology::TriangleList:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
- }
- }
-
- D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return D3D12_CULL_MODE_NONE;
- case wgpu::CullMode::Front:
- return D3D12_CULL_MODE_FRONT;
- case wgpu::CullMode::Back:
- return D3D12_CULL_MODE_BACK;
- }
- }
-
- D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return D3D12_BLEND_ZERO;
- case wgpu::BlendFactor::One:
- return D3D12_BLEND_ONE;
- case wgpu::BlendFactor::Src:
- return D3D12_BLEND_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return D3D12_BLEND_INV_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return D3D12_BLEND_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return D3D12_BLEND_INV_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return D3D12_BLEND_DEST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return D3D12_BLEND_INV_DEST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return D3D12_BLEND_DEST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return D3D12_BLEND_INV_DEST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return D3D12_BLEND_SRC_ALPHA_SAT;
- case wgpu::BlendFactor::Constant:
- return D3D12_BLEND_BLEND_FACTOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return D3D12_BLEND_INV_BLEND_FACTOR;
- }
- }
-
- // When a blend factor is defined for the alpha channel, any of the factors that don't
- // explicitly state that they apply to alpha should be treated as their explicitly-alpha
- // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
- D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Src:
- return D3D12_BLEND_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrc:
- return D3D12_BLEND_INV_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return D3D12_BLEND_DEST_ALPHA;
- case wgpu::BlendFactor::OneMinusDst:
- return D3D12_BLEND_INV_DEST_ALPHA;
-
- // Other blend factors translate to the same D3D12 enum as the color blend factors.
- default:
- return D3D12Blend(factor);
- }
- }
-
- D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return D3D12_BLEND_OP_ADD;
- case wgpu::BlendOperation::Subtract:
- return D3D12_BLEND_OP_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return D3D12_BLEND_OP_REV_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return D3D12_BLEND_OP_MIN;
- case wgpu::BlendOperation::Max:
- return D3D12_BLEND_OP_MAX;
- }
- }
-
- uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
- D3D12_COLOR_WRITE_ENABLE_RED,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
- D3D12_COLOR_WRITE_ENABLE_GREEN,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
- D3D12_COLOR_WRITE_ENABLE_BLUE,
- "ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
- D3D12_COLOR_WRITE_ENABLE_ALPHA,
- "ColorWriteMask values must match");
- return static_cast<uint8_t>(writeMask);
- }
-
- D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
- D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
- blendDesc.BlendEnable = state->blend != nullptr;
- if (blendDesc.BlendEnable) {
- blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
- blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
- blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
- blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
- blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
- blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
- }
- blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
- blendDesc.LogicOpEnable = false;
- blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
- return blendDesc;
- }
-
- D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
- switch (op) {
- case wgpu::StencilOperation::Keep:
- return D3D12_STENCIL_OP_KEEP;
- case wgpu::StencilOperation::Zero:
- return D3D12_STENCIL_OP_ZERO;
- case wgpu::StencilOperation::Replace:
- return D3D12_STENCIL_OP_REPLACE;
- case wgpu::StencilOperation::IncrementClamp:
- return D3D12_STENCIL_OP_INCR_SAT;
- case wgpu::StencilOperation::DecrementClamp:
- return D3D12_STENCIL_OP_DECR_SAT;
- case wgpu::StencilOperation::Invert:
- return D3D12_STENCIL_OP_INVERT;
- case wgpu::StencilOperation::IncrementWrap:
- return D3D12_STENCIL_OP_INCR;
- case wgpu::StencilOperation::DecrementWrap:
- return D3D12_STENCIL_OP_DECR;
- }
- }
-
- D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
- D3D12_DEPTH_STENCILOP_DESC desc;
-
- desc.StencilFailOp = StencilOp(descriptor.failOp);
- desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
- desc.StencilPassOp = StencilOp(descriptor.passOp);
- desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
-
- return desc;
- }
-
- D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
- D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
- mDepthStencilDescriptor.DepthEnable = TRUE;
- mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
- ? D3D12_DEPTH_WRITE_MASK_ALL
- : D3D12_DEPTH_WRITE_MASK_ZERO;
- mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
-
- mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
- mDepthStencilDescriptor.StencilReadMask =
- static_cast<UINT8>(descriptor->stencilReadMask);
- mDepthStencilDescriptor.StencilWriteMask =
- static_cast<UINT8>(descriptor->stencilWriteMask);
-
- mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
- mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
- return mDepthStencilDescriptor;
- }
-
- D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
- wgpu::PrimitiveTopology primitiveTopology,
- wgpu::IndexFormat indexFormat) {
- if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
- primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
- }
-
- switch (indexFormat) {
- case wgpu::IndexFormat::Uint16:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
- case wgpu::IndexFormat::Uint32:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
- case wgpu::IndexFormat::Undefined:
- return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
- }
- }
-
- } // anonymous namespace
-
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
- }
-
- MaybeError RenderPipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
- uint32_t compileFlags = 0;
-
- if (!device->IsToggleEnabled(Toggle::UseDXC) &&
- !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
- compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
- }
-
- if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
- compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
- }
-
- // SPRIV-cross does matrix multiplication expecting row major matrices
- compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
-
- // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
- // strictness is not enabled. See crbug.com/tint/976.
- compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
-
- D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
-
- PerStage<ProgrammableStage> pipelineStages = GetAllStages();
-
- PerStage<D3D12_SHADER_BYTECODE*> shaders;
- shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
- shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
-
- PerStage<CompiledShader> compiledShader;
-
- for (auto stage : IterateStages(GetStageMask())) {
- DAWN_TRY_ASSIGN(
- compiledShader[stage],
- ToBackend(pipelineStages[stage].module)
- ->Compile(pipelineStages[stage], stage, ToBackend(GetLayout()), compileFlags));
- *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
- }
-
- mFirstOffsetInfo = compiledShader[SingleShaderStage::Vertex].firstOffsetInfo;
-
- PipelineLayout* layout = ToBackend(GetLayout());
-
- descriptorD3D12.pRootSignature = layout->GetRootSignature();
-
- // D3D12 logs warnings if any empty input state is used
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
- if (GetAttributeLocationsUsed().any()) {
- descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
- }
-
- descriptorD3D12.IBStripCutValue =
- ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
-
- descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
- descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
- descriptorD3D12.RasterizerState.FrontCounterClockwise =
- (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
- descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
- descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
- descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
- descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
- descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
- descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
- descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
- descriptorD3D12.RasterizerState.ConservativeRaster =
- D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
-
- if (HasDepthStencilAttachment()) {
- descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
- }
-
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
- D3D12TextureFormat(GetColorAttachmentFormat(i));
- descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
- ComputeColorDesc(GetColorTargetState(i));
- }
- descriptorD3D12.NumRenderTargets = static_cast<uint32_t>(GetColorAttachmentsMask().count());
-
- descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
- descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
-
- descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
-
- descriptorD3D12.SampleMask = GetSampleMask();
- descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
- descriptorD3D12.SampleDesc.Count = GetSampleCount();
- descriptorD3D12.SampleDesc.Quality = 0;
-
- mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
-
- DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
- &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
- "D3D12 create graphics pipeline state"));
-
- SetLabelImpl();
-
- return {};
- }
-
- RenderPipeline::~RenderPipeline() = default;
-
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
- }
-
- D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
- return mD3d12PrimitiveTopology;
- }
-
- ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
- return mPipelineState.Get();
- }
-
- const FirstOffsetInfo& RenderPipeline::GetFirstOffsetInfo() const {
- return mFirstOffsetInfo;
- }
-
- void RenderPipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
- }
-
- D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
- unsigned int count = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
-
- const VertexAttributeInfo& attribute = GetAttribute(loc);
-
- // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
- // SemanticIndex N
- inputElementDescriptor.SemanticName = "TEXCOORD";
- inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
- inputElementDescriptor.Format = VertexFormatType(attribute.format);
- inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
-
- const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
-
- inputElementDescriptor.AlignedByteOffset = attribute.offset;
- inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
- if (inputElementDescriptor.InputSlotClass ==
- D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
- inputElementDescriptor.InstanceDataStepRate = 0;
- } else {
- inputElementDescriptor.InstanceDataStepRate = 1;
- }
- }
-
- D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
- inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
- inputLayoutDescriptor.NumElements = count;
- return inputLayoutDescriptor;
- }
-
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
deleted file mode 100644
index 66f0ea66e46..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
-#define DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
-
-#include "dawn_native/RenderPipeline.h"
-
-#include "dawn_native/d3d12/ShaderModuleD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- RenderPipeline() = delete;
-
- MaybeError Initialize() override;
-
- D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
- ID3D12PipelineState* GetPipelineState() const;
-
- const FirstOffsetInfo& GetFirstOffsetInfo() const;
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~RenderPipeline() override;
-
- void DestroyImpl() override;
-
- using RenderPipelineBase::RenderPipelineBase;
- D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
- std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
-
- D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
- ComPtr<ID3D12PipelineState> mPipelineState;
- FirstOffsetInfo mFirstOffsetInfo;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_RENDERPIPELINED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
deleted file mode 100644
index ffc1e1b85a4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-
-#include "dawn_native/d3d12/AdapterD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/Forward.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- ResidencyManager::ResidencyManager(Device* device)
- : mDevice(device),
- mResidencyManagementEnabled(
- device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
- UpdateVideoMemoryInfo();
- }
-
- // Increments number of locks on a heap to ensure the heap remains resident.
- MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
-
- // If the heap isn't already resident, make it resident.
- if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
- ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
- uint64_t size = pageable->GetSize();
-
- DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
- size, 1, &d3d12Pageable));
- }
-
- // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
- if (pageable->IsInResidencyLRUCache()) {
- pageable->RemoveFromList();
- }
-
- pageable->IncrementResidencyLock();
-
- return {};
- }
-
- // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
- // inserted into the LRU cache and becomes eligible for eviction.
- void ResidencyManager::UnlockAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return;
- }
-
- ASSERT(pageable->IsResidencyLocked());
- ASSERT(!pageable->IsInResidencyLRUCache());
- pageable->DecrementResidencyLock();
-
- // If another lock still exists on the heap, nothing further should be done.
- if (pageable->IsResidencyLocked()) {
- return;
- }
-
- // When all locks have been removed, the resource remains resident and becomes tracked in
- // the corresponding LRU.
- TrackResidentAllocation(pageable);
- }
-
- // Returns the appropriate MemorySegmentInfo for a given MemorySegment.
- ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
- MemorySegment memorySegment) {
- switch (memorySegment) {
- case MemorySegment::Local:
- return &mVideoMemoryInfo.local;
- case MemorySegment::NonLocal:
- ASSERT(!mDevice->GetDeviceInfo().isUMA);
- return &mVideoMemoryInfo.nonLocal;
- default:
- UNREACHABLE();
- }
- }
-
- // Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
- // competition for device memory. Returns the amount of memory reserved, which may be less
- // that the requested reservation when under pressure.
- uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
- uint64_t requestedReservationSize) {
- MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
-
- segmentInfo->externalRequest = requestedReservationSize;
-
- UpdateMemorySegmentInfo(segmentInfo);
-
- return segmentInfo->externalReservation;
- }
-
- void ResidencyManager::UpdateVideoMemoryInfo() {
- UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
- if (!mDevice->GetDeviceInfo().isUMA) {
- UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
- }
- }
-
- void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
- DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
-
- ToBackend(mDevice->GetAdapter())
- ->GetHardwareAdapter()
- ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
-
- // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
- // system, and may be lower than expected in certain scenarios. Under memory pressure, we
- // cap the external reservation to half the available budget, which prevents the external
- // component from consuming a disproportionate share of memory and ensures that Dawn can
- // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
- // and subject to future experimentation.
- segmentInfo->externalReservation =
- std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
-
- segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
-
- // If we're restricting the budget for testing, leave the budget as is.
- if (mRestrictBudgetForTesting) {
- return;
- }
-
- // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
- // decreases fluctuations in the operating-system-defined budget, which improves stability
- // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
- // chosen and subject to future experimentation.
- static constexpr float kBudgetCap = 0.95;
- segmentInfo->budget =
- (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
- }
-
- // Removes a heap from the LRU and returns the least recently used heap when possible. Returns
- // nullptr when nothing further can be evicted.
- ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
- MemorySegmentInfo* memorySegment) {
- // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
- // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
- // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
- // the process budget and starving Dawn, which will cause thrash.
- if (memorySegment->lruCache.empty()) {
- return nullptr;
- }
-
- Pageable* pageable = memorySegment->lruCache.head()->value();
-
- ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
-
- // If the next candidate for eviction was inserted into the LRU during the current serial,
- // it is because more memory is being used in a single command list than is available.
- // In this scenario, we cannot make any more resources resident and thrashing must occur.
- if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
- return nullptr;
- }
-
- // We must ensure that any previous use of a resource has completed before the resource can
- // be evicted.
- if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
- DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
- }
-
- pageable->RemoveFromList();
- return pageable;
- }
-
- MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
- MemorySegment memorySegment) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
-
- uint64_t bytesEvicted;
- DAWN_TRY_ASSIGN(bytesEvicted,
- EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
- DAWN_UNUSED(bytesEvicted);
-
- return {};
- }
-
- // Any time we need to make something resident, we must check that we have enough free memory to
- // make the new object resident while also staying within budget. If there isn't enough
- // memory, we should evict until there is. Returns the number of bytes evicted.
- ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
- uint64_t sizeToMakeResident,
- MemorySegmentInfo* memorySegment) {
- ASSERT(mResidencyManagementEnabled);
-
- UpdateMemorySegmentInfo(memorySegment);
-
- uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
-
- // Return when we can call MakeResident and remain under budget.
- if (memoryUsageAfterMakeResident < memorySegment->budget) {
- return 0;
- }
-
- std::vector<ID3D12Pageable*> resourcesToEvict;
- uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
- uint64_t sizeEvicted = 0;
- while (sizeEvicted < sizeNeededToBeUnderBudget) {
- Pageable* pageable;
- DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
-
- // If no heap was returned, then nothing more can be evicted.
- if (pageable == nullptr) {
- break;
- }
-
- sizeEvicted += pageable->GetSize();
- resourcesToEvict.push_back(pageable->GetD3D12Pageable());
- }
-
- if (resourcesToEvict.size() > 0) {
- DAWN_TRY(CheckHRESULT(
- mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
- "Evicting resident heaps to free memory"));
- }
-
- return sizeEvicted;
- }
-
- // Given a list of heaps that are pending usage, this function will estimate memory needed,
- // evict resources until enough space is available, then make resident any heaps scheduled for
- // usage.
- MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
- if (!mResidencyManagementEnabled) {
- return {};
- }
-
- std::vector<ID3D12Pageable*> localHeapsToMakeResident;
- std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
- uint64_t localSizeToMakeResident = 0;
- uint64_t nonLocalSizeToMakeResident = 0;
-
- ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
- for (size_t i = 0; i < heapCount; i++) {
- Heap* heap = heaps[i];
-
- // Heaps that are locked resident are not tracked in the LRU cache.
- if (heap->IsResidencyLocked()) {
- continue;
- }
-
- if (heap->IsInResidencyLRUCache()) {
- // If the heap is already in the LRU, we must remove it and append again below to
- // update its position in the LRU.
- heap->RemoveFromList();
- } else {
- if (heap->GetMemorySegment() == MemorySegment::Local) {
- localSizeToMakeResident += heap->GetSize();
- localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
- } else {
- nonLocalSizeToMakeResident += heap->GetSize();
- nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
- }
- }
-
- // If we submit a command list to the GPU, we must ensure that heaps referenced by that
- // command list stay resident at least until that command list has finished execution.
- // Setting this serial unnecessarily can leave the LRU in a state where nothing is
- // eligible for eviction, even though some evictions may be possible.
- heap->SetLastSubmission(pendingCommandSerial);
-
- // Insert the heap into the appropriate LRU.
- TrackResidentAllocation(heap);
- }
-
- if (localSizeToMakeResident > 0) {
- return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
- localHeapsToMakeResident.size(),
- localHeapsToMakeResident.data());
- }
-
- if (nonLocalSizeToMakeResident > 0) {
- ASSERT(!mDevice->GetDeviceInfo().isUMA);
- return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
- nonLocalHeapsToMakeResident.size(),
- nonLocalHeapsToMakeResident.data());
- }
-
- return {};
- }
-
- MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
- uint64_t sizeToMakeResident,
- uint64_t numberOfObjectsToMakeResident,
- ID3D12Pageable** allocations) {
- uint64_t bytesEvicted;
- DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
- DAWN_UNUSED(bytesEvicted);
-
- // Note that MakeResident is a synchronous function and can add a significant
- // overhead to command recording. In the future, it may be possible to decrease this
- // overhead by using MakeResident on a secondary thread, or by instead making use of
- // the EnqueueMakeResident function (which is not available on all Windows 10
- // platforms).
- HRESULT hr =
- mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
-
- // A MakeResident call can fail if there's not enough available memory. This
- // could occur when there's significant fragmentation or if the allocation size
- // estimates are incorrect. We may be able to continue execution by evicting some
- // more memory and calling MakeResident again.
- while (FAILED(hr)) {
- constexpr uint32_t kAdditonalSizeToEvict = 50000000; // 50MB
-
- uint64_t sizeEvicted = 0;
-
- DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
-
- // If nothing can be evicted after MakeResident has failed, we cannot continue
- // execution and must throw a fatal error.
- if (sizeEvicted == 0) {
- return DAWN_OUT_OF_MEMORY_ERROR(
- "MakeResident has failed due to excessive video memory usage.");
- }
-
- hr =
- mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
- }
-
- return {};
- }
-
- // Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
- // become resident within the current serial. Failing to call this function when an allocation
- // is implicitly made resident will cause the residency manager to view the allocation as
- // non-resident and call MakeResident - which will make D3D12's internal residency refcount on
- // the allocation out of sync with Dawn.
- void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
- if (!mResidencyManagementEnabled) {
- return;
- }
-
- ASSERT(pageable->IsInList() == false);
- GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
- }
-
- // Places an artifical cap on Dawn's budget so we can test in a predictable manner. If used,
- // this function must be called before any resources have been created.
- void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
- ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
- ASSERT(!mRestrictBudgetForTesting);
-
- mRestrictBudgetForTesting = true;
- UpdateVideoMemoryInfo();
-
- // Dawn has a non-zero memory usage even before any resources have been created, and this
- // value can vary depending on the environment Dawn is running in. By adding this in
- // addition to the artificial budget cap, we can create a predictable and reproducible
- // budget for testing.
- mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
- if (!mDevice->GetDeviceInfo().isUMA) {
- mVideoMemoryInfo.nonLocal.budget =
- mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
- }
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
deleted file mode 100644
index 02fad75c32a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
-#define DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
-
-#include "common/LinkedList.h"
-#include "dawn_native/D3D12Backend.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/dawn_platform.h"
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
- class Heap;
- class Pageable;
-
- class ResidencyManager {
- public:
- ResidencyManager(Device* device);
-
- MaybeError LockAllocation(Pageable* pageable);
- void UnlockAllocation(Pageable* pageable);
-
- MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
- MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
-
- uint64_t SetExternalMemoryReservation(MemorySegment segment,
- uint64_t requestedReservationSize);
-
- void TrackResidentAllocation(Pageable* pageable);
-
- void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
-
- private:
- struct MemorySegmentInfo {
- const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
- LinkedList<Pageable> lruCache = {};
- uint64_t budget = 0;
- uint64_t usage = 0;
- uint64_t externalReservation = 0;
- uint64_t externalRequest = 0;
- };
-
- struct VideoMemoryInfo {
- MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
- MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
- };
-
- MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
- ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
- MemorySegmentInfo* memorySegment);
- ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
- MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
- uint64_t sizeToMakeResident,
- uint64_t numberOfObjectsToMakeResident,
- ID3D12Pageable** allocations);
- void UpdateVideoMemoryInfo();
- void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
-
- Device* mDevice;
- bool mResidencyManagementEnabled = false;
- bool mRestrictBudgetForTesting = false;
- VideoMemoryInfo mVideoMemoryInfo = {};
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
deleted file mode 100644
index 166d59f8177..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
- namespace {
- MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
- if (device->GetDeviceInfo().isUMA) {
- return MemorySegment::Local;
- }
-
- D3D12_HEAP_PROPERTIES heapProperties =
- device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
-
- if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
- return MemorySegment::Local;
- }
-
- return MemorySegment::NonLocal;
- }
-
- D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
- switch (resourceHeapKind) {
- case Readback_OnlyBuffers:
- case Readback_AllBuffersAndTextures:
- return D3D12_HEAP_TYPE_READBACK;
- case Default_AllBuffersAndTextures:
- case Default_OnlyBuffers:
- case Default_OnlyNonRenderableOrDepthTextures:
- case Default_OnlyRenderableOrDepthTextures:
- return D3D12_HEAP_TYPE_DEFAULT;
- case Upload_OnlyBuffers:
- case Upload_AllBuffersAndTextures:
- return D3D12_HEAP_TYPE_UPLOAD;
- case EnumCount:
- UNREACHABLE();
- }
- }
-
- D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
- switch (resourceHeapKind) {
- case Default_AllBuffersAndTextures:
- case Readback_AllBuffersAndTextures:
- case Upload_AllBuffersAndTextures:
- return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
- case Default_OnlyBuffers:
- case Readback_OnlyBuffers:
- case Upload_OnlyBuffers:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
- case Default_OnlyNonRenderableOrDepthTextures:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
- case Default_OnlyRenderableOrDepthTextures:
- return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
- case EnumCount:
- UNREACHABLE();
- }
- }
-
- ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
- D3D12_HEAP_TYPE heapType,
- D3D12_RESOURCE_FLAGS flags,
- uint32_t resourceHeapTier) {
- if (resourceHeapTier >= 2) {
- switch (heapType) {
- case D3D12_HEAP_TYPE_UPLOAD:
- return Upload_AllBuffersAndTextures;
- case D3D12_HEAP_TYPE_DEFAULT:
- return Default_AllBuffersAndTextures;
- case D3D12_HEAP_TYPE_READBACK:
- return Readback_AllBuffersAndTextures;
- default:
- UNREACHABLE();
- }
- }
-
- switch (dimension) {
- case D3D12_RESOURCE_DIMENSION_BUFFER: {
- switch (heapType) {
- case D3D12_HEAP_TYPE_UPLOAD:
- return Upload_OnlyBuffers;
- case D3D12_HEAP_TYPE_DEFAULT:
- return Default_OnlyBuffers;
- case D3D12_HEAP_TYPE_READBACK:
- return Readback_OnlyBuffers;
- default:
- UNREACHABLE();
- }
- break;
- }
- case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
- case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
- case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
- switch (heapType) {
- case D3D12_HEAP_TYPE_DEFAULT: {
- if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
- (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
- return Default_OnlyRenderableOrDepthTextures;
- }
- return Default_OnlyNonRenderableOrDepthTextures;
- }
-
- default:
- UNREACHABLE();
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-
- uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
- uint32_t sampleCount,
- uint64_t requestedAlignment) {
- switch (resourceHeapKind) {
- // Small resources can take advantage of smaller alignments. For example,
- // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
- // Must be non-depth or without render-target to use small resource alignment.
- // This also applies to MSAA textures (4MB => 64KB).
- //
- // Note: Only known to be used for small textures; however, MSDN suggests
- // it could be extended for more cases. If so, this could default to always
- // attempt small resource placement.
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
- case Default_OnlyNonRenderableOrDepthTextures:
- return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
- : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
- default:
- return requestedAlignment;
- }
- }
-
- bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
- // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
- // textures, or typeless resources
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
- return !IsTypeless(resourceDescriptor.Format) &&
- resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
- (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
- D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
- }
-
- } // namespace
-
- ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
- mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
- ? mDevice->GetDeviceInfo().resourceHeapTier
- : 1;
-
- for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
- const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
- mHeapAllocators[i] = std::make_unique<HeapAllocator>(
- mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
- GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
- mPooledHeapAllocators[i] =
- std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
- mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
- kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
- }
- }
-
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage) {
- // In order to suppress a warning in the D3D12 debug layer, we need to specify an
- // optimized clear value. As there are no negative consequences when picking a mismatched
- // clear value, we use zero as the optimized clear value. This also enables fast clears on
- // some architectures.
- D3D12_CLEAR_VALUE zero{};
- D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
- if (IsClearValueOptimizable(resourceDescriptor)) {
- zero.Format = resourceDescriptor.Format;
- optimizedClearValue = &zero;
- }
-
- // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
- // For very large resources, there is no benefit to suballocate.
- // For very small resources, it is inefficent to suballocate given the min. heap
- // size could be much larger then the resource allocation.
- // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
- ResourceHeapAllocation subAllocation;
- DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
- optimizedClearValue, initialUsage));
- if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(subAllocation);
- }
-
- // If sub-allocation fails, fall-back to direct allocation (committed resource).
- ResourceHeapAllocation directAllocation;
- DAWN_TRY_ASSIGN(directAllocation,
- CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
- initialUsage));
- if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(directAllocation);
- }
-
- // If direct allocation fails, the system is probably out of memory.
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
- }
-
- void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
- for (ResourceHeapAllocation& allocation :
- mAllocationsToDelete.IterateUpTo(completedSerial)) {
- if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
- FreeMemory(allocation);
- }
- }
- mAllocationsToDelete.ClearUpTo(completedSerial);
- }
-
- void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
- if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return;
- }
-
- mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
-
- // Directly allocated ResourceHeapAllocations are created with a heap object that must be
- // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
- // for more information.
- if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
- delete allocation.GetResourceHeap();
- }
-
- // Invalidate the allocation immediately in case one accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation.Invalidate();
-
- ASSERT(allocation.GetD3D12Resource() == nullptr);
- }
-
- void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
- ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
-
- D3D12_HEAP_PROPERTIES heapProp;
- allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
-
- const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
-
- const size_t resourceHeapKindIndex =
- GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
- resourceDescriptor.Flags, mResourceHeapTier);
-
- mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
- }
-
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage) {
- const ResourceHeapKind resourceHeapKind =
- GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
- requestedResourceDescriptor.Flags, mResourceHeapTier);
-
- D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
- resourceDescriptor.Alignment = GetResourcePlacementAlignment(
- resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
- requestedResourceDescriptor.Alignment);
-
- // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
- // twice.
- D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
-
- // If the requested resource alignment was rejected, let D3D tell us what the
- // required alignment is for this resource.
- if (resourceDescriptor.Alignment != 0 &&
- resourceDescriptor.Alignment != resourceInfo.Alignment) {
- resourceDescriptor.Alignment = 0;
- resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- }
-
- // If d3d tells us the resource size is invalid, treat the error as OOM.
- // Otherwise, creating the resource could cause a device loss (too large).
- // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
- // incorrectly allocate a mismatched size.
- if (resourceInfo.SizeInBytes == 0 ||
- resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
- }
-
- BuddyMemoryAllocator* allocator =
- mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
-
- ResourceMemoryAllocation allocation;
- DAWN_TRY_ASSIGN(allocation,
- allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
- if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return ResourceHeapAllocation{}; // invalid
- }
-
- Heap* heap = ToBackend(allocation.GetResourceHeap());
-
- // Before calling CreatePlacedResource, we must ensure the target heap is resident.
- // CreatePlacedResource will fail if it is not.
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
-
- // With placed resources, a single heap can be reused.
- // The resource placed at an offset is only reclaimed
- // upon Tick or after the last command list using the resource has completed
- // on the GPU. This means the same physical memory is not reused
- // within the same command-list and does not require additional synchronization (aliasing
- // barrier).
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
- ComPtr<ID3D12Resource> placedResource;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreatePlacedResource(
- heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
- optimizedClearValue, IID_PPV_ARGS(&placedResource)),
- "ID3D12Device::CreatePlacedResource"));
-
- // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
- // will insert it into the residency LRU.
- mDevice->GetResidencyManager()->UnlockAllocation(heap);
-
- return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
- std::move(placedResource), heap};
- }
-
- ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage) {
- D3D12_HEAP_PROPERTIES heapProperties;
- heapProperties.Type = heapType;
- heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
- heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
- heapProperties.CreationNodeMask = 0;
- heapProperties.VisibleNodeMask = 0;
-
- // If d3d tells us the resource size is invalid, treat the error as OOM.
- // Otherwise, creating the resource could cause a device loss (too large).
- // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
- // incorrectly allocate a mismatched size.
- D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
- mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- if (resourceInfo.SizeInBytes == 0 ||
- resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
- }
-
- if (resourceInfo.SizeInBytes > kMaxHeapSize) {
- return ResourceHeapAllocation{}; // Invalid
- }
-
- // CreateCommittedResource will implicitly make the created resource resident. We must
- // ensure enough free memory exists before allocating to avoid an out-of-memory error when
- // overcommitted.
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
- resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
-
- // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
- // provided to CreateCommittedResource.
- ComPtr<ID3D12Resource> committedResource;
- DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreateCommittedResource(
- &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
- optimizedClearValue, IID_PPV_ARGS(&committedResource)),
- "ID3D12Device::CreateCommittedResource"));
-
- // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
- // resource allocation. Because Dawn's memory residency management occurs at the resource
- // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
- // object. This object is created manually, and must be deleted manually upon deallocation
- // of the committed resource.
- Heap* heap = new Heap(committedResource, GetMemorySegment(mDevice, heapType),
- resourceInfo.SizeInBytes);
-
- // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
- // track this to avoid calling MakeResident a second time.
- mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
-
- return ResourceHeapAllocation{info,
- /*offset*/ 0, std::move(committedResource), heap};
- }
-
- void ResourceAllocatorManager::DestroyPool() {
- for (auto& alloc : mPooledHeapAllocators) {
- alloc->DestroyPool();
- }
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
deleted file mode 100644
index 7bbf53ae0bb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
-#define DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/BuddyMemoryAllocator.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/PooledResourceMemoryAllocator.h"
-#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-
-#include <array>
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- // Resource heap types + flags combinations are named after the D3D constants.
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
- enum ResourceHeapKind {
-
- // Resource heap tier 2
- // Allows resource heaps to contain all buffer and textures types.
- // This enables better heap re-use by avoiding the need for separate heaps and
- // also reduces fragmentation.
- Readback_AllBuffersAndTextures,
- Upload_AllBuffersAndTextures,
- Default_AllBuffersAndTextures,
-
- // Resource heap tier 1
- // Resource heaps only support types from a single resource category.
- Readback_OnlyBuffers,
- Upload_OnlyBuffers,
- Default_OnlyBuffers,
-
- Default_OnlyNonRenderableOrDepthTextures,
- Default_OnlyRenderableOrDepthTextures,
-
- EnumCount,
- InvalidEnum = EnumCount,
- };
-
- // Manages a list of resource allocators used by the device to create resources using
- // multiple allocation methods.
- class ResourceAllocatorManager {
- public:
- ResourceAllocatorManager(Device* device);
-
- ResultOrError<ResourceHeapAllocation> AllocateMemory(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage);
-
- void DeallocateMemory(ResourceHeapAllocation& allocation);
-
- void Tick(ExecutionSerial lastCompletedSerial);
-
- void DestroyPool();
-
- private:
- void FreeMemory(ResourceHeapAllocation& allocation);
-
- ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage);
-
- ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- const D3D12_CLEAR_VALUE* optimizedClearValue,
- D3D12_RESOURCE_STATES initialUsage);
-
- Device* mDevice;
- uint32_t mResourceHeapTier;
-
- static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll; // 32GB
- static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll; // 4MB
-
- std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
- mSubAllocatedResourceAllocators;
- std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
-
- std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
- mPooledHeapAllocators;
-
- SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
deleted file mode 100644
index c7f8c68d1df..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-
-#include <utility>
-
-namespace dawn_native { namespace d3d12 {
- ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
- uint64_t offset,
- ComPtr<ID3D12Resource> resource,
- Heap* heap)
- : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
- ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
- }
-
- void ResourceHeapAllocation::Invalidate() {
- ResourceMemoryAllocation::Invalidate();
- mResource.Reset();
- }
-
- ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
- return mResource.Get();
- }
-
- D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
- return mResource->GetGPUVirtualAddress();
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
deleted file mode 100644
index 7f1fe0a9d85..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
-#define DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Heap;
-
- class ResourceHeapAllocation : public ResourceMemoryAllocation {
- public:
- ResourceHeapAllocation() = default;
- ResourceHeapAllocation(const AllocationInfo& info,
- uint64_t offset,
- ComPtr<ID3D12Resource> resource,
- Heap* heap);
- ~ResourceHeapAllocation() override = default;
- ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
- ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
-
- void Invalidate() override;
-
- ID3D12Resource* GetD3D12Resource() const;
- D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
-
- private:
- ComPtr<ID3D12Resource> mResource;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
deleted file mode 100644
index 0671cc428dc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/SamplerD3D12.h"
-
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
- case wgpu::AddressMode::MirrorRepeat:
- return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
- case wgpu::AddressMode::ClampToEdge:
- return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
- }
- }
- } // namespace
-
- // static
- Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(device, descriptor));
- }
-
- Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor) {
- D3D12_FILTER_TYPE minFilter;
- switch (descriptor->minFilter) {
- case wgpu::FilterMode::Nearest:
- minFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- minFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_TYPE magFilter;
- switch (descriptor->magFilter) {
- case wgpu::FilterMode::Nearest:
- magFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- magFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_TYPE mipmapFilter;
- switch (descriptor->mipmapFilter) {
- case wgpu::FilterMode::Nearest:
- mipmapFilter = D3D12_FILTER_TYPE_POINT;
- break;
- case wgpu::FilterMode::Linear:
- mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
- break;
- }
-
- D3D12_FILTER_REDUCTION_TYPE reduction =
- descriptor->compare == wgpu::CompareFunction::Undefined
- ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
- : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
- mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
- if (mSamplerDesc.MaxAnisotropy > 1) {
- mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
- } else {
- mSamplerDesc.Filter =
- D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
- }
-
- mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
- mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
- mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
- mSamplerDesc.MipLODBias = 0.f;
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
- } else {
- // Still set the function so it's not garbage.
- mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
- }
- mSamplerDesc.MinLOD = descriptor->lodMinClamp;
- mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
- }
-
- const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
- return mSamplerDesc;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h
deleted file mode 100644
index ede374b94b4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_SAMPLERD3D12_H_
-#define DAWNNATIVE_D3D12_SAMPLERD3D12_H_
-
-#include "dawn_native/Sampler.h"
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class Sampler final : public SamplerBase {
- public:
- static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
-
- const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
-
- private:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
- ~Sampler() override = default;
- D3D12_SAMPLER_DESC mSamplerDesc = {};
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_SAMPLERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
deleted file mode 100644
index d1d6f3f011b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/SamplerHeapCacheD3D12.h"
-
-#include "common/Assert.h"
-#include "common/HashUtils.h"
-#include "dawn_native/d3d12/BindGroupD3D12.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/Forward.h"
-#include "dawn_native/d3d12/SamplerD3D12.h"
-#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
- : mSamplers(std::move(samplers)) {
- }
-
- SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
- StagingDescriptorAllocator* allocator,
- std::vector<Sampler*> samplers,
- CPUDescriptorHeapAllocation allocation)
- : mCPUAllocation(std::move(allocation)),
- mSamplers(std::move(samplers)),
- mAllocator(allocator),
- mCache(cache) {
- ASSERT(mCache != nullptr);
- ASSERT(mCPUAllocation.IsValid());
- ASSERT(!mSamplers.empty());
- }
-
- std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
- return std::move(mSamplers);
- }
-
- SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
- // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
- if (mCPUAllocation.IsValid()) {
- mCache->RemoveCacheEntry(this);
- mAllocator->Deallocate(&mCPUAllocation);
- }
-
- ASSERT(!mCPUAllocation.IsValid());
- }
-
- bool SamplerHeapCacheEntry::Populate(Device* device,
- ShaderVisibleDescriptorAllocator* allocator) {
- if (allocator->IsAllocationStillValid(mGPUAllocation)) {
- return true;
- }
-
- ASSERT(!mSamplers.empty());
-
- // Attempt to allocate descriptors for the currently bound shader-visible heaps.
- // If either failed, return early to re-allocate and switch the heaps.
- const uint32_t descriptorCount = mSamplers.size();
- D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
- if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
- &baseCPUDescriptor, &mGPUAllocation)) {
- return false;
- }
-
- // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
- // simple copies per bindgroup, a single non-simple copy could be issued.
- // TODO(dawn:155): Consider doing this optimization.
- device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
- mCPUAllocation.GetBaseDescriptor(),
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-
- return true;
- }
-
- D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
- return mGPUAllocation.GetBaseDescriptor();
- }
-
- ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
- const BindGroup* group,
- StagingDescriptorAllocator* samplerAllocator) {
- const BindGroupLayout* bgl = ToBackend(group->GetLayout());
-
- // If a previously created bindgroup used the same samplers, the backing sampler heap
- // allocation can be reused. The packed list of samplers acts as the key to lookup the
- // allocation in a cache.
- // TODO(dawn:155): Avoid re-allocating the vector each lookup.
- std::vector<Sampler*> samplers;
- samplers.reserve(bgl->GetSamplerDescriptorCount());
-
- for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
- bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
- if (bindingInfo.bindingType == BindingInfoType::Sampler) {
- samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
- }
- }
-
- // Check the cache if there exists a sampler heap allocation that corresponds to the
- // samplers.
- SamplerHeapCacheEntry blueprint(std::move(samplers));
- auto iter = mCache.find(&blueprint);
- if (iter != mCache.end()) {
- return Ref<SamplerHeapCacheEntry>(*iter);
- }
-
- // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
- // real entry below.
- samplers = std::move(blueprint.AcquireSamplers());
-
- CPUDescriptorHeapAllocation allocation;
- DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
-
- const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
- ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
-
- for (uint32_t i = 0; i < samplers.size(); ++i) {
- const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
- d3d12Device->CreateSampler(&samplerDesc,
- allocation.OffsetFrom(samplerSizeIncrement, i));
- }
-
- Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
- this, samplerAllocator, std::move(samplers), std::move(allocation)));
- mCache.insert(entry.Get());
- return std::move(entry);
- }
-
- SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
- }
-
- SamplerHeapCache::~SamplerHeapCache() {
- ASSERT(mCache.empty());
- }
-
- void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
- ASSERT(entry->GetRefCountForTesting() == 0);
- size_t removedCount = mCache.erase(entry);
- ASSERT(removedCount == 1);
- }
-
- size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
- size_t hash = 0;
- for (const Sampler* sampler : entry->mSamplers) {
- HashCombine(&hash, sampler);
- }
- return hash;
- }
-
- bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
- const SamplerHeapCacheEntry* b) const {
- return a->mSamplers == b->mSamplers;
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
deleted file mode 100644
index 3e2cf4c80b5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
-#define DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
-
-#include "common/RefCounted.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
-
-#include <unordered_set>
-
-// |SamplerHeapCacheEntry| maintains a cache of sampler descriptor heap allocations.
-// Each entry represents one or more sampler descriptors that co-exist in a CPU and
-// GPU descriptor heap. The CPU-side allocation is deallocated once the final reference
-// has been released while the GPU-side allocation is deallocated when the GPU is finished.
-//
-// The BindGroupLayout hands out these entries upon constructing the bindgroup. If the entry is not
-// invalid, it will allocate and initialize so it may be reused by another bindgroup.
-//
-// The cache is primary needed for the GPU sampler heap, which is much smaller than the view heap
-// and switches incur expensive pipeline flushes.
-namespace dawn_native { namespace d3d12 {
-
- class BindGroup;
- class Device;
- class Sampler;
- class SamplerHeapCache;
- class StagingDescriptorAllocator;
- class ShaderVisibleDescriptorAllocator;
-
- // Wraps sampler descriptor heap allocations in a cache.
- class SamplerHeapCacheEntry : public RefCounted {
- public:
- SamplerHeapCacheEntry() = default;
- SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
- SamplerHeapCacheEntry(SamplerHeapCache* cache,
- StagingDescriptorAllocator* allocator,
- std::vector<Sampler*> samplers,
- CPUDescriptorHeapAllocation allocation);
- ~SamplerHeapCacheEntry() override;
-
- D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
-
- std::vector<Sampler*>&& AcquireSamplers();
-
- bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
-
- // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
- struct HashFunc {
- size_t operator()(const SamplerHeapCacheEntry* entry) const;
- };
-
- struct EqualityFunc {
- bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
- };
-
- private:
- CPUDescriptorHeapAllocation mCPUAllocation;
- GPUDescriptorHeapAllocation mGPUAllocation;
-
- // Storing raw pointer because the sampler object will be already hashed
- // by the device and will already be unique.
- std::vector<Sampler*> mSamplers;
-
- StagingDescriptorAllocator* mAllocator = nullptr;
- SamplerHeapCache* mCache = nullptr;
- };
-
- // Cache descriptor heap allocations so that we don't create duplicate ones for every
- // BindGroup.
- class SamplerHeapCache {
- public:
- SamplerHeapCache(Device* device);
- ~SamplerHeapCache();
-
- ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
- const BindGroup* group,
- StagingDescriptorAllocator* samplerAllocator);
-
- void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
-
- private:
- Device* mDevice;
-
- using Cache = std::unordered_set<SamplerHeapCacheEntry*,
- SamplerHeapCacheEntry::HashFunc,
- SamplerHeapCacheEntry::EqualityFunc>;
-
- Cache mCache;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
deleted file mode 100644
index 3001c6c1592..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ /dev/null
@@ -1,837 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ShaderModuleD3D12.h"
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "common/Log.h"
-#include "common/WindowsUtils.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/TintUtils.h"
-#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
-#include "dawn_native/d3d12/PlatformFunctions.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-#include <d3dcompiler.h>
-
-#include <tint/tint.h>
-#include <map>
-#include <sstream>
-#include <unordered_map>
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
- ComPtr<IDxcVersionInfo> versionInfo;
- DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
- "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
-
- uint32_t compilerMajor, compilerMinor;
- DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
- "IDxcVersionInfo::GetVersion"));
-
- // Pack both into a single version number.
- return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
- }
-
- uint64_t GetD3DCompilerVersion() {
- return D3D_COMPILER_VERSION;
- }
-
- struct CompareBindingPoint {
- constexpr bool operator()(const tint::transform::BindingPoint& lhs,
- const tint::transform::BindingPoint& rhs) const {
- if (lhs.group != rhs.group) {
- return lhs.group < rhs.group;
- } else {
- return lhs.binding < rhs.binding;
- }
- }
- };
-
- void Serialize(std::stringstream& output, const tint::ast::Access& access) {
- output << access;
- }
-
- void Serialize(std::stringstream& output,
- const tint::transform::BindingPoint& binding_point) {
- output << "(BindingPoint";
- output << " group=" << binding_point.group;
- output << " binding=" << binding_point.binding;
- output << ")";
- }
-
- template <typename T,
- typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
- void Serialize(std::stringstream& output, const T& val) {
- output << val;
- }
-
- template <typename T>
- void Serialize(std::stringstream& output,
- const std::unordered_map<tint::transform::BindingPoint, T>& map) {
- output << "(map";
-
- std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
- map.end());
- for (auto& entry : sorted) {
- output << " ";
- Serialize(output, entry.first);
- output << "=";
- Serialize(output, entry.second);
- }
- output << ")";
- }
-
- void Serialize(std::stringstream& output,
- const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
- output << "(ArrayLengthFromUniformOptions";
- output << " ubo_binding=";
- Serialize(output, arrayLengthFromUniform.ubo_binding);
- output << " bindpoint_to_size_index=";
- Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
- output << ")";
- }
-
- // 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
- std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
- std::ostringstream out;
- out.precision(n);
- out << std::fixed << v;
- return out.str();
- }
-
- std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
- const OverridableConstantScalar* entry,
- double value = 0) {
- switch (dawnType) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- return FloatToStringWithPrecision(entry ? entry->f32
- : static_cast<float>(value));
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
- default:
- UNREACHABLE();
- }
- }
-
- constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
-
- void GetOverridableConstantsDefines(
- std::vector<std::pair<std::string, std::string>>* defineStrings,
- const PipelineConstantEntries* pipelineConstantEntries,
- const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
- std::unordered_set<std::string> overriddenConstants;
-
- // Set pipeline overridden values
- for (const auto& pipelineConstant : *pipelineConstantEntries) {
- const std::string& name = pipelineConstant.first;
- double value = pipelineConstant.second;
-
- overriddenConstants.insert(name);
-
- // This is already validated so `name` must exist
- const auto& moduleConstant = shaderEntryPointConstants->at(name);
-
- defineStrings->emplace_back(
- kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
- GetHLSLValueString(moduleConstant.type, nullptr, value));
- }
-
- // Set shader initialized default values
- for (const auto& iter : *shaderEntryPointConstants) {
- const std::string& name = iter.first;
- if (overriddenConstants.count(name) != 0) {
- // This constant already has overridden value
- continue;
- }
-
- const auto& moduleConstant = shaderEntryPointConstants->at(name);
-
- // Uninitialized default values are okay since they ar only defined to pass
- // compilation but not used
- defineStrings->emplace_back(
- kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
- GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
- }
- }
-
- // The inputs to a shader compilation. These have been intentionally isolated from the
- // device to help ensure that the pipeline cache key contains all inputs for compilation.
- struct ShaderCompilationRequest {
- enum Compiler { FXC, DXC };
-
- // Common inputs
- Compiler compiler;
- const tint::Program* program;
- const char* entryPointName;
- SingleShaderStage stage;
- uint32_t compileFlags;
- bool disableSymbolRenaming;
- tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
- tint::transform::BindingRemapper::AccessControls remappedAccessControls;
- bool isRobustnessEnabled;
- bool usesNumWorkgroups;
- uint32_t numWorkgroupsRegisterSpace;
- uint32_t numWorkgroupsShaderRegister;
- tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
- std::vector<std::pair<std::string, std::string>> defineStrings;
-
- // FXC/DXC common inputs
- bool disableWorkgroupInit;
-
- // FXC inputs
- uint64_t fxcVersion;
-
- // DXC inputs
- uint64_t dxcVersion;
- const D3D12DeviceInfo* deviceInfo;
- bool hasShaderFloat16Feature;
-
- static ResultOrError<ShaderCompilationRequest> Create(
- const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t compileFlags,
- const Device* device,
- const tint::Program* program,
- const EntryPointMetadata& entryPoint,
- const ProgrammableStage& programmableStage) {
- Compiler compiler;
- uint64_t dxcVersion = 0;
- if (device->IsToggleEnabled(Toggle::UseDXC)) {
- compiler = Compiler::DXC;
- DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
- } else {
- compiler = Compiler::FXC;
- }
-
- using tint::transform::BindingPoint;
- using tint::transform::BindingRemapper;
-
- BindingRemapper::BindingPoints remappedBindingPoints;
- BindingRemapper::AccessControls remappedAccessControls;
-
- tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
- arrayLengthFromUniform.ubo_binding = {
- layout->GetDynamicStorageBufferLengthsRegisterSpace(),
- layout->GetDynamicStorageBufferLengthsShaderRegister()};
-
- const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
- const auto& groupBindingInfo = moduleBindingInfo[group];
-
- // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
- // the Tint AST to make the "bindings" decoration match the offset chosen by
- // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
- // assigned to each interface variable.
- for (const auto& it : groupBindingInfo) {
- BindingNumber binding = it.first;
- auto const& bindingInfo = it.second;
- BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
- BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
- bgl->GetShaderRegister(bindingIndex)};
- if (srcBindingPoint != dstBindingPoint) {
- remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
-
- // Declaring a read-only storage buffer in HLSL but specifying a storage
- // buffer in the BGL produces the wrong output. Force read-only storage
- // buffer bindings to be treated as UAV instead of SRV. Internal storage
- // buffer is a storage buffer used in the internal pipeline.
- const bool forceStorageBufferAsUAV =
- (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
- (bgl->GetBindingInfo(bindingIndex).buffer.type ==
- wgpu::BufferBindingType::Storage ||
- bgl->GetBindingInfo(bindingIndex).buffer.type ==
- kInternalStorageBufferBinding));
- if (forceStorageBufferAsUAV) {
- remappedAccessControls.emplace(srcBindingPoint,
- tint::ast::Access::kReadWrite);
- }
- }
-
- // Add arrayLengthFromUniform options
- {
- for (const auto& bindingAndRegisterOffset :
- layout->GetDynamicStorageBufferLengthInfo()[group]
- .bindingAndRegisterOffsets) {
- BindingNumber binding = bindingAndRegisterOffset.binding;
- uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
-
- BindingPoint bindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
- // Get the renamed binding point if it was remapped.
- auto it = remappedBindingPoints.find(bindingPoint);
- if (it != remappedBindingPoints.end()) {
- bindingPoint = it->second;
- }
-
- arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
- registerOffset);
- }
- }
- }
-
- ShaderCompilationRequest request;
- request.compiler = compiler;
- request.program = program;
- request.entryPointName = entryPointName;
- request.stage = stage;
- request.compileFlags = compileFlags;
- request.disableSymbolRenaming =
- device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
- request.remappedBindingPoints = std::move(remappedBindingPoints);
- request.remappedAccessControls = std::move(remappedAccessControls);
- request.isRobustnessEnabled = device->IsRobustnessEnabled();
- request.disableWorkgroupInit =
- device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
- request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
- request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
- request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
- request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
- request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
- request.deviceInfo = &device->GetDeviceInfo();
- request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
-
- GetOverridableConstantsDefines(
- &request.defineStrings, &programmableStage.constants,
- &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
- .overridableConstants);
-
- return std::move(request);
- }
-
- ResultOrError<PersistentCacheKey> CreateCacheKey() const {
- // Generate the WGSL from the Tint program so it's normalized.
- // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
- // compact representation.
- auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
- if (!result.success) {
- std::ostringstream errorStream;
- errorStream << "Tint WGSL failure:" << std::endl;
- errorStream << "Generator: " << result.error << std::endl;
- return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
- }
-
- std::stringstream stream;
-
- // Prefix the key with the type to avoid collisions from another type that could
- // have the same key.
- stream << static_cast<uint32_t>(PersistentKeyType::Shader);
- stream << "\n";
-
- stream << result.wgsl.length();
- stream << "\n";
-
- stream << result.wgsl;
- stream << "\n";
-
- stream << "(ShaderCompilationRequest";
- stream << " compiler=" << compiler;
- stream << " entryPointName=" << entryPointName;
- stream << " stage=" << uint32_t(stage);
- stream << " compileFlags=" << compileFlags;
- stream << " disableSymbolRenaming=" << disableSymbolRenaming;
-
- stream << " remappedBindingPoints=";
- Serialize(stream, remappedBindingPoints);
-
- stream << " remappedAccessControls=";
- Serialize(stream, remappedAccessControls);
-
- stream << " useNumWorkgroups=" << usesNumWorkgroups;
- stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
- stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
-
- stream << " arrayLengthFromUniform=";
- Serialize(stream, arrayLengthFromUniform);
-
- stream << " shaderModel=" << deviceInfo->shaderModel;
- stream << " disableWorkgroupInit=" << disableWorkgroupInit;
- stream << " isRobustnessEnabled=" << isRobustnessEnabled;
- stream << " fxcVersion=" << fxcVersion;
- stream << " dxcVersion=" << dxcVersion;
- stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
-
- stream << " defines={";
- for (const auto& it : defineStrings) {
- stream << " <" << it.first << "," << it.second << ">";
- }
- stream << " }";
-
- stream << ")";
- stream << "\n";
-
- return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
- std::istreambuf_iterator<char>{});
- }
- };
-
- std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
- std::vector<const wchar_t*> arguments;
- if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
- arguments.push_back(L"/Gec");
- }
- if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
- arguments.push_back(L"/Gis");
- }
- constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
- if (compileFlags & d3dCompileFlagsBits) {
- switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
- case D3DCOMPILE_OPTIMIZATION_LEVEL0:
- arguments.push_back(L"/O0");
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL2:
- arguments.push_back(L"/O2");
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL3:
- arguments.push_back(L"/O3");
- break;
- }
- }
- if (compileFlags & D3DCOMPILE_DEBUG) {
- arguments.push_back(L"/Zi");
- }
- if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
- arguments.push_back(L"/Zpr");
- }
- if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
- arguments.push_back(L"/Zpc");
- }
- if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
- arguments.push_back(L"/Gfa");
- }
- if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
- arguments.push_back(L"/Gfp");
- }
- if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
- arguments.push_back(L"/res_may_alias");
- }
-
- if (enable16BitTypes) {
- // enable-16bit-types are only allowed in -HV 2018 (default)
- arguments.push_back(L"/enable-16bit-types");
- }
-
- arguments.push_back(L"-HV");
- arguments.push_back(L"2018");
-
- return arguments;
- }
-
- ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
- IDxcCompiler* dxcCompiler,
- const ShaderCompilationRequest& request,
- const std::string& hlslSource) {
- ComPtr<IDxcBlobEncoding> sourceBlob;
- DAWN_TRY(
- CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
- hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
- "DXC create blob"));
-
- std::wstring entryPointW;
- DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
-
- std::vector<const wchar_t*> arguments =
- GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
-
- // Build defines for overridable constants
- std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
- defineStrings.reserve(request.defineStrings.size());
- for (const auto& it : request.defineStrings) {
- defineStrings.emplace_back(UTF8ToWStr(it.first.c_str()),
- UTF8ToWStr(it.second.c_str()));
- }
-
- std::vector<DxcDefine> dxcDefines;
- dxcDefines.reserve(defineStrings.size());
- for (const auto& d : defineStrings) {
- dxcDefines.push_back({d.first.c_str(), d.second.c_str()});
- }
-
- ComPtr<IDxcOperationResult> result;
- DAWN_TRY(CheckHRESULT(
- dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
- request.deviceInfo->shaderProfiles[request.stage].c_str(),
- arguments.data(), arguments.size(), dxcDefines.data(),
- dxcDefines.size(), nullptr, &result),
- "DXC compile"));
-
- HRESULT hr;
- DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
-
- if (FAILED(hr)) {
- ComPtr<IDxcBlobEncoding> errors;
- DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
-
- return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
- static_cast<char*>(errors->GetBufferPointer()));
- }
-
- ComPtr<IDxcBlob> compiledShader;
- DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
- return std::move(compiledShader);
- }
-
- std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
- struct Flag {
- uint32_t value;
- const char* name;
- };
- constexpr Flag flags[] = {
- // Populated from d3dcompiler.h
-#define F(f) Flag{f, #f}
- F(D3DCOMPILE_DEBUG),
- F(D3DCOMPILE_SKIP_VALIDATION),
- F(D3DCOMPILE_SKIP_OPTIMIZATION),
- F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
- F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
- F(D3DCOMPILE_PARTIAL_PRECISION),
- F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
- F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
- F(D3DCOMPILE_NO_PRESHADER),
- F(D3DCOMPILE_AVOID_FLOW_CONTROL),
- F(D3DCOMPILE_PREFER_FLOW_CONTROL),
- F(D3DCOMPILE_ENABLE_STRICTNESS),
- F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
- F(D3DCOMPILE_IEEE_STRICTNESS),
- F(D3DCOMPILE_RESERVED16),
- F(D3DCOMPILE_RESERVED17),
- F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
- F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
- F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
- F(D3DCOMPILE_ALL_RESOURCES_BOUND),
- F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
- F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
-#undef F
- };
-
- std::string result;
- for (const Flag& f : flags) {
- if ((compileFlags & f.value) != 0) {
- result += f.name + std::string("\n");
- }
- }
-
- // Optimization level must be handled separately as two bits are used, and the values
- // don't map neatly to 0-3.
- constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
- switch (compileFlags & d3dCompileFlagsBits) {
- case D3DCOMPILE_OPTIMIZATION_LEVEL0:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL1:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL2:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
- break;
- case D3DCOMPILE_OPTIMIZATION_LEVEL3:
- result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
- break;
- }
- result += std::string("\n");
-
- return result;
- }
-
- ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
- const ShaderCompilationRequest& request,
- const std::string& hlslSource) {
- const char* targetProfile = nullptr;
- switch (request.stage) {
- case SingleShaderStage::Vertex:
- targetProfile = "vs_5_1";
- break;
- case SingleShaderStage::Fragment:
- targetProfile = "ps_5_1";
- break;
- case SingleShaderStage::Compute:
- targetProfile = "cs_5_1";
- break;
- }
-
- ComPtr<ID3DBlob> compiledShader;
- ComPtr<ID3DBlob> errors;
-
- // Build defines for overridable constants
- const D3D_SHADER_MACRO* pDefines = nullptr;
- std::vector<D3D_SHADER_MACRO> fxcDefines;
- if (request.defineStrings.size() > 0) {
- fxcDefines.reserve(request.defineStrings.size() + 1);
- for (const auto& d : request.defineStrings) {
- fxcDefines.push_back({d.first.c_str(), d.second.c_str()});
- }
- // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
- fxcDefines.push_back({nullptr, nullptr});
- pDefines = fxcDefines.data();
- }
-
- DAWN_INVALID_IF(FAILED(functions->d3dCompile(
- hlslSource.c_str(), hlslSource.length(), nullptr, pDefines, nullptr,
- request.entryPointName, targetProfile, request.compileFlags, 0,
- &compiledShader, &errors)),
- "D3D compile failed with: %s",
- static_cast<char*>(errors->GetBufferPointer()));
-
- return std::move(compiledShader);
- }
-
- ResultOrError<std::string> TranslateToHLSL(const ShaderCompilationRequest& request,
- std::string* remappedEntryPointName) {
- std::ostringstream errorStream;
- errorStream << "Tint HLSL failure:" << std::endl;
-
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
-
- if (request.isRobustnessEnabled) {
- transformManager.Add<tint::transform::Robustness>();
- }
-
- transformManager.Add<tint::transform::BindingRemapper>();
-
- transformManager.Add<tint::transform::SingleEntryPoint>();
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
-
- transformManager.Add<tint::transform::Renamer>();
-
- if (request.disableSymbolRenaming) {
- // We still need to rename HLSL reserved keywords
- transformInputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kHlslKeywords);
- }
-
- // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
- // the remapping but should not be considered a collision because they have
- // different types.
- const bool mayCollide = true;
- transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
- std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
- mayCollide);
-
- tint::Program transformedProgram;
- tint::transform::DataMap transformOutputs;
- DAWN_TRY_ASSIGN(transformedProgram,
- RunTransforms(&transformManager, request.program, transformInputs,
- &transformOutputs, nullptr));
-
- if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
- auto it = data->remappings.find(request.entryPointName);
- if (it != data->remappings.end()) {
- *remappedEntryPointName = it->second;
- } else {
- DAWN_INVALID_IF(!request.disableSymbolRenaming,
- "Could not find remapped name for entry point.");
-
- *remappedEntryPointName = request.entryPointName;
- }
- } else {
- return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
- }
-
- tint::writer::hlsl::Options options;
- options.disable_workgroup_init = request.disableWorkgroupInit;
- if (request.usesNumWorkgroups) {
- options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
- options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
- }
- // TODO(dawn:549): HLSL generation outputs the indices into the
- // array_length_from_uniform buffer that were actually used. When the blob cache can
- // store more than compiled shaders, we should reflect these used indices and store
- // them as well. This would allow us to only upload root constants that are actually
- // read by the shader.
- options.array_length_from_uniform = request.arrayLengthFromUniform;
- auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s",
- result.error);
-
- return std::move(result.hlsl);
- }
-
- template <typename F>
- MaybeError CompileShader(const PlatformFunctions* functions,
- IDxcLibrary* dxcLibrary,
- IDxcCompiler* dxcCompiler,
- ShaderCompilationRequest&& request,
- bool dumpShaders,
- F&& DumpShadersEmitLog,
- CompiledShader* compiledShader) {
- // Compile the source shader to HLSL.
- std::string hlslSource;
- std::string remappedEntryPoint;
- DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(request, &remappedEntryPoint));
- if (dumpShaders) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
- DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
- request.entryPointName = remappedEntryPoint.c_str();
- switch (request.compiler) {
- case ShaderCompilationRequest::Compiler::DXC:
- DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
- CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
- break;
- case ShaderCompilationRequest::Compiler::FXC:
- DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
- CompileShaderFXC(functions, request, hlslSource));
- break;
- }
-
- if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* FXC compile flags */ " << std::endl
- << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
-
- dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
-
- ComPtr<ID3DBlob> disassembly;
- if (FAILED(functions->d3dDisassemble(
- compiledShader->compiledFXCShader->GetBufferPointer(),
- compiledShader->compiledFXCShader->GetBufferSize(), 0, nullptr,
- &disassembly))) {
- dumpedMsg << "D3D disassemble failed" << std::endl;
- } else {
- dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
- }
- DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- return {};
- }
-
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
- }
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
- return InitializeBase(parseResult);
- }
-
- ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
- SingleShaderStage stage,
- PipelineLayout* layout,
- uint32_t compileFlags) {
- ASSERT(!IsError());
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- Device* device = ToBackend(GetDevice());
-
- CompiledShader compiledShader = {};
-
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
-
- const tint::Program* program;
- tint::Program programAsValue;
- if (stage == SingleShaderStage::Vertex) {
- transformManager.Add<tint::transform::FirstIndexOffset>();
- transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
- layout->GetFirstIndexOffsetShaderRegister(),
- layout->GetFirstIndexOffsetRegisterSpace());
-
- tint::transform::DataMap transformOutputs;
- DAWN_TRY_ASSIGN(programAsValue,
- RunTransforms(&transformManager, GetTintProgram(), transformInputs,
- &transformOutputs, nullptr));
-
- if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
- // TODO(dawn:549): Consider adding this information to the pipeline cache once we
- // can store more than the shader blob in it.
- compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
- if (compiledShader.firstOffsetInfo.usesVertexIndex) {
- compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
- }
- compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
- if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
- compiledShader.firstOffsetInfo.instanceIndexOffset =
- data->first_instance_offset;
- }
- }
-
- program = &programAsValue;
- } else {
- program = GetTintProgram();
- }
-
- ShaderCompilationRequest request;
- DAWN_TRY_ASSIGN(
- request, ShaderCompilationRequest::Create(
- programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
- program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
-
- PersistentCacheKey shaderCacheKey;
- DAWN_TRY_ASSIGN(shaderCacheKey, request.CreateCacheKey());
-
- DAWN_TRY_ASSIGN(
- compiledShader.cachedShader,
- device->GetPersistentCache()->GetOrCreate(
- shaderCacheKey, [&](auto doCache) -> MaybeError {
- DAWN_TRY(CompileShader(
- device->GetFunctions(),
- device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get()
- : nullptr,
- device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get()
- : nullptr,
- std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
- [&](WGPULoggingType loggingType, const char* message) {
- GetDevice()->EmitLog(loggingType, message);
- },
- &compiledShader));
- const D3D12_SHADER_BYTECODE shader = compiledShader.GetD3D12ShaderBytecode();
- doCache(shader.pShaderBytecode, shader.BytecodeLength);
- return {};
- }));
-
- return std::move(compiledShader);
- }
-
- D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
- if (cachedShader.buffer != nullptr) {
- return {cachedShader.buffer.get(), cachedShader.bufferSize};
- } else if (compiledFXCShader != nullptr) {
- return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
- } else if (compiledDXCShader != nullptr) {
- return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
- }
- UNREACHABLE();
- return {};
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
deleted file mode 100644
index 02c1f8cea0e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
-#define DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
-
-#include "dawn_native/PersistentCache.h"
-#include "dawn_native/ShaderModule.h"
-
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native {
- struct ProgrammableStage;
-} // namespace dawn_native
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
- class PipelineLayout;
-
- struct FirstOffsetInfo {
- bool usesVertexIndex;
- uint32_t vertexIndexOffset;
- bool usesInstanceIndex;
- uint32_t instanceIndexOffset;
- };
-
- // Manages a ref to one of the various representations of shader blobs and information used to
- // emulate vertex/instance index starts
- struct CompiledShader {
- ScopedCachedBlob cachedShader;
- ComPtr<ID3DBlob> compiledFXCShader;
- ComPtr<IDxcBlob> compiledDXCShader;
- D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
-
- FirstOffsetInfo firstOffsetInfo;
- };
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
- SingleShaderStage stage,
- PipelineLayout* layout,
- uint32_t compileFlags);
-
- private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_SHADERMODULED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
deleted file mode 100644
index 916a371cb4e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- // Limits the min/max heap size to always be some known value for testing.
- // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
- // We change the value from {1024, 512} to {32, 16} because we use blending
- // for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
- // and low precision at big integer.
- static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
-
- uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- bool useSmallSize) {
- if (useSmallSize) {
- return kShaderVisibleSmallHeapSizes[heapType];
- }
-
- // Minimum heap size must be large enough to satisfy the largest descriptor allocation
- // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
- // memory should only a tiny fraction ever be used.
- // TODO(dawn:155): Figure out these values.
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- return 4096;
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return 256;
- default:
- UNREACHABLE();
- }
- }
-
- uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
- bool useSmallSize) {
- if (useSmallSize) {
- return kShaderVisibleSmallHeapSizes[heapType];
- }
-
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
- default:
- UNREACHABLE();
- }
- }
-
- D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
- switch (heapType) {
- case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
- case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
- return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
- default:
- UNREACHABLE();
- }
- }
-
- // static
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
- ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
- std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
- std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
- DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
- return std::move(allocator);
- }
-
- ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
- Device* device,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType)
- : mHeapType(heapType),
- mDevice(device),
- mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
- mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
- heapType,
- mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
- ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
- heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
- }
-
- bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
- uint32_t descriptorCount,
- ExecutionSerial pendingSerial,
- D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
- GPUDescriptorHeapAllocation* allocation) {
- ASSERT(mHeap != nullptr);
- const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
- if (startOffset == RingBufferAllocator::kInvalidOffset) {
- return false;
- }
-
- ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
-
- const uint64_t heapOffset = mSizeIncrement * startOffset;
-
- // Check for 32-bit overflow since CPU heap start handle uses size_t.
- const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
-
- ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
-
- *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
-
- const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
- descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
-
- // Record both the device and heap serials to determine later if the allocations are
- // still valid.
- *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
-
- return true;
- }
-
- ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
- return mHeap->GetD3D12DescriptorHeap();
- }
-
- void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
- mAllocator.Deallocate(completedSerial);
- }
-
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
- ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
- // The size in bytes of a descriptor heap is best calculated by the increment size
- // multiplied by the number of descriptors. In practice, this is only an estimate and
- // the actual size may vary depending on the driver.
- const uint64_t kSize = mSizeIncrement * descriptorCount;
-
- DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
-
- ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
- D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
- heapDescriptor.Type = mHeapType;
- heapDescriptor.NumDescriptors = descriptorCount;
- heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
- heapDescriptor.NodeMask = 0;
- DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
- &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
- "ID3D12Device::CreateDescriptorHeap"));
-
- std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
- std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
-
- // We must track the allocation in the LRU when it is created, otherwise the residency
- // manager will see the allocation as non-resident in the later call to LockAllocation.
- mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
-
- return std::move(descriptorHeap);
- }
-
- // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
- MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
- std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
- // Dynamically allocate using a two-phase allocation strategy.
- // The first phase increasingly grows a small heap in binary sizes for light users while the
- // second phase pool-allocates largest sized heaps for heavy users.
- if (mHeap != nullptr) {
- mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
-
- const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
- mHeapType,
- mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
- if (mDescriptorCount < maxDescriptorCount) {
- // Phase #1. Grow the heaps in powers-of-two.
- mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
- mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
- } else {
- // Phase #2. Pool-allocate heaps.
- // Return the switched out heap to the pool and retrieve the oldest heap that is no
- // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
- // heaps for heavy users.
- // TODO(dawn:256): Consider periodically triming to avoid OOM.
- mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
- if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
- descriptorHeap = std::move(mPool.front().heap);
- mPool.pop_front();
- }
- }
- }
-
- if (descriptorHeap == nullptr) {
- DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
- }
-
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
-
- // Create a FIFO buffer from the recently created heap.
- mHeap = std::move(descriptorHeap);
- mAllocator = RingBufferAllocator(mDescriptorCount);
-
- // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
- // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
- // heap serial.
- mHeapSerial++;
-
- return {};
- }
-
- HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
- return mHeapSerial;
- }
-
- uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
- return mAllocator.GetSize();
- }
-
- uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
- return mPool.size();
- }
-
- bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
- return mHeap->IsResidencyLocked();
- }
-
- bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
- ASSERT(!mPool.empty());
- return mPool.back().heap->IsInResidencyLRUCache();
- }
-
- bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
- const GPUDescriptorHeapAllocation& allocation) const {
- // Consider valid if allocated for the pending submit and the shader visible heaps
- // have not switched over.
- return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
- allocation.GetHeapSerial() == mHeapSerial);
- }
-
- ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
- ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
- uint64_t size)
- : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
- mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
- }
-
- ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
- return mD3d12DescriptorHeap.Get();
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
deleted file mode 100644
index a315b5607fc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
-#define DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
-
-#include "dawn_native/Error.h"
-#include "dawn_native/RingBufferAllocator.h"
-#include "dawn_native/d3d12/IntegerTypes.h"
-#include "dawn_native/d3d12/PageableD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-#include <list>
-
-// |ShaderVisibleDescriptorAllocator| allocates a variable-sized block of descriptors from a GPU
-// descriptor heap pool.
-// Internally, it manages a list of heaps using a ringbuffer block allocator. The heap is in one
-// of two states: switched in or out. Only a switched in heap can be bound to the pipeline. If
-// the heap is full, the caller must switch-in a new heap before re-allocating and the old one
-// is returned to the pool.
-namespace dawn_native { namespace d3d12 {
-
- class Device;
- class GPUDescriptorHeapAllocation;
-
- class ShaderVisibleDescriptorHeap : public Pageable {
- public:
- ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
- uint64_t size);
- ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
-
- private:
- ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
- };
-
- class ShaderVisibleDescriptorAllocator {
- public:
- static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
- Device* device,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType);
-
- ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
-
- // Returns true if the allocation was successful, when false is returned the current heap is
- // full and AllocateAndSwitchShaderVisibleHeap() must be called.
- bool AllocateGPUDescriptors(uint32_t descriptorCount,
- ExecutionSerial pendingSerial,
- D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
- GPUDescriptorHeapAllocation* allocation);
-
- void Tick(ExecutionSerial completedSerial);
-
- ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
- MaybeError AllocateAndSwitchShaderVisibleHeap();
-
- // For testing purposes only.
- HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
- uint64_t GetShaderVisibleHeapSizeForTesting() const;
- uint64_t GetShaderVisiblePoolSizeForTesting() const;
- bool IsShaderVisibleHeapLockedResidentForTesting() const;
- bool IsLastShaderVisibleHeapInLRUForTesting() const;
-
- bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
-
- private:
- struct SerialDescriptorHeap {
- ExecutionSerial heapSerial;
- std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
- };
-
- ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
- uint32_t descriptorCount) const;
-
- std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
- RingBufferAllocator mAllocator;
- std::list<SerialDescriptorHeap> mPool;
- D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
-
- Device* mDevice;
-
- // The serial value of 0 means the shader-visible heaps have not been allocated.
- // This value is never returned in the GPUDescriptorHeapAllocation after
- // AllocateGPUDescriptors() is called.
- HeapVersionID mHeapSerial = HeapVersionID(0);
-
- uint32_t mSizeIncrement;
-
- // The descriptor count is the current size of the heap in number of descriptors.
- // This is stored on the allocator to avoid extra conversions.
- uint32_t mDescriptorCount = 0;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
deleted file mode 100644
index d35622e9e06..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/StagingBufferD3D12.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
-
- MaybeError StagingBuffer::Initialize() {
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
- resourceDescriptor.Alignment = 0;
- resourceDescriptor.Width = GetSize();
- resourceDescriptor.Height = 1;
- resourceDescriptor.DepthOrArraySize = 1;
- resourceDescriptor.MipLevels = 1;
- resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
- resourceDescriptor.SampleDesc.Count = 1;
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
- resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
-
- DAWN_TRY_ASSIGN(mUploadHeap,
- mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
- D3D12_RESOURCE_STATE_GENERIC_READ));
-
- // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
- // evicted. This buffer should already have been made resident when it was created.
- DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
- ToBackend(mUploadHeap.GetResourceHeap())));
-
- SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
-
- return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
- }
-
- StagingBuffer::~StagingBuffer() {
- // Always check if the allocation is valid before Unmap.
- // The resource would not exist had it failed to allocate.
- if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return;
- }
-
- // The underlying heap was locked in residency upon creation. We must unlock it when this
- // buffer becomes unmapped.
- mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
-
- // Invalidate the CPU virtual address & flush cache (if needed).
- GetResource()->Unmap(0, nullptr);
- mMappedPointer = nullptr;
-
- mDevice->DeallocateMemory(mUploadHeap);
- }
-
- ID3D12Resource* StagingBuffer::GetResource() const {
- return mUploadHeap.GetD3D12Resource();
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
deleted file mode 100644
index aafe60d3c85..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_STAGINGBUFFERD3D12_H_
-#define DAWNNATIVE_STAGINGBUFFERD3D12_H_
-
-#include "dawn_native/StagingBuffer.h"
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
-
- ID3D12Resource* GetResource() const;
-
- MaybeError Initialize() override;
-
- private:
- Device* mDevice;
- ResourceHeapAllocation mUploadHeap;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_STAGINGBUFFERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp
deleted file mode 100644
index 4a622acf9ff..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Math.h"
-
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
- uint32_t descriptorCount,
- uint32_t heapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType)
- : mDevice(device),
- mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
- mBlockSize(descriptorCount * mSizeIncrement),
- mHeapSize(RoundUp(heapSize, descriptorCount)),
- mHeapType(heapType) {
- ASSERT(descriptorCount <= heapSize);
- }
-
- StagingDescriptorAllocator::~StagingDescriptorAllocator() {
- const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
- for (auto& buffer : mPool) {
- ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
- }
- ASSERT(mAvailableHeaps.size() == mPool.size());
- }
-
- ResultOrError<CPUDescriptorHeapAllocation>
- StagingDescriptorAllocator::AllocateCPUDescriptors() {
- if (mAvailableHeaps.empty()) {
- DAWN_TRY(AllocateCPUHeap());
- }
-
- ASSERT(!mAvailableHeaps.empty());
-
- const uint32_t heapIndex = mAvailableHeaps.back();
- NonShaderVisibleBuffer& buffer = mPool[heapIndex];
-
- ASSERT(!buffer.freeBlockIndices.empty());
-
- const Index blockIndex = buffer.freeBlockIndices.back();
-
- buffer.freeBlockIndices.pop_back();
-
- if (buffer.freeBlockIndices.empty()) {
- mAvailableHeaps.pop_back();
- }
-
- const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
- buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
-
- return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
- }
-
- MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
- D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
- heapDescriptor.Type = mHeapType;
- heapDescriptor.NumDescriptors = mHeapSize;
- heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
- heapDescriptor.NodeMask = 0;
-
- ComPtr<ID3D12DescriptorHeap> heap;
- DAWN_TRY(CheckHRESULT(
- mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
- "ID3D12Device::CreateDescriptorHeap"));
-
- NonShaderVisibleBuffer newBuffer;
- newBuffer.heap = std::move(heap);
-
- const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
- newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
-
- for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
- newBuffer.freeBlockIndices.push_back(blockIndex);
- }
-
- mAvailableHeaps.push_back(mPool.size());
- mPool.emplace_back(std::move(newBuffer));
-
- return {};
- }
-
- void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
- ASSERT(allocation->IsValid());
-
- const uint32_t heapIndex = allocation->GetHeapIndex();
-
- ASSERT(heapIndex < mPool.size());
-
- // Insert the deallocated block back into the free-list. Order does not matter. However,
- // having blocks be non-contigious could slow down future allocations due to poor cache
- // locality.
- // TODO(dawn:155): Consider more optimization.
- std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
- if (freeBlockIndices.empty()) {
- mAvailableHeaps.emplace_back(heapIndex);
- }
-
- const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
- mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
-
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
-
- const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
-
- freeBlockIndices.emplace_back(blockIndex);
-
- // Invalidate the handle in case the developer accidentally uses it again.
- allocation->Invalidate();
- }
-
- uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
- return mSizeIncrement;
- }
-
- StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
- return ((mHeapSize * mSizeIncrement) / mBlockSize);
- }
-
- ResultOrError<CPUDescriptorHeapAllocation>
- StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
- CPUDescriptorHeapAllocation allocation;
- DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
- mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
- return allocation;
- }
-
- void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
- for (CPUDescriptorHeapAllocation& allocation :
- mAllocationsToDelete.IterateUpTo(completedSerial)) {
- Deallocate(&allocation);
- }
-
- mAllocationsToDelete.ClearUpTo(completedSerial);
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h
deleted file mode 100644
index 96c1cfbc5be..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
-#define DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
-
-#include "dawn_native/Error.h"
-
-#include "dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h"
-
-#include <vector>
-
-// |StagingDescriptorAllocator| allocates a fixed-size block of descriptors from a CPU
-// descriptor heap pool.
-// Internally, it manages a list of heaps using a fixed-size block allocator. The fixed-size
-// block allocator is backed by a list of free blocks (free-list). The heap is in one of two
-// states: AVAILABLE or not. To allocate, the next free block is removed from the free-list
-// and the corresponding heap offset is returned. The AVAILABLE heap always has room for
-// at-least one free block. If no AVAILABLE heap exists, a new heap is created and inserted
-// back into the pool to be immediately used. To deallocate, the block corresponding to the
-// offset is inserted back into the free-list.
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- class StagingDescriptorAllocator {
- public:
- StagingDescriptorAllocator() = default;
- StagingDescriptorAllocator(Device* device,
- uint32_t descriptorCount,
- uint32_t heapSize,
- D3D12_DESCRIPTOR_HEAP_TYPE heapType);
- ~StagingDescriptorAllocator();
-
- ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
-
- // Will call Deallocate when the serial is passed.
- ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
-
- void Deallocate(CPUDescriptorHeapAllocation* allocation);
-
- uint32_t GetSizeIncrement() const;
-
- void Tick(ExecutionSerial completedSerial);
-
- private:
- using Index = uint16_t;
-
- struct NonShaderVisibleBuffer {
- ComPtr<ID3D12DescriptorHeap> heap;
- std::vector<Index> freeBlockIndices;
- };
-
- MaybeError AllocateCPUHeap();
-
- Index GetFreeBlockIndicesSize() const;
-
- std::vector<uint32_t> mAvailableHeaps; // Indices into the pool.
- std::vector<NonShaderVisibleBuffer> mPool;
-
- Device* mDevice;
-
- uint32_t mSizeIncrement; // Size of the descriptor (in bytes).
- uint32_t mBlockSize; // Size of the block of descriptors (in bytes).
- uint32_t mHeapSize; // Size of the heap (in number of descriptors).
-
- D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
-
- SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
deleted file mode 100644
index 4c2dd245ce1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/SwapChainD3D12.h"
-
-#include "dawn_native/Surface.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-#include <dawn/dawn_wsi.h>
-
-#include <windows.ui.xaml.media.dxinterop.h>
-
-namespace dawn_native { namespace d3d12 {
- namespace {
-
- uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Immediate:
- case wgpu::PresentMode::Fifo:
- return 2;
- case wgpu::PresentMode::Mailbox:
- return 3;
- }
- }
-
- uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Immediate:
- case wgpu::PresentMode::Mailbox:
- return 0;
- case wgpu::PresentMode::Fifo:
- return 1;
- }
- }
-
- UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
- UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
-
- if (mode == wgpu::PresentMode::Immediate) {
- flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
- }
-
- return flags;
- }
-
- DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
- DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
- if (usage & wgpu::TextureUsage::TextureBinding) {
- dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
- }
- return dxgiUsage;
- }
-
- } // namespace
-
- // OldSwapChain
-
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
- }
-
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextD3D12 wsiContext = {};
- wsiContext.device = ToAPI(GetDevice());
- im.Init(im.userData, &wsiContext);
-
- ASSERT(im.textureUsage != WGPUTextureUsage_None);
- mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
- }
-
- OldSwapChain::~OldSwapChain() = default;
-
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- DeviceBase* device = GetDevice();
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- device->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
-
- ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
- Ref<Texture> dawnTexture;
- if (device->ConsumedError(
- Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
- &dawnTexture)) {
- return nullptr;
- }
-
- return dawnTexture.Detach();
- }
-
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
- Device* device = ToBackend(GetDevice());
-
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
- // Perform the necessary transition for the texture to be presented.
- ToBackend(view->GetTexture())
- ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
- view->GetSubresourceRange());
-
- DAWN_TRY(device->ExecutePendingCommandContext());
-
- return {};
- }
-
- // SwapChain
-
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
-
- SwapChain::~SwapChain() = default;
-
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
- }
-
- // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
- // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
- // surface and that we have a chance to reuse it's underlying IDXGISwapChain and "buffers".
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
-
- // Precompute the configuration parameters we want for the DXGI swapchain.
- mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
- mConfig.format = D3D12TextureFormat(GetFormat());
- mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
- mConfig.usage = ToDXGIUsage(GetUsage());
-
- // There is no previous swapchain so we can create one directly and don't have anything else
- // to do.
- if (previousSwapChain == nullptr) {
- return InitializeSwapChainFromScratch();
- }
-
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
- "D3D12 SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
-
- // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
- SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
-
- // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
- // require just losing the reference to the swapchain, but might also need to wait for
- // all previous operations to complete.
- DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
- "D3D12 SwapChain cannot switch between D3D Devices");
-
- // The previous swapchain is on the same device so we want to reuse it but it is still not
- // always possible. Because DXGI requires that a new swapchain be created if the
- // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
- bool canReuseSwapChain =
- ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
- DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
-
- // We can't reuse the previous swapchain, so we destroy it and wait for all of its reference
- // to be forgotten (otherwise DXGI complains that there are outstanding references).
- if (!canReuseSwapChain) {
- DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
- return InitializeSwapChainFromScratch();
- }
-
- // After all this we know we can reuse the swapchain, see if it is possible to also reuse
- // the buffers.
- mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
-
- bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
- GetHeight() == previousSwapChain->GetHeight() &&
- GetFormat() == previousSwapChain->GetFormat() &&
- GetPresentMode() == previousSwapChain->GetPresentMode();
- if (canReuseBuffers) {
- mBuffers = std::move(previousD3D12SwapChain->mBuffers);
- mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
- mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
- return {};
- }
-
- // We can't reuse the buffers so we need to resize, IDXGSwapChain->ResizeBuffers requires
- // that all references to buffers are lost before it is called. Contrary to D3D11, the
- // application is responsible for keeping references to the buffers until the GPU is done
- // using them so we have no choice but to synchrounously wait for all operations to complete
- // on the previous swapchain and then lose references to its buffers.
- DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
- DAWN_TRY(
- CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
- mConfig.format, mConfig.swapChainFlags),
- "IDXGISwapChain::ResizeBuffer"));
- return CollectSwapChainBuffers();
- }
-
- MaybeError SwapChain::InitializeSwapChainFromScratch() {
- ASSERT(mDXGISwapChain == nullptr);
-
- Device* device = ToBackend(GetDevice());
-
- DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
- swapChainDesc.Width = GetWidth();
- swapChainDesc.Height = GetHeight();
- swapChainDesc.Format = mConfig.format;
- swapChainDesc.Stereo = false;
- swapChainDesc.SampleDesc.Count = 1;
- swapChainDesc.SampleDesc.Quality = 0;
- swapChainDesc.BufferUsage = mConfig.usage;
- swapChainDesc.BufferCount = mConfig.bufferCount;
- swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
- swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
- swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
- swapChainDesc.Flags = mConfig.swapChainFlags;
-
- ComPtr<IDXGIFactory2> factory2 = nullptr;
- DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
- "Getting IDXGIFactory2"));
-
- ComPtr<IDXGISwapChain1> swapChain1;
- switch (GetSurface()->GetType()) {
- case Surface::Type::WindowsHWND: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
- static_cast<HWND>(GetSurface()->GetHWND()),
- &swapChainDesc, nullptr, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- break;
- }
- case Surface::Type::WindowsCoreWindow: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
- GetSurface()->GetCoreWindow(),
- &swapChainDesc, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- break;
- }
- case Surface::Type::WindowsSwapChainPanel: {
- DAWN_TRY(CheckHRESULT(
- factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
- &swapChainDesc, nullptr, &swapChain1),
- "Creating the IDXGISwapChain1"));
- ComPtr<ISwapChainPanelNative> swapChainPanelNative;
- DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
- IID_PPV_ARGS(&swapChainPanelNative)),
- "Getting ISwapChainPanelNative"));
- DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
- "Setting SwapChain"));
- break;
- }
- default:
- UNREACHABLE();
- }
-
- DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Gettting IDXGISwapChain1"));
-
- return CollectSwapChainBuffers();
- }
-
- MaybeError SwapChain::CollectSwapChainBuffers() {
- ASSERT(mDXGISwapChain != nullptr);
- ASSERT(mBuffers.empty());
-
- mBuffers.resize(mConfig.bufferCount);
- for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
- DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
- "Getting IDXGISwapChain buffer"));
- }
-
- // Pretend all the buffers were last used at the beginning of time.
- mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
- return {};
- }
-
- MaybeError SwapChain::PresentImpl() {
- Device* device = ToBackend(GetDevice());
-
- // Transition the texture to the present state as required by IDXGISwapChain1::Present()
- // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
- // presentable texture to present at the end of submits that use them.
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
- mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
- mApiTexture->GetAllSubresources());
- DAWN_TRY(device->ExecutePendingCommandContext());
-
- // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
- // message to the application that it could stop rendering.
- HRESULT presentResult =
- mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
- if (presentResult != DXGI_STATUS_OCCLUDED) {
- DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
- }
-
- // Record that "new" is the last time the buffer has been used.
- DAWN_TRY(device->NextSerial());
- mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
-
- mApiTexture->APIDestroy();
- mApiTexture = nullptr;
-
- return {};
- }
-
- ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
- Device* device = ToBackend(GetDevice());
-
- // Synchronously wait until previous operations on the next swapchain buffer are finished.
- // This is the logic that performs frame pacing.
- // TODO(crbug.com/dawn/269): Consider whether this should be lifted for Mailbox so that
- // there is not frame pacing.
- mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
- DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
-
- // Create the API side objects for this use of the swapchain's buffer.
- TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
- DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
- mBuffers[mCurrentBuffer]));
-
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- return mApiTexture->APICreateView();
- }
-
- MaybeError SwapChain::DetachAndWaitForDeallocation() {
- DetachFromSurface();
-
- // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
- // SerialQueue with the current "pending serial" so that we don't destroy the texture
- // before it is finished being used. Flush the commands and wait for that serial to be
- // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(device->NextSerial());
- DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
- return device->TickImpl();
- }
-
- void SwapChain::DetachFromSurfaceImpl() {
- if (mApiTexture != nullptr) {
- mApiTexture->APIDestroy();
- mApiTexture = nullptr;
- }
-
- mDXGISwapChain = nullptr;
- mBuffers.clear();
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
deleted file mode 100644
index 8b9c8aea591..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
-#define DAWNNATIVE_D3D12_SWAPCHAIND3D12_H_
-
-#include "dawn_native/SwapChain.h"
-
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
- class Texture;
-
- class OldSwapChain final : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
-
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
-
- wgpu::TextureUsage mTextureUsage;
- };
-
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
-
- private:
- ~SwapChain() override;
-
- void DestroyImpl() override;
-
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- struct Config {
- // Information that's passed to the D3D12 swapchain creation call.
- UINT bufferCount;
- UINT swapChainFlags;
- DXGI_FORMAT format;
- DXGI_USAGE usage;
- };
-
- // NewSwapChainBase implementation
- MaybeError PresentImpl() override;
- ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
-
- // Does the swapchain initialization steps assuming there is nothing we can reuse.
- MaybeError InitializeSwapChainFromScratch();
- // Does the swapchain initialization step of gathering the buffers.
- MaybeError CollectSwapChainBuffers();
- // Calls DetachFromSurface but also synchronously waits until all references to the
- // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
- MaybeError DetachAndWaitForDeallocation();
-
- Config mConfig;
-
- ComPtr<IDXGISwapChain3> mDXGISwapChain;
- std::vector<ComPtr<ID3D12Resource>> mBuffers;
- std::vector<ExecutionSerial> mBufferLastUsedSerials;
- uint32_t mCurrentBuffer = 0;
-
- Ref<Texture> mApiTexture;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_SWAPCHAIN_D3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
deleted file mode 100644
index 411cce24ecf..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/TextureCopySplitter.h"
-
-#include "common/Assert.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
- uint32_t offset,
- uint32_t bytesPerRow) {
- ASSERT(bytesPerRow != 0);
- uint32_t byteOffsetX = offset % bytesPerRow;
- uint32_t byteOffsetY = offset - byteOffsetX;
-
- return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
- byteOffsetY / bytesPerRow * blockInfo.height, 0};
- }
-
- uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
- uint32_t bytesPerRow,
- uint64_t alignedOffset,
- Origin3D bufferOffset) {
- ASSERT(bufferOffset.z == 0);
- return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
- bufferOffset.y * bytesPerRow / blockInfo.height;
- }
-
- uint64_t AlignDownForDataPlacement(uint32_t offset) {
- return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
- }
- } // namespace
-
- TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
- ASSERT(this->count < kMaxTextureCopyRegions);
- return &this->copies[this->count++];
- }
-
- TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow) {
- TextureCopySubresource copy;
-
- ASSERT(bytesPerRow % blockInfo.byteSize == 0);
-
- // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
- // preceding our data.
- uint64_t alignedOffset = AlignDownForDataPlacement(offset);
-
- // If the provided offset to the data was already 512-aligned, we can simply copy the data
- // without further translation.
- if (offset == alignedOffset) {
- copy.count = 1;
-
- copy.copies[0].alignedOffset = alignedOffset;
- copy.copies[0].textureOffset = origin;
- copy.copies[0].copySize = copySize;
- copy.copies[0].bufferOffset = {0, 0, 0};
- copy.copies[0].bufferSize = copySize;
-
- return copy;
- }
-
- ASSERT(alignedOffset < offset);
- ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
-
- // We must reinterpret our aligned offset into X and Y offsets with respect to the row
- // pitch.
- //
- // You can visualize the data in the buffer like this:
- // |-----------------------++++++++++++++++++++++++++++++++|
- // ^ 512-aligned address ^ Aligned offset ^ End of copy data
- //
- // Now when you consider the row pitch, you can visualize the data like this:
- // |~~~~~~~~~~~~~~~~|
- // |~~~~~+++++++++++|
- // |++++++++++++++++|
- // |+++++~~~~~~~~~~~|
- // |<---row pitch-->|
- //
- // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
- // |YYYYYYYYYYYYYYYY|
- // |XXXXXX++++++++++|
- // |++++++++++++++++|
- // |++++++~~~~~~~~~~|
- // |<---row pitch-->|
- Origin3D texelOffset = ComputeTexelOffsets(
- blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
-
- ASSERT(texelOffset.y <= blockInfo.height);
- ASSERT(texelOffset.z == 0);
-
- uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
- uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
- if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
- // The region's rows fit inside the bytes per row. In this case, extend the width of the
- // PlacedFootprint and copy the buffer with an offset location
- // |<------------- bytes per row ------------->|
- //
- // |-------------------------------------------|
- // | |
- // | +++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++ |
- // |-------------------------------------------|
-
- // Copy 0:
- // |----------------------------------|
- // | |
- // | +++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |~~~~~~~~~~~~~~~~~+++++++++++++++++|
- // |----------------------------------|
-
- copy.count = 1;
-
- copy.copies[0].alignedOffset = alignedOffset;
- copy.copies[0].textureOffset = origin;
- copy.copies[0].copySize = copySize;
- copy.copies[0].bufferOffset = texelOffset;
-
- copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
- copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
- copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- return copy;
- }
-
- // The region's rows straddle the bytes per row. Split the copy into two copies
- // |<------------- bytes per row ------------->|
- //
- // |-------------------------------------------|
- // | |
- // | ++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |+++++++++ |
- // |-------------------------------------------|
-
- // Copy 0:
- // |-------------------------------------------|
- // | |
- // | ++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
- // |-------------------------------------------|
-
- // Copy 1:
- // |---------|
- // | |
- // | |
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |+++++++++|
- // |---------|
-
- copy.count = 2;
-
- copy.copies[0].alignedOffset = alignedOffset;
- copy.copies[0].textureOffset = origin;
-
- ASSERT(bytesPerRow > byteOffsetInRowPitch);
- uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
- copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
- copy.copies[0].copySize.height = copySize.height;
- copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- copy.copies[0].bufferOffset = texelOffset;
- copy.copies[0].bufferSize.width = texelsPerRow;
- copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
- copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- uint64_t offsetForCopy1 =
- offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
- uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
- Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
- blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
-
- ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
- ASSERT(texelOffsetForCopy1.z == 0);
-
- copy.copies[1].alignedOffset = alignedOffsetForCopy1;
- copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
- copy.copies[1].textureOffset.y = origin.y;
- copy.copies[1].textureOffset.z = origin.z;
-
- ASSERT(copySize.width > copy.copies[0].copySize.width);
- copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
- copy.copies[1].copySize.height = copySize.height;
- copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- copy.copies[1].bufferOffset = texelOffsetForCopy1;
- copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
- copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
- copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
- return copy;
- }
-
- TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- TextureCopySplits copies;
-
- const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
-
- // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
- // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
- // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
- // Each layer of a 2D array might need to be split, but because of the WebGPU
- // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
- // will be at an offset multiple of 512 of each other, which means they will all result in
- // the same 2D split. Thus we can just compute the copy splits for the first and second
- // layers, and reuse them for the remaining layers by adding the related offset of each
- // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
- // share the same copy split, so in this situation we just need to compute copy split once
- // and reuse it for all the layers.
- Extent3D copyOneLayerSize = copySize;
- Origin3D copyFirstLayerOrigin = origin;
- copyOneLayerSize.depthOrArrayLayers = 1;
- copyFirstLayerOrigin.z = 0;
-
- copies.copySubresources[0] = Compute2DTextureCopySubresource(
- copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
-
- // When the copy only refers one texture 2D array layer,
- // copies.copySubresources[1] will never be used so we can safely early return here.
- if (copySize.depthOrArrayLayers == 1) {
- return copies;
- }
-
- if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
- copies.copySubresources[1] = copies.copySubresources[0];
- copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
- copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
- } else {
- const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
- copies.copySubresources[1] =
- Compute2DTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
- bufferOffsetNextLayer, bytesPerRow);
- }
-
- return copies;
- }
-
- void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
- Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- TextureCopySubresource& copy,
- uint32_t i) {
- // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
- // is incorrect if there is an empty row at the beginning of the copy block.
- // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
- // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
- // as below:
- //
- // |<----- bytes per row ------>|
- //
- // |----------------------------|
- // row (N - 1) | |
- // row N | ++~~~~~~~~~|
- // row (N + 1) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 2) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 3) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 4) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 5) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 6) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 7) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 8) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 9) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
- // row (N + 11) |~~~~~~~~~~~~~~~~~++ |
- // |----------------------------|
-
- // The copy we mean to do is the following:
- //
- // - image 0: row N to row (N + 3),
- // - image 1: row (N + 4) to row (N + 7),
- // - image 2: row (N + 8) to row (N + 11).
- //
- // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
- // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
- //
- // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
- // the following:
- //
- // |-------------------|
- // row (N - 1) | |
- // row N | ++|
- // row (N + 1) |~~~~~~~~~~~~~~~~~++|
- // row (N + 2) |~~~~~~~~~~~~~~~~~++|
- // row (N + 3) |~~~~~~~~~~~~~~~~~++|
- // |-------------------|
- //
- // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
- // texture, we will copy 5 rows every time, and every first row of each slice will be
- // skipped. As a result, the copied data will be:
- //
- // - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
- // - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
- //
- // Likewise, all other image followed will be incorrect because we wrongly keep skipping
- // one row for each depth slice.
- //
- // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
- // expand to all depth slices in the first copy. 3 rows + one skipped rows = 4 rows, which
- // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
- // block of the last row of the last image may out-of-bound (see the details below), so
- // we need an extra copy for the very last row.
-
- // Copy 0: copy 3 rows, not 4 rows.
- // _____________________
- // / /|
- // / / |
- // |-------------------| |
- // row (N - 1) | | |
- // row N | ++| |
- // row (N + 1) |~~~~~~~~~~~~~~~~~++| /
- // row (N + 2) |~~~~~~~~~~~~~~~~~++|/
- // |-------------------|
-
- // Copy 1: move down two rows and copy the last row on image 0, and expand to
- // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
- // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
- // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
- // _____________________
- // / /|
- // / / |
- // |-------------------| |
- // row (N + 1) | | |
- // row (N + 2) | | |
- // row (N + 3) | ++| /
- // row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
- // |-------------------|
- //
- // copy 2: copy the last row of the last image.
- // |-------------------|
- // row (N + 11)| ++|
- // |-------------------|
-
- // Copy 0: copy copySize.height - 1 rows
- TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
- copy0.copySize.height = copySize.height - blockInfo.height;
- copy0.bufferSize.height = rowsPerImage * blockInfo.height; // rowsPerImageInTexels
-
- // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
- // but the last one.
- TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
- *copy1 = copy0;
- copy1->alignedOffset += 2 * bytesPerRow;
- copy1->textureOffset.y += copySize.height - blockInfo.height;
- // Offset two rows from the copy height for the bufferOffset (See the figure above):
- // - one for the row we advanced in the buffer: row (N + 4).
- // - one for the last row we want to copy: row (N + 3) itself.
- copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
- copy1->copySize.height = blockInfo.height;
- copy1->copySize.depthOrArrayLayers--;
- copy1->bufferSize.depthOrArrayLayers--;
-
- // Copy 2: copy the last row of the last image.
- uint64_t offsetForCopy0 = OffsetToFirstCopiedTexel(blockInfo, bytesPerRow,
- copy0.alignedOffset, copy0.bufferOffset);
- uint64_t offsetForLastRowOfLastImage =
- offsetForCopy0 + bytesPerRow * (copy0.copySize.height +
- rowsPerImage * (copySize.depthOrArrayLayers - 1));
- uint64_t alignedOffsetForLastRowOfLastImage =
- AlignDownForDataPlacement(offsetForLastRowOfLastImage);
- Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
- blockInfo,
- static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
- bytesPerRow);
-
- TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
- copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
- copy2->textureOffset = copy1->textureOffset;
- copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
- copy2->copySize = copy1->copySize;
- copy2->copySize.depthOrArrayLayers = 1;
- copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
- copy2->bufferSize.width = copy1->bufferSize.width;
- ASSERT(copy2->copySize.height == 1);
- copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
- copy2->bufferSize.depthOrArrayLayers = 1;
- }
-
- void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
- uint32_t bytesPerRow,
- TextureCopySubresource& copy,
- uint32_t i) {
- // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
- // the reason why it is incorrect if we simply extend the copy region to all depth slices
- // when there is an empty first row at the copy region.
- //
- // If the copy height is odd, we can use two copies to make it correct:
- // - copy 0: only copy the first depth slice. Keep other arguments the same.
- // - copy 1: copy all rest depth slices because it will start without an empty row if
- // copy height is odd. Odd height + one (empty row) is even. An even row number times
- // bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
-
- // Copy 0: copy the first depth slice (image 0)
- TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
- copy0.copySize.depthOrArrayLayers = 1;
- copy0.bufferSize.depthOrArrayLayers = 1;
-
- // Copy 1: copy the rest depth slices in one shot
- TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
- *copy1 = copy0;
- ASSERT(copySize.height % 2 == 1);
- copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
- ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
- // textureOffset.z should add one because the first slice has already been copied in copy0.
- copy1->textureOffset.z++;
- // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
- // row in this copy region.
- copy1->bufferOffset.y = 0;
- copy1->copySize.height = copySize.height;
- copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
- copy1->bufferSize.height = copySize.height;
- copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
- }
-
- TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
- // and get copy region(s) for the first slice of the copy, then extend to all depth slices
- // and become a 3D copy. However, this doesn't work as easily as that due to some corner
- // cases.
- //
- // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
- // region and we simply extend the 2D copy region to all copied depth slices, copied data
- // will be incorrectly offset for each depth slice except the first one.
- //
- // For these special cases, we need to recompute the copy regions for 3D textures via
- // split the incorrect copy region to a couple more copy regions.
-
- // Call Compute2DTextureCopySubresource and get copy regions. This function has already
- // forwarded "copySize.depthOrArrayLayers" to all depth slices.
- TextureCopySubresource copySubresource =
- Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
-
- ASSERT(copySubresource.count <= 2);
- // If copySize.depth is 1, we can return copySubresource. Because we don't need to extend
- // the copy region(s) to other depth slice(s).
- if (copySize.depthOrArrayLayers == 1) {
- return copySubresource;
- }
-
- uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
- // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
- // However, we may append a couple more copy regions in the for loop below. We don't need
- // to revise these new added copy regions.
- uint32_t originalCopyCount = copySubresource.count;
- for (uint32_t i = 0; i < originalCopyCount; ++i) {
- // There can be one empty row at most in a copy region.
- ASSERT(copySubresource.copies[i].bufferSize.height <=
- rowsPerImageInTexels + blockInfo.height);
- Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
-
- if (bufferSize.height == rowsPerImageInTexels) {
- // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
- // this copy region without any modification.
- continue;
- }
-
- if (bufferSize.height < rowsPerImageInTexels) {
- // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
- // for each slice even though we only copy partial rows in each slice sometimes.
- bufferSize.height = rowsPerImageInTexels;
- } else {
- // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
- // region due to alignment adjustment.
-
- // bytesPerRow is definitely 256, and it is definitely a full copy on height.
- // Otherwise, bufferSize.height wount be greater than rowsPerImageInTexels and
- // there won't be an empty row at the beginning of this copy region.
- ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
- ASSERT(copySize.height == rowsPerImageInTexels);
-
- if (copySize.height % 2 == 0) {
- // If copySize.height is even and there is an empty row at the beginning of the
- // first slice of the copy region, the offset of all depth slices will never be
- // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
- // an empty row at each depth slice. We need a totally different approach to
- // split the copy region.
- Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
- origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
- } else {
- // If copySize.height is odd and there is an empty row at the beginning of the
- // first slice of the copy region, we can split the copy region into two copies:
- // copy0 to copy the first slice, copy1 to copy the rest slices because the
- // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
- // without an empty row. This is an easier case relative to cases with even copy
- // height.
- Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(
- copySize, bytesPerRow, copySubresource, i);
- }
- }
- }
-
- return copySubresource;
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
deleted file mode 100644
index 4796ed9de75..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
-#define DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-
-namespace dawn_native {
-
- struct TexelBlockInfo;
-
-} // namespace dawn_native
-
-namespace dawn_native { namespace d3d12 {
-
- struct TextureCopySubresource {
- static constexpr unsigned int kMaxTextureCopyRegions = 4;
-
- struct CopyInfo {
- uint64_t alignedOffset = 0;
- Origin3D textureOffset;
- Origin3D bufferOffset;
- Extent3D bufferSize;
-
- Extent3D copySize;
- };
-
- CopyInfo* AddCopy();
-
- uint32_t count = 0;
- std::array<CopyInfo, kMaxTextureCopyRegions> copies;
- };
-
- struct TextureCopySplits {
- static constexpr uint32_t kMaxTextureCopySubresources = 2;
-
- std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
- };
-
- // This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
- // 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
- // details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
- // and 3D textures based on this function.
- // The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
- // regions defines by the arguments of TextureCopySubresource returned by this function and its
- // counterparts. These arguments should strictly conform to particular invariants. Otherwise,
- // D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
- // invariants are listed below. For more details
- // of these invariants, see src/tests/unittests/d3d12/CopySplitTests.cpp.
- // - Inside each copy region: 1) its buffer offset plus copy size should be less than its
- // buffer size, 2) its buffer offset on y-axis should be less than copy format's
- // blockInfo.height, 3) its buffer offset on z-axis should be 0.
- // - Each copy region has an offset (aka alignedOffset) aligned to
- // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
- // - The buffer footprint of each copy region should be entirely within the copied buffer,
- // which means that the last "texel" of the buffer footprint doesn't go past the end of
- // the buffer even though the last "texel" might not be copied.
- // - If there are multiple copy regions, each copy region should not overlap with the others.
- // - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
- // - Every pixel accessed by every copy region should not be out of the bound of the copied
- // texture and buffer.
- TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow);
-
- TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
-
- TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
- Extent3D copySize,
- const TexelBlockInfo& blockInfo,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
deleted file mode 100644
index dfdef5b6436..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ /dev/null
@@ -1,1348 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/TextureD3D12.h"
-
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/d3d12/BufferD3D12.h"
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/D3D11on12Util.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/HeapD3D12.h"
-#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
-#include "dawn_native/d3d12/StagingBufferD3D12.h"
-#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
-#include "dawn_native/d3d12/TextureCopySplitter.h"
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
- D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
-
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- return D3D12_RESOURCE_STATE_PRESENT;
- }
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
- }
- if (usage & (wgpu::TextureUsage::TextureBinding)) {
- resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
- } else {
- resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
- }
- }
-
- if (usage & kReadOnlyRenderAttachment) {
- // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
- resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
- }
-
- return resourceState;
- }
-
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
- const Format& format,
- bool isMultisampledTexture) {
- D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
-
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
- }
-
- // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
- // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
- if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
- if (format.HasDepthOrStencil()) {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
- } else {
- flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
- }
- }
-
- ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
- flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
- return flags;
- }
-
- D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
- switch (dimension) {
- case wgpu::TextureDimension::e2D:
- return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
- case wgpu::TextureDimension::e3D:
- return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
- }
-
- DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- return DXGI_FORMAT_R8_TYPELESS;
-
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::Depth16Unorm:
- return DXGI_FORMAT_R16_TYPELESS;
-
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- return DXGI_FORMAT_R8G8_TYPELESS;
-
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::R32Float:
- return DXGI_FORMAT_R32_TYPELESS;
-
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- return DXGI_FORMAT_R16G16_TYPELESS;
-
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- return DXGI_FORMAT_R8G8B8A8_TYPELESS;
-
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return DXGI_FORMAT_B8G8R8A8_TYPELESS;
-
- case wgpu::TextureFormat::RGB10A2Unorm:
- return DXGI_FORMAT_R10G10B10A2_TYPELESS;
-
- case wgpu::TextureFormat::RG11B10Ufloat:
- return DXGI_FORMAT_R11G11B10_FLOAT;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
-
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- return DXGI_FORMAT_R32G32_TYPELESS;
-
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- return DXGI_FORMAT_R16G16B16A16_TYPELESS;
-
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- return DXGI_FORMAT_R32G32B32A32_TYPELESS;
-
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- return DXGI_FORMAT_R32_TYPELESS;
-
- case wgpu::TextureFormat::Depth24PlusStencil8:
- return DXGI_FORMAT_R32G8X24_TYPELESS;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return DXGI_FORMAT_BC1_TYPELESS;
-
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return DXGI_FORMAT_BC2_TYPELESS;
-
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return DXGI_FORMAT_BC3_TYPELESS;
-
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC4RUnorm:
- return DXGI_FORMAT_BC4_TYPELESS;
-
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC5RGUnorm:
- return DXGI_FORMAT_BC5_TYPELESS;
-
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return DXGI_FORMAT_BC6H_TYPELESS;
-
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return DXGI_FORMAT_BC7_TYPELESS;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- // TODO(dawn:690): implement depth24unorm-stencil8
- case wgpu::TextureFormat::Depth24UnormStencil8:
- // TODO(dawn:690): implement depth32float-stencil8
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
- }
- }
-
- } // namespace
-
- DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return DXGI_FORMAT_R8_UNORM;
- case wgpu::TextureFormat::R8Snorm:
- return DXGI_FORMAT_R8_SNORM;
- case wgpu::TextureFormat::R8Uint:
- return DXGI_FORMAT_R8_UINT;
- case wgpu::TextureFormat::R8Sint:
- return DXGI_FORMAT_R8_SINT;
-
- case wgpu::TextureFormat::R16Uint:
- return DXGI_FORMAT_R16_UINT;
- case wgpu::TextureFormat::R16Sint:
- return DXGI_FORMAT_R16_SINT;
- case wgpu::TextureFormat::R16Float:
- return DXGI_FORMAT_R16_FLOAT;
- case wgpu::TextureFormat::RG8Unorm:
- return DXGI_FORMAT_R8G8_UNORM;
- case wgpu::TextureFormat::RG8Snorm:
- return DXGI_FORMAT_R8G8_SNORM;
- case wgpu::TextureFormat::RG8Uint:
- return DXGI_FORMAT_R8G8_UINT;
- case wgpu::TextureFormat::RG8Sint:
- return DXGI_FORMAT_R8G8_SINT;
-
- case wgpu::TextureFormat::R32Uint:
- return DXGI_FORMAT_R32_UINT;
- case wgpu::TextureFormat::R32Sint:
- return DXGI_FORMAT_R32_SINT;
- case wgpu::TextureFormat::R32Float:
- return DXGI_FORMAT_R32_FLOAT;
- case wgpu::TextureFormat::RG16Uint:
- return DXGI_FORMAT_R16G16_UINT;
- case wgpu::TextureFormat::RG16Sint:
- return DXGI_FORMAT_R16G16_SINT;
- case wgpu::TextureFormat::RG16Float:
- return DXGI_FORMAT_R16G16_FLOAT;
- case wgpu::TextureFormat::RGBA8Unorm:
- return DXGI_FORMAT_R8G8B8A8_UNORM;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return DXGI_FORMAT_R8G8B8A8_SNORM;
- case wgpu::TextureFormat::RGBA8Uint:
- return DXGI_FORMAT_R8G8B8A8_UINT;
- case wgpu::TextureFormat::RGBA8Sint:
- return DXGI_FORMAT_R8G8B8A8_SINT;
- case wgpu::TextureFormat::BGRA8Unorm:
- return DXGI_FORMAT_B8G8R8A8_UNORM;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return DXGI_FORMAT_R10G10B10A2_UNORM;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return DXGI_FORMAT_R11G11B10_FLOAT;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
-
- case wgpu::TextureFormat::RG32Uint:
- return DXGI_FORMAT_R32G32_UINT;
- case wgpu::TextureFormat::RG32Sint:
- return DXGI_FORMAT_R32G32_SINT;
- case wgpu::TextureFormat::RG32Float:
- return DXGI_FORMAT_R32G32_FLOAT;
- case wgpu::TextureFormat::RGBA16Uint:
- return DXGI_FORMAT_R16G16B16A16_UINT;
- case wgpu::TextureFormat::RGBA16Sint:
- return DXGI_FORMAT_R16G16B16A16_SINT;
- case wgpu::TextureFormat::RGBA16Float:
- return DXGI_FORMAT_R16G16B16A16_FLOAT;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return DXGI_FORMAT_R32G32B32A32_UINT;
- case wgpu::TextureFormat::RGBA32Sint:
- return DXGI_FORMAT_R32G32B32A32_SINT;
- case wgpu::TextureFormat::RGBA32Float:
- return DXGI_FORMAT_R32G32B32A32_FLOAT;
-
- case wgpu::TextureFormat::Depth32Float:
- return DXGI_FORMAT_D32_FLOAT;
- case wgpu::TextureFormat::Depth24Plus:
- return DXGI_FORMAT_D32_FLOAT;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
- case wgpu::TextureFormat::Depth16Unorm:
- return DXGI_FORMAT_D16_UNORM;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return DXGI_FORMAT_BC1_UNORM;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return DXGI_FORMAT_BC1_UNORM_SRGB;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return DXGI_FORMAT_BC2_UNORM;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return DXGI_FORMAT_BC2_UNORM_SRGB;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return DXGI_FORMAT_BC3_UNORM;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return DXGI_FORMAT_BC3_UNORM_SRGB;
- case wgpu::TextureFormat::BC4RSnorm:
- return DXGI_FORMAT_BC4_SNORM;
- case wgpu::TextureFormat::BC4RUnorm:
- return DXGI_FORMAT_BC4_UNORM;
- case wgpu::TextureFormat::BC5RGSnorm:
- return DXGI_FORMAT_BC5_SNORM;
- case wgpu::TextureFormat::BC5RGUnorm:
- return DXGI_FORMAT_BC5_UNORM;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return DXGI_FORMAT_BC6H_SF16;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return DXGI_FORMAT_BC6H_UF16;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return DXGI_FORMAT_BC7_UNORM;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return DXGI_FORMAT_BC7_UNORM_SRGB;
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- return DXGI_FORMAT_NV12;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- // TODO(dawn:690): implement depth24unorm-stencil8
- case wgpu::TextureFormat::Depth24UnormStencil8:
- // TODO(dawn:690): implement depth32float-stencil8
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
- }
- }
-
- MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- return {};
- }
-
- MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
- const TextureDescriptor* dawnDescriptor) {
- const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
- DAWN_INVALID_IF(
- (dawnDescriptor->size.width != d3dDescriptor.Width) ||
- (dawnDescriptor->size.height != d3dDescriptor.Height) ||
- (dawnDescriptor->size.depthOrArrayLayers != 1),
- "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
- "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
- d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
- dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
-
- const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
- DAWN_INVALID_IF(
- dxgiFormatFromDescriptor != d3dDescriptor.Format,
- "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
- d3dDescriptor.Format, dawnDescriptor->format);
-
- DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
- "D3D12 texture number of miplevels (%u) is not 1.",
- d3dDescriptor.MipLevels);
-
- DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1,
- "D3D12 texture array size (%u) is not 1.", d3dDescriptor.DepthOrArraySize);
-
- // Shared textures cannot be multi-sample so no need to check those.
- ASSERT(d3dDescriptor.SampleDesc.Count == 1);
- ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
-
- return {};
- }
-
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
- MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
- const bool supportsSharedResourceCapabilityTier1 =
- device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
- switch (textureFormat) {
- // MSDN docs are not correct, NV12 requires at-least tier 1.
- case DXGI_FORMAT_NV12:
- if (supportsSharedResourceCapabilityTier1) {
- return {};
- }
- break;
- default:
- break;
- }
-
- return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-
- DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
- "Cannot create a multi-planar formatted texture directly");
-
- DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
- return std::move(dawnTexture);
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
- Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
- descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), acquireMutexKey,
- releaseMutexKey, isSwapChainTexture));
-
- // Importing a multi-planar format must be initialized. This is required because
- // a shared multi-planar format cannot be initialized by Dawn.
- DAWN_INVALID_IF(
- !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
- "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
- dawnTexture->GetFormat().format);
-
- dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
- dawnTexture->GetAllSubresources());
- return std::move(dawnTexture);
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture) {
- Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
- return std::move(dawnTexture);
- }
-
- MaybeError Texture::InitializeAsExternalTexture(
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture) {
- DAWN_TRY(CheckHRESULT(d3d11on12Resource->GetDXGIKeyedMutex()->AcquireSync(
- uint64_t(acquireMutexKey), INFINITE),
- "D3D12 acquiring shared mutex"));
-
- mAcquireMutexKey = acquireMutexKey;
- mReleaseMutexKey = releaseMutexKey;
- mD3D11on12Resource = std::move(d3d11on12Resource);
- mSwapChainTexture = isSwapChainTexture;
-
- D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
- mD3D12ResourceFlags = desc.Flags;
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kExternal;
- // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
- // texture is owned externally. The texture's owning entity must remain responsible for
- // memory management.
- mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
- SetLabelHelper("Dawn_ExternalTexture");
-
- return {};
- }
-
- MaybeError Texture::InitializeAsInternalTexture() {
- D3D12_RESOURCE_DESC resourceDescriptor;
- resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
- resourceDescriptor.Alignment = 0;
-
- const Extent3D& size = GetSize();
- resourceDescriptor.Width = size.width;
- resourceDescriptor.Height = size.height;
- resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
-
- // This will need to be much more nuanced when WebGPU has
- // texture view compatibility rules.
- const bool needsTypelessFormat =
- GetFormat().HasDepthOrStencil() &&
- (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
-
- DXGI_FORMAT dxgiFormat = needsTypelessFormat
- ? D3D12TypelessTextureFormat(GetFormat().format)
- : D3D12TextureFormat(GetFormat().format);
-
- resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
- resourceDescriptor.Format = dxgiFormat;
- resourceDescriptor.SampleDesc.Count = GetSampleCount();
- resourceDescriptor.SampleDesc.Quality = 0;
- resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
- resourceDescriptor.Flags =
- D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
- mD3D12ResourceFlags = resourceDescriptor.Flags;
-
- DAWN_TRY_ASSIGN(mResourceAllocation,
- ToBackend(GetDevice())
- ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
- D3D12_RESOURCE_STATE_COMMON));
-
- SetLabelImpl();
-
- Device* device = ToBackend(GetDevice());
-
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- CommandRecordingContext* commandContext;
- DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
- DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
- TextureBase::ClearValue::NonZero));
- }
-
- return {};
- }
-
- MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
- AllocationInfo info;
- info.mMethod = AllocationMethod::kExternal;
- // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
- // texture is owned externally. The texture's owning entity must remain responsible for
- // memory management.
- mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
- SetLabelHelper("Dawn_SwapChainTexture");
-
- return {};
- }
-
- Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
- : TextureBase(device, descriptor, state),
- mSubresourceStateAndDecay(
- GetFormat().aspects,
- GetArrayLayers(),
- GetNumMipLevels(),
- {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {
- }
-
- Texture::~Texture() {
- }
-
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
-
- Device* device = ToBackend(GetDevice());
-
- // In PIX's D3D12-only mode, there is no way to determine frame boundaries
- // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
- // PIX will wait forever for a present that never happens.
- // If we know we're dealing with a swapbuffer texture, inform PIX we've
- // "presented" the texture so it can determine frame boundaries and use its
- // contents for the UI.
- if (mSwapChainTexture) {
- ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
- if (d3dSharingContract != nullptr) {
- d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
- }
- }
-
- device->DeallocateMemory(mResourceAllocation);
-
- // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
- // We can set mSwapChainTexture to false to avoid passing a nullptr to
- // ID3D12SharingContract::Present.
- mSwapChainTexture = false;
-
- if (mD3D11on12Resource != nullptr) {
- mD3D11on12Resource->GetDXGIKeyedMutex()->ReleaseSync(uint64_t(mReleaseMutexKey));
- }
- }
-
- DXGI_FORMAT Texture::GetD3D12Format() const {
- return D3D12TextureFormat(GetFormat().format);
- }
-
- ID3D12Resource* Texture::GetD3D12Resource() const {
- return mResourceAllocation.GetD3D12Resource();
- }
-
- DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
- ASSERT(GetFormat().aspects & aspect);
-
- switch (GetFormat().format) {
- case wgpu::TextureFormat::Depth24PlusStencil8:
- switch (aspect) {
- case Aspect::Depth:
- return DXGI_FORMAT_R32_FLOAT;
- case Aspect::Stencil:
- return DXGI_FORMAT_R8_UINT;
- default:
- UNREACHABLE();
- }
- default:
- ASSERT(HasOneBit(GetFormat().aspects));
- return GetD3D12Format();
- }
- }
-
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
- }
-
- void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage) {
- TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
- GetAllSubresources());
- }
-
- void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState) {
- TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
- }
-
- void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range) {
- if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
- }
-
- std::vector<D3D12_RESOURCE_BARRIER> barriers;
-
- // TODO(enga): Consider adding a Count helper.
- uint32_t aspectCount = 0;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- aspectCount++;
- DAWN_UNUSED(aspect);
- }
-
- barriers.reserve(range.levelCount * range.layerCount * aspectCount);
-
- TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
- if (barriers.size()) {
- commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
- }
- }
-
- void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const SubresourceRange& range,
- StateAndDecay* state,
- D3D12_RESOURCE_STATES newState,
- ExecutionSerial pendingCommandSerial) const {
- // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
- // return false.
- if (state->lastState == newState) {
- return;
- }
-
- D3D12_RESOURCE_STATES lastState = state->lastState;
-
- // The COMMON state represents a state where no write operations can be pending, and
- // where all pixels are uncompressed. This makes it possible to transition to and
- // from some states without synchronization (i.e. without an explicit
- // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
- // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
- // state when all of the following are true: 1) the texture is accessed on a command
- // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
- // 3) the texture was promoted implicitly to a read-only state and is still in that
- // state.
- // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
- // To track implicit decays, we must record the pending serial on which that
- // transition will occur. When that texture is used again, the previously recorded
- // serial must be compared to the last completed serial to determine if the texture
- // has implicity decayed to the common state.
- if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
- lastState = D3D12_RESOURCE_STATE_COMMON;
- }
-
- // Update the tracked state.
- state->lastState = newState;
-
- // Destination states that qualify for an implicit promotion for a
- // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
- // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
- {
- static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
- D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
- D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
-
- if (lastState == D3D12_RESOURCE_STATE_COMMON) {
- if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
- // Implicit texture state decays can only occur when the texture was implicitly
- // transitioned to a read-only state. isValidToDecay is needed to differentiate
- // between resources that were implictly or explicitly transitioned to a
- // read-only state.
- state->isValidToDecay = true;
- state->lastDecaySerial = pendingCommandSerial;
- return;
- } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
- state->isValidToDecay = false;
- return;
- }
- }
- }
-
- D3D12_RESOURCE_BARRIER barrier;
- barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
- barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier.Transition.pResource = GetD3D12Resource();
- barrier.Transition.StateBefore = lastState;
- barrier.Transition.StateAfter = newState;
-
- bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
- range.layerCount == GetArrayLayers() &&
- range.levelCount == GetNumMipLevels() &&
- range.aspects == GetFormat().aspects;
-
- // Use a single transition for all subresources if possible.
- if (isFullRange) {
- barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
- barriers->push_back(barrier);
- } else {
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
- barrier.Transition.Subresource =
- GetSubresourceIndex(range.baseMipLevel + mipLevel,
- range.baseArrayLayer + arrayLayer, aspect);
- barriers->push_back(barrier);
- }
- }
- }
- }
-
- state->isValidToDecay = false;
- }
-
- void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
- // Textures with keyed mutexes can be written from other graphics queues. Hence, they
- // must be acquired before command list submission to ensure work from the other queues
- // has finished. See Device::ExecuteCommandContext.
- if (mD3D11on12Resource != nullptr) {
- commandContext->AddToSharedTextureList(this);
- }
- }
-
- void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- TransitionUsageAndGetResourceBarrier(commandContext, barrier,
- D3D12TextureUsage(usage, GetFormat()), range);
- }
-
- void Texture::TransitionUsageAndGetResourceBarrier(
- CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range) {
- HandleTransitionSpecialCases(commandContext);
-
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
-
- ASSERT(GetDimension() != wgpu::TextureDimension::e1D);
-
- mSubresourceStateAndDecay.Update(
- range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
- TransitionSubresourceRange(barriers, updateRange, state, newState,
- pendingCommandSerial);
- });
- }
-
- void Texture::TrackUsageAndGetResourceBarrierForPass(
- CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const TextureSubresourceUsage& textureUsages) {
- if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
- // Track the underlying heap to ensure residency.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
- }
-
- HandleTransitionSpecialCases(commandContext);
-
- const ExecutionSerial pendingCommandSerial =
- ToBackend(GetDevice())->GetPendingCommandSerial();
- // TODO(crbug.com/dawn/814): support 1D textures.
- ASSERT(GetDimension() != wgpu::TextureDimension::e1D);
-
- mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
- StateAndDecay* state,
- wgpu::TextureUsage usage) {
- // Skip if this subresource is not used during the current pass
- if (usage == wgpu::TextureUsage::None) {
- return;
- }
-
- D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
- TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
- });
- }
-
- D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
- uint32_t baseSlice,
- uint32_t sliceCount) const {
- D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
- rtvDesc.Format = GetD3D12Format();
- if (IsMultisampledTexture()) {
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- ASSERT(GetNumMipLevels() == 1);
- ASSERT(sliceCount == 1);
- ASSERT(baseSlice == 0);
- ASSERT(mipLevel == 0);
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
- return rtvDesc;
- }
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
- // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
- // them as 1-layer 2D array textures. (Just like how we treat SRVs)
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
- // _rtv
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
- rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
- rtvDesc.Texture2DArray.ArraySize = sliceCount;
- rtvDesc.Texture2DArray.MipSlice = mipLevel;
- rtvDesc.Texture2DArray.PlaneSlice = 0;
- break;
- case wgpu::TextureDimension::e3D:
- rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
- rtvDesc.Texture3D.MipSlice = mipLevel;
- rtvDesc.Texture3D.FirstWSlice = baseSlice;
- rtvDesc.Texture3D.WSize = sliceCount;
- break;
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- break;
- }
- return rtvDesc;
- }
-
- D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- Aspect aspects,
- bool depthReadOnly,
- bool stencilReadOnly) const {
- D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
- dsvDesc.Format = GetD3D12Format();
- dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
- if (depthReadOnly && aspects & Aspect::Depth) {
- dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
- }
- if (stencilReadOnly && aspects & Aspect::Stencil) {
- dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
- }
-
- if (IsMultisampledTexture()) {
- ASSERT(GetNumMipLevels() == 1);
- ASSERT(layerCount == 1);
- ASSERT(baseArrayLayer == 0);
- ASSERT(mipLevel == 0);
- dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
- } else {
- dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
- dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
- dsvDesc.Texture2DArray.ArraySize = layerCount;
- dsvDesc.Texture2DArray.MipSlice = mipLevel;
- }
-
- return dsvDesc;
- }
-
- MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
- Device* device = ToBackend(GetDevice());
-
- uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
-
- if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
-
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- // Iterate the aspects individually to determine which clear flags to use.
- D3D12_CLEAR_FLAGS clearFlags = {};
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- switch (aspect) {
- case Aspect::Depth:
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- break;
- case Aspect::Stencil:
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- break;
- default:
- UNREACHABLE();
- }
- }
-
- if (clearFlags == 0) {
- continue;
- }
-
- CPUDescriptorHeapAllocation dsvHandle;
- DAWN_TRY_ASSIGN(
- dsvHandle,
- device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
- const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
- dsvHandle.GetBaseDescriptor();
- D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
- GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
- device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
- baseDescriptor);
-
- commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
- clearColor, 0, nullptr);
- }
- }
- } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
-
- const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
-
- ASSERT(range.aspects == Aspect::Color);
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- CPUDescriptorHeapAllocation rtvHeap;
- DAWN_TRY_ASSIGN(
- rtvHeap,
- device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
- const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
-
- uint32_t baseSlice = layer;
- uint32_t sliceCount = 1;
- if (GetDimension() == wgpu::TextureDimension::e3D) {
- baseSlice = 0;
- sliceCount = std::max(GetDepth() >> level, 1u);
- }
- D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
- GetRTVDescriptor(level, baseSlice, sliceCount);
- device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
- rtvHandle);
- commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
- }
- }
- } else {
- // create temp buffer with clear color to copy to the texture image
- TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
-
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
- kTextureBytesPerRowAlignment);
- uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- // compute d3d12 texture copy locations for texture and buffer
- Extent3D copySize = GetMipLevelPhysicalSize(level);
-
- TextureCopySubresource copySplit = Compute2DTextureCopySubresource(
- {0, 0, 0}, copySize, blockInfo, uploadHandle.startOffset, bytesPerRow);
-
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- RecordCopyBufferToTextureFromTextureCopySplit(
- commandList, copySplit,
- ToBackend(uploadHandle.stagingBuffer)->GetResource(), 0, bytesPerRow,
- this, level, layer, aspect);
- }
- }
- }
- }
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- GetDevice()->IncrementLazyClearCountForTesting();
- }
- return {};
- }
-
- void Texture::SetLabelHelper(const char* prefix) {
- SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
- GetLabel());
- }
-
- void Texture::SetLabelImpl() {
- SetLabelHelper("Dawn_InternalTexture");
- }
-
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range) {
- if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could contain
- // dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
- }
- }
-
- bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
- return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
- isValidToDecay == other.isValidToDecay;
- }
-
- // static
- Ref<TextureView> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
- }
-
- TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : TextureViewBase(texture, descriptor) {
- mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
- mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
-
- // TODO(enga): This will need to be much more nuanced when WebGPU has
- // texture view compatibility rules.
- UINT planeSlice = 0;
- if (GetFormat().HasDepthOrStencil()) {
- // Configure the SRV descriptor to reinterpret the texture allocated as
- // TYPELESS as a single-plane shader-accessible view.
- switch (descriptor->format) {
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
- break;
- case wgpu::TextureFormat::Depth16Unorm:
- mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
- break;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- switch (descriptor->aspect) {
- case wgpu::TextureAspect::DepthOnly:
- planeSlice = 0;
- mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
- break;
- case wgpu::TextureAspect::StencilOnly:
- planeSlice = 1;
- mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
- // Stencil is accessed using the .g component in the shader.
- // Map it to the zeroth component to match other APIs.
- mSrvDesc.Shader4ComponentMapping =
- D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
- D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
- D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
- break;
- case wgpu::TextureAspect::All:
- // A single aspect is not selected. The texture view must not be
- // sampled.
- mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
- break;
-
- // Depth formats cannot use plane aspects.
- case wgpu::TextureAspect::Plane0Only:
- case wgpu::TextureAspect::Plane1Only:
- UNREACHABLE();
- break;
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Per plane view formats must have the plane slice number be the index of the plane in the
- // array of textures.
- if (texture->GetFormat().IsMultiPlanar()) {
- const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
- planeSlice = GetAspectIndex(planeAspect);
- mSrvDesc.Format = D3D12TextureFormat(GetFormat().GetAspectInfo(planeAspect).format);
- }
-
- // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
- // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
- // array textures.
- // Multisampled textures may only be one array layer, so we use
- // D3D12_SRV_DIMENSION_TEXTURE2DMS.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
- // TODO(crbug.com/dawn/814): support 1D textures.
- if (GetTexture()->IsMultisampledTexture()) {
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::e2DArray:
- ASSERT(texture->GetArrayLayers() == 1);
- DAWN_FALLTHROUGH;
- case wgpu::TextureViewDimension::e2D:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
- break;
-
- default:
- UNREACHABLE();
- }
- } else {
- switch (descriptor->dimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
- mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
- mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
- mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
- mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
- break;
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- ASSERT(descriptor->arrayLayerCount % 6 == 0);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
- mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
- mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
- mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
- break;
- case wgpu::TextureViewDimension::e3D:
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
- mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
- mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
- mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
- mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
- break;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
- }
-
- DXGI_FORMAT TextureView::GetD3D12Format() const {
- return D3D12TextureFormat(GetFormat().format);
- }
-
- const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
- ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
- return mSrvDesc;
- }
-
- D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
- return ToBackend(GetTexture())
- ->GetRTVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
- }
-
- D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
- bool stencilReadOnly) const {
- ASSERT(GetLevelCount() == 1);
- return ToBackend(GetTexture())
- ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(),
- GetAspects(), depthReadOnly, stencilReadOnly);
- }
-
- D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
- D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
- uavDesc.Format = GetD3D12Format();
-
- ASSERT(!GetTexture()->IsMultisampledTexture());
- switch (GetDimension()) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
- uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
- uavDesc.Texture2DArray.ArraySize = GetLayerCount();
- uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
- uavDesc.Texture2DArray.PlaneSlice = 0;
- break;
- case wgpu::TextureViewDimension::e3D:
- uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
- uavDesc.Texture3D.FirstWSlice = 0;
- uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
- uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
- break;
- // TODO(crbug.com/dawn/814): support 1D textures.
- case wgpu::TextureViewDimension::e1D:
- // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
- // descriptor for them.
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- return uavDesc;
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
deleted file mode 100644
index 4d49d6fa9ff..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_TEXTURED3D12_H_
-#define DAWNNATIVE_D3D12_TEXTURED3D12_H_
-
-#include "dawn_native/Texture.h"
-
-#include "dawn_native/DawnNative.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/PassResourceUsage.h"
-#include "dawn_native/d3d12/IntegerTypes.h"
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class CommandRecordingContext;
- class Device;
- class D3D11on12ResourceCacheEntry;
-
- DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
- MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
- const TextureDescriptor* descriptor);
- MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
- MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
-
- class Texture final : public TextureBase {
- public:
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> CreateExternalImage(
- Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized);
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture);
-
- DXGI_FORMAT GetD3D12Format() const;
- ID3D12Resource* GetD3D12Resource() const;
- DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
-
- D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t mipLevel,
- uint32_t baseSlice,
- uint32_t sliceCount) const;
- D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- Aspect aspects,
- bool depthReadOnly,
- bool stencilReadOnly) const;
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range);
-
- void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- const TextureSubresourceUsage& textureUsages);
- void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range);
- void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage);
- void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState);
-
- private:
- Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
- ~Texture() override;
- using TextureBase::TextureBase;
-
- MaybeError InitializeAsInternalTexture();
- MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture);
- MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
-
- void SetLabelHelper(const char* prefix);
-
- // Dawn API
- void SetLabelImpl() override;
- void DestroyImpl() override;
-
- MaybeError ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue);
-
- // Barriers implementation details.
- struct StateAndDecay {
- D3D12_RESOURCE_STATES lastState;
- ExecutionSerial lastDecaySerial;
- bool isValidToDecay;
-
- bool operator==(const StateAndDecay& other) const;
- };
- void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- std::vector<D3D12_RESOURCE_BARRIER>* barrier,
- D3D12_RESOURCE_STATES newState,
- const SubresourceRange& range);
- void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
- const SubresourceRange& range,
- StateAndDecay* state,
- D3D12_RESOURCE_STATES subresourceNewState,
- ExecutionSerial pendingCommandSerial) const;
- void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
-
- SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
-
- ResourceHeapAllocation mResourceAllocation;
- bool mSwapChainTexture = false;
- D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
-
- ExternalMutexSerial mAcquireMutexKey = ExternalMutexSerial(0);
- ExternalMutexSerial mReleaseMutexKey = ExternalMutexSerial(0);
- Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- static Ref<TextureView> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- DXGI_FORMAT GetD3D12Format() const;
-
- const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
- D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
- D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly,
- bool stencilReadOnly) const;
- D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
-
- private:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
- };
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_TEXTURED3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
deleted file mode 100644
index 38479eba103..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/UtilsD3D12.h"
-
-#include "common/Assert.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/d3d12/BufferD3D12.h"
-#include "dawn_native/d3d12/CommandRecordingContext.h"
-#include "dawn_native/d3d12/D3D12Error.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-#include <stringapiset.h>
-
-namespace dawn_native { namespace d3d12 {
-
- ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
- size_t len = strlen(str);
- if (len == 0) {
- return std::wstring();
- }
- int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
- if (numChars == 0) {
- return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
- }
- std::wstring result;
- result.resize(numChars);
- int numConvertedChars =
- MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
- if (numConvertedChars != numChars) {
- return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
- }
- return std::move(result);
- }
-
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
- switch (func) {
- case wgpu::CompareFunction::Never:
- return D3D12_COMPARISON_FUNC_NEVER;
- case wgpu::CompareFunction::Less:
- return D3D12_COMPARISON_FUNC_LESS;
- case wgpu::CompareFunction::LessEqual:
- return D3D12_COMPARISON_FUNC_LESS_EQUAL;
- case wgpu::CompareFunction::Greater:
- return D3D12_COMPARISON_FUNC_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
- case wgpu::CompareFunction::Equal:
- return D3D12_COMPARISON_FUNC_EQUAL;
- case wgpu::CompareFunction::NotEqual:
- return D3D12_COMPARISON_FUNC_NOT_EQUAL;
- case wgpu::CompareFunction::Always:
- return D3D12_COMPARISON_FUNC_ALWAYS;
-
- case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
- }
- }
-
- D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
- uint32_t level,
- uint32_t layer,
- Aspect aspect) {
- D3D12_TEXTURE_COPY_LOCATION copyLocation;
- copyLocation.pResource = texture->GetD3D12Resource();
- copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
- copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
-
- return copyLocation;
- }
-
- D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
- const Texture* texture,
- ID3D12Resource* bufferResource,
- const Extent3D& bufferSize,
- const uint64_t offset,
- const uint32_t rowPitch,
- Aspect aspect) {
- D3D12_TEXTURE_COPY_LOCATION bufferLocation;
- bufferLocation.pResource = bufferResource;
- bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
- bufferLocation.PlacedFootprint.Offset = offset;
- bufferLocation.PlacedFootprint.Footprint.Format =
- texture->GetD3D12CopyableSubresourceFormat(aspect);
- bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
- bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
- bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
- bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
- return bufferLocation;
- }
-
- D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
- D3D12_BOX sourceRegion;
- sourceRegion.left = offset.x;
- sourceRegion.top = offset.y;
- sourceRegion.front = offset.z;
- sourceRegion.right = offset.x + copySize.width;
- sourceRegion.bottom = offset.y + copySize.height;
- sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
- return sourceRegion;
- }
-
- bool IsTypeless(DXGI_FORMAT format) {
- // List generated from <dxgiformat.h>
- switch (format) {
- case DXGI_FORMAT_R32G32B32A32_TYPELESS:
- case DXGI_FORMAT_R32G32B32_TYPELESS:
- case DXGI_FORMAT_R16G16B16A16_TYPELESS:
- case DXGI_FORMAT_R32G32_TYPELESS:
- case DXGI_FORMAT_R32G8X24_TYPELESS:
- case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
- case DXGI_FORMAT_R10G10B10A2_TYPELESS:
- case DXGI_FORMAT_R8G8B8A8_TYPELESS:
- case DXGI_FORMAT_R16G16_TYPELESS:
- case DXGI_FORMAT_R32_TYPELESS:
- case DXGI_FORMAT_R24G8_TYPELESS:
- case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
- case DXGI_FORMAT_R8G8_TYPELESS:
- case DXGI_FORMAT_R16_TYPELESS:
- case DXGI_FORMAT_R8_TYPELESS:
- case DXGI_FORMAT_BC1_TYPELESS:
- case DXGI_FORMAT_BC2_TYPELESS:
- case DXGI_FORMAT_BC3_TYPELESS:
- case DXGI_FORMAT_BC4_TYPELESS:
- case DXGI_FORMAT_BC5_TYPELESS:
- case DXGI_FORMAT_B8G8R8A8_TYPELESS:
- case DXGI_FORMAT_B8G8R8X8_TYPELESS:
- case DXGI_FORMAT_BC6H_TYPELESS:
- case DXGI_FORMAT_BC7_TYPELESS:
- return true;
- default:
- return false;
- }
- }
-
- void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const TextureCopySubresource& baseCopySplit,
- ID3D12Resource* bufferResource,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- Texture* texture,
- uint32_t textureMiplevel,
- uint32_t textureLayer,
- Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
-
- for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
-
- // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in TextureCopySubresource::CopyInfo.
- const uint64_t offsetBytes = info.alignedOffset + baseOffset;
- const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
- offsetBytes, bufferBytesPerRow, aspect);
- const D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
- info.textureOffset.y, info.textureOffset.z,
- &bufferLocation, &sourceRegion);
- }
- }
-
- void CopyBufferTo2DTextureWithCopySplit(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const Extent3D& copySize,
- Texture* texture,
- Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- // See comments in Compute2DTextureCopySplits() for more details.
- const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(aspect).block;
- const TextureCopySplits copySplits = Compute2DTextureCopySplits(
- textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
- const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
-
- // copySplits.copySubresources[1] is always calculated for the second copy layer with
- // extra "bytesPerLayer" copy offset compared with the first copy layer. So
- // here we use an array bufferOffsetsForNextLayer to record the extra offsets
- // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
- // the next copy layer that uses copySplits.copySubresources[0], and
- // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
- // that uses copySplits.copySubresources[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
- bufferOffsetsForNextLayer = {{0u, 0u}};
-
- for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
- const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
-
- const TextureCopySubresource& copySplitPerLayerBase =
- copySplits.copySubresources[splitIndex];
- const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
- const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
-
- RecordCopyBufferToTextureFromTextureCopySplit(
- commandContext->GetCommandList(), copySplitPerLayerBase, bufferResource,
- bufferOffsetForNextLayer, bytesPerRow, texture, textureCopy.mipLevel,
- copyTextureLayer, aspect);
-
- bufferOffsetsForNextLayer[splitIndex] +=
- bytesPerLayer * copySplits.copySubresources.size();
- }
- }
-
- void CopyBufferTo3DTexture(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const Extent3D& copySize,
- Texture* texture,
- Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- // See comments in Compute3DTextureCopySplits() for more details.
- const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(aspect).block;
- const TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
- textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
- RecordCopyBufferToTextureFromTextureCopySplit(commandContext->GetCommandList(), copyRegions,
- bufferResource, 0, bytesPerRow, texture,
- textureCopy.mipLevel, 0, aspect);
- }
-
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const Extent3D& copySize,
- Texture* texture,
- Aspect aspect) {
- // Record the CopyTextureRegion commands for 3D textures. Multiple depths of 3D
- // textures can be copied in one shot and copySplits are not needed.
- if (texture->GetDimension() == wgpu::TextureDimension::e3D) {
- CopyBufferTo3DTexture(commandContext, textureCopy, bufferResource, offset, bytesPerRow,
- rowsPerImage, copySize, texture, aspect);
- } else {
- // Compute the copySplits and record the CopyTextureRegion commands for 2D
- // textures.
- CopyBufferTo2DTextureWithCopySplit(commandContext, textureCopy, bufferResource, offset,
- bytesPerRow, rowsPerImage, copySize, texture,
- aspect);
- }
- }
-
- void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const TextureCopySubresource& baseCopySplit,
- Buffer* buffer,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- Texture* texture,
- uint32_t textureMiplevel,
- uint32_t textureLayer,
- Aspect aspect) {
- const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
-
- for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
-
- // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in TextureCopySubresource::CopyInfo.
- const uint64_t offsetBytes = info.alignedOffset + baseOffset;
- const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
- info.bufferSize, offsetBytes,
- bufferBytesPerRow, aspect);
- const D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
-
- commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
- info.bufferOffset.y, info.bufferOffset.z,
- &textureLocation, &sourceRegion);
- }
- }
-
- void Copy2DTextureToBufferWithCopySplit(ID3D12GraphicsCommandList* commandList,
- const TextureCopy& textureCopy,
- const BufferCopy& bufferCopy,
- Texture* texture,
- Buffer* buffer,
- const Extent3D& copySize) {
- ASSERT(HasOneBit(textureCopy.aspect));
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-
- // See comments around Compute2DTextureCopySplits() for more details.
- const TextureCopySplits copySplits =
- Compute2DTextureCopySplits(textureCopy.origin, copySize, blockInfo, bufferCopy.offset,
- bufferCopy.bytesPerRow, bufferCopy.rowsPerImage);
-
- const uint64_t bytesPerLayer = bufferCopy.bytesPerRow * bufferCopy.rowsPerImage;
-
- // copySplits.copySubresources[1] is always calculated for the second copy layer with
- // extra "bytesPerLayer" copy offset compared with the first copy layer. So
- // here we use an array bufferOffsetsForNextLayer to record the extra offsets
- // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
- // the next copy layer that uses copySplits.copySubresources[0], and
- // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
- // that uses copySplits.copySubresources[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
- bufferOffsetsForNextLayer = {{0u, 0u}};
- for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
- const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
-
- const TextureCopySubresource& copySplitPerLayerBase =
- copySplits.copySubresources[splitIndex];
- const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
- const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
-
- RecordCopyTextureToBufferFromTextureCopySplit(
- commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextLayer,
- bufferCopy.bytesPerRow, texture, textureCopy.mipLevel, copyTextureLayer,
- textureCopy.aspect);
-
- bufferOffsetsForNextLayer[splitIndex] +=
- bytesPerLayer * copySplits.copySubresources.size();
- }
- }
-
- void Copy3DTextureToBuffer(ID3D12GraphicsCommandList* commandList,
- const TextureCopy& textureCopy,
- const BufferCopy& bufferCopy,
- Texture* texture,
- Buffer* buffer,
- const Extent3D& copySize) {
- ASSERT(HasOneBit(textureCopy.aspect));
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-
- // See comments around Compute3DTextureCopySplits() for more details.
- const TextureCopySubresource copyRegions =
- Compute3DTextureCopySplits(textureCopy.origin, copySize, blockInfo, bufferCopy.offset,
- bufferCopy.bytesPerRow, bufferCopy.rowsPerImage);
-
- RecordCopyTextureToBufferFromTextureCopySplit(commandList, copyRegions, buffer, 0,
- bufferCopy.bytesPerRow, texture,
- textureCopy.mipLevel, 0, textureCopy.aspect);
- }
-
- void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
- const TextureCopy& textureCopy,
- const BufferCopy& bufferCopy,
- Texture* texture,
- Buffer* buffer,
- const Extent3D& copySize) {
- if (texture->GetDimension() == wgpu::TextureDimension::e3D) {
- Copy3DTextureToBuffer(commandList, textureCopy, bufferCopy, texture, buffer, copySize);
- } else {
- Copy2DTextureToBufferWithCopySplit(commandList, textureCopy, bufferCopy, texture,
- buffer, copySize);
- }
- }
-
- void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
- if (!object) {
- return;
- }
-
- if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
- object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
- return;
- }
-
- std::string objectName = prefix;
- objectName += "_";
- objectName += label;
- object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
deleted file mode 100644
index 2a3f3d5b95c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_UTILSD3D12_H_
-#define DAWNNATIVE_D3D12_UTILSD3D12_H_
-
-#include "dawn_native/Commands.h"
-#include "dawn_native/d3d12/BufferD3D12.h"
-#include "dawn_native/d3d12/TextureCopySplitter.h"
-#include "dawn_native/d3d12/TextureD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
-
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
-
- D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
- uint32_t level,
- uint32_t layer,
- Aspect aspect);
-
- D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
- const Texture* texture,
- ID3D12Resource* bufferResource,
- const Extent3D& bufferSize,
- const uint64_t offset,
- const uint32_t rowPitch,
- Aspect aspect);
- D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
-
- bool IsTypeless(DXGI_FORMAT format);
-
- void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const TextureCopySubresource& baseCopySplit,
- ID3D12Resource* bufferResource,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- Texture* texture,
- uint32_t textureMiplevel,
- uint32_t textureLayer,
- Aspect aspect);
-
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- const Extent3D& copySize,
- Texture* texture,
- Aspect aspect);
-
- void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const TextureCopySubresource& baseCopySplit,
- Buffer* buffer,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- Texture* texture,
- uint32_t textureMiplevel,
- uint32_t textureLayer,
- Aspect aspect);
-
- void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
- const TextureCopy& textureCopy,
- const BufferCopy& bufferCopy,
- Texture* texture,
- Buffer* buffer,
- const Extent3D& copySize);
-
- void SetDebugName(Device* device,
- ID3D12Object* object,
- const char* prefix,
- std::string label = "");
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_UTILSD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h b/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
deleted file mode 100644
index 1c733c8256e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_D3D12PLATFORM_H_
-#define DAWNNATIVE_D3D12_D3D12PLATFORM_H_
-
-// Pre-emptively include windows.h but remove its macros so that they aren't set when declaring the
-// COM interfaces. Otherwise ID3D12InfoQueue::GetMessage would be either GetMessageA or GetMessageW
-// which causes compilation errors.
-#include "common/windows_with_undefs.h"
-
-#include <d3d11_2.h>
-#include <d3d11on12.h>
-#include <d3d12.h>
-#include <dxcapi.h>
-#include <dxgi1_4.h>
-#include <wrl.h>
-
-// DXProgrammableCapture.h takes a dependency on other platform header
-// files, so it must be defined after them.
-#include <DXProgrammableCapture.h>
-#include <dxgidebug.h>
-
-using Microsoft::WRL::ComPtr;
-
-#endif // DAWNNATIVE_D3D12_D3D12PLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h b/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
deleted file mode 100644
index de9ef509206..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_DAWNPLATFORM_H_
-#define DAWNNATIVE_DAWNPLATFORM_H_
-
-// Use webgpu_cpp to have the enum and bitfield definitions
-#include <dawn/webgpu_cpp.h>
-
-#include <dawn_native/dawn_platform_autogen.h>
-
-namespace dawn_native {
-
- // kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
- // if the enums are contiguous, making it suitable for iteration.
- // It is defined in dawn_platform_autogen.h
- template <typename T>
- constexpr uint32_t kEnumCount = EnumCount<T>::value;
-
- // Extra buffer usages
- // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
- // usage as storage buffer in the internal pipeline.
- static constexpr wgpu::BufferUsage kInternalStorageBuffer =
- static_cast<wgpu::BufferUsage>(0x40000000);
-
- // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
- static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
- static_cast<wgpu::BufferUsage>(0x80000000);
-
- // Extra texture usages
- // Add an extra texture usage (readonly render attachment usage) for render pass resource
- // tracking
- static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
- static_cast<wgpu::TextureUsage>(0x40000000);
-
- // Internal usage to help tracking when a subresource is used as render attachment usage
- // more than once in a render pass.
- static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
- static_cast<wgpu::TextureUsage>(0x80000001);
-
- // Add an extra texture usage for textures that will be presented, for use in backends
- // that needs to transition to present usage.
- // This currently aliases wgpu::TextureUsage::Present, we would assign it
- // some bit when wgpu::TextureUsage::Present is removed.
- static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
-
- static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
- static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_DAWNPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.h
deleted file mode 100644
index 6cffa43ac6d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_BACKENDMTL_H_
-#define DAWNNATIVE_METAL_BACKENDMTL_H_
-
-#include "dawn_native/BackendConnection.h"
-
-namespace dawn_native { namespace metal {
-
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance);
-
- std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_BACKENDMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
deleted file mode 100644
index f4166f0a0f5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ /dev/null
@@ -1,605 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/BackendMTL.h"
-
-#include "common/CoreFoundationRef.h"
-#include "common/GPUInfo.h"
-#include "common/NSRef.h"
-#include "common/Platform.h"
-#include "common/SystemUtils.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/MetalBackend.h"
-#include "dawn_native/metal/BufferMTL.h"
-#include "dawn_native/metal/DeviceMTL.h"
-
-#if defined(DAWN_PLATFORM_MACOS)
-# import <IOKit/IOKitLib.h>
-# include "common/IOKitRef.h"
-#endif
-
-#include <vector>
-
-namespace dawn_native { namespace metal {
-
- namespace {
-
- struct PCIIDs {
- uint32_t vendorId;
- uint32_t deviceId;
- };
-
- struct Vendor {
- const char* trademark;
- uint32_t vendorId;
- };
-
-#if defined(DAWN_PLATFORM_MACOS)
- const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
- {"Radeon", gpu_info::kVendorID_AMD},
- {"Intel", gpu_info::kVendorID_Intel},
- {"Geforce", gpu_info::kVendorID_Nvidia},
- {"Quadro", gpu_info::kVendorID_Nvidia}};
-
- // Find vendor ID from MTLDevice name.
- MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
- uint32_t vendorId = 0;
- const char* deviceName = [device.name UTF8String];
- for (const auto& it : kVendors) {
- if (strstr(deviceName, it.trademark) != nullptr) {
- vendorId = it.vendorId;
- break;
- }
- }
-
- if (vendorId == 0) {
- return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
- }
-
- // Set vendor id with 0
- *ids = PCIIDs{vendorId, 0};
- return {};
- }
-
- // Extracts an integer property from a registry entry.
- uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
- uint32_t value = 0;
-
- // Recursively search registry entry and its parents for property name
- // The data should release with CFRelease
- CFRef<CFDataRef> data =
- AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
- entry, kIOServicePlane, name, kCFAllocatorDefault,
- kIORegistryIterateRecursively | kIORegistryIterateParents)));
-
- if (data == nullptr) {
- return value;
- }
-
- // CFDataGetBytePtr() is guaranteed to return a read-only pointer
- value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
- return value;
- }
-
- // Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
- // The registry entry correponding to [device registryID] doesn't contain the exact PCI ids
- // because it corresponds to a driver. However its parent entry corresponds to the device
- // itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
- // MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
- //
- // - PCI0@0
- // | - AppleACPIPCI
- // | | - IGPU@2 (type IOPCIDevice)
- // | | | - IntelAccelerator (type IOGraphicsAccelerator2)
- // | | - PEG0@1
- // | | | - IOPP
- // | | | | - GFX0@0 (type IOPCIDevice)
- // | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
- //
- // [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
- // their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
- MaybeError API_AVAILABLE(macos(10.13))
- GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- // Get a matching dictionary for the IOGraphicsAccelerator2
- CFRef<CFMutableDictionaryRef> matchingDict =
- AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
- if (matchingDict == nullptr) {
- return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
- }
-
- // IOServiceGetMatchingService will consume the reference on the matching dictionary,
- // so we don't need to release the dictionary.
- IORef<io_registry_entry_t> acceleratorEntry = AcquireIORef(
- IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
- if (acceleratorEntry == IO_OBJECT_NULL) {
- return DAWN_INTERNAL_ERROR(
- "Failed to get the IO registry entry for the accelerator");
- }
-
- // Get the parent entry that will be the IOPCIDevice
- IORef<io_registry_entry_t> deviceEntry;
- if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
- deviceEntry.InitializeInto()) != kIOReturnSuccess) {
- return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
- }
-
- ASSERT(deviceEntry != IO_OBJECT_NULL);
-
- uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
- uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
-
- *ids = PCIIDs{vendorId, deviceId};
-
- return {};
- }
-
- MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
- // id by vendor name on old macOS
- if (@available(macos 10.13, *)) {
- return GetDeviceIORegistryPCIInfo(device, ids);
- } else {
- return GetVendorIdFromVendors(device, ids);
- }
- }
-
- bool IsMetalSupported() {
- // Metal was first introduced in macOS 10.11
- // WebGPU is targeted at macOS 10.12+
- // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
- return IsMacOSVersionAtLeast(10, 12);
- }
-#elif defined(DAWN_PLATFORM_IOS)
- MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
- DAWN_UNUSED(device);
- *ids = PCIIDs{0, 0};
- return {};
- }
-
- bool IsMetalSupported() {
- return true;
- }
-#else
-# error "Unsupported Apple platform."
-#endif
-
- DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
- API_AVAILABLE(macos(11.0), ios(14.0)) {
- bool isBlitBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
- bool isDispatchBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
- bool isDrawBoundarySupported =
- [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
-
- return isBlitBoundarySupported && isDispatchBoundarySupported &&
- isDrawBoundarySupported;
- }
-
- DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
- MTLCommonCounterSet counterSetName,
- std::vector<MTLCommonCounter> counterNames)
- API_AVAILABLE(macos(10.15), ios(14.0)) {
- // MTLDevice’s counterSets property declares which counter sets it supports. Check
- // whether it's available on the device before requesting a counter set.
- id<MTLCounterSet> counterSet = nil;
- for (id<MTLCounterSet> set in device.counterSets) {
- if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
- counterSet = set;
- break;
- }
- }
-
- // The counter set is not supported.
- if (counterSet == nil) {
- return false;
- }
-
- // A GPU might support a counter set, but only support a subset of the counters in that
- // set, check if the counter set supports all specific counters we need. Return false
- // if there is a counter unsupported.
- for (MTLCommonCounter counterName : counterNames) {
- bool found = false;
- for (id<MTLCounter> counter in counterSet.counters) {
- if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
- found = true;
- break;
- }
- }
- if (!found) {
- return false;
- }
- }
-
- if (@available(macOS 11.0, iOS 14.0, *)) {
- // Check whether it can read GPU counters at the specified command boundary. Apple
- // family GPUs do not support sampling between different Metal commands, because
- // they defer fragment processing until after the GPU processes all the primitives
- // in the render pass.
- if (!IsCounterSamplingBoundarySupport(device)) {
- return false;
- }
- }
-
- return true;
- }
-
- } // anonymous namespace
-
- // The Metal backend's Adapter.
-
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance, id<MTLDevice> device)
- : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
- mPCIInfo.name = std::string([[*mDevice name] UTF8String]);
-
- PCIIDs ids;
- if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
- mPCIInfo.vendorId = ids.vendorId;
- mPCIInfo.deviceId = ids.deviceId;
- }
-
-#if defined(DAWN_PLATFORM_IOS)
- mAdapterType = wgpu::AdapterType::IntegratedGPU;
- const char* systemName = "iOS ";
-#elif defined(DAWN_PLATFORM_MACOS)
- if ([device isLowPower]) {
- mAdapterType = wgpu::AdapterType::IntegratedGPU;
- } else {
- mAdapterType = wgpu::AdapterType::DiscreteGPU;
- }
- const char* systemName = "macOS ";
-#else
-# error "Unsupported Apple platform."
-#endif
-
- NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
- mDriverDescription =
- "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
- }
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override {
- // Via dawn_native::metal::WrapIOSurface
- return true;
- }
-
- private:
- ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) override {
- return Device::Create(this, mDevice, descriptor);
- }
-
- MaybeError InitializeImpl() override {
- return {};
- }
-
- MaybeError InitializeSupportedFeaturesImpl() override {
-#if defined(DAWN_PLATFORM_MACOS)
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- }
-#endif
-
- if (@available(macOS 10.15, iOS 14.0, *)) {
- if (IsGPUCounterSupported(
- *mDevice, MTLCommonCounterSetStatistic,
- {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
- MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
- MTLCommonCounterComputeKernelInvocations})) {
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
- }
-
- if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
- {MTLCommonCounterTimestamp})) {
- bool enableTimestampQuery = true;
-
-#if defined(DAWN_PLATFORM_MACOS)
- // Disable timestamp query on macOS 10.15 on AMD GPU because WriteTimestamp
- // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
- // has been fixed on macOS 11.0. See crbug.com/dawn/545
- enableTimestampQuery &=
- !(gpu_info::IsAMD(GetPCIInfo().vendorId) && IsMacOSVersionAtLeast(11));
-#endif
-
- if (enableTimestampQuery) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
- }
- }
-
- if (@available(macOS 10.11, iOS 11.0, *)) {
- mSupportedFeatures.EnableFeature(Feature::DepthClamping);
- }
-
- return {};
- }
-
- enum class MTLGPUFamily {
- Apple1,
- Apple2,
- Apple3,
- Apple4,
- Apple5,
- Apple6,
- Apple7,
- Mac1,
- Mac2,
- };
-
- ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
- // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
-
- if (@available(macOS 10.15, iOS 10.13, *)) {
- if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
- return MTLGPUFamily::Mac2;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
- return MTLGPUFamily::Mac1;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
- return MTLGPUFamily::Apple7;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
- return MTLGPUFamily::Apple6;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
- return MTLGPUFamily::Apple5;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
- return MTLGPUFamily::Apple4;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
- return MTLGPUFamily::Apple3;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
- return MTLGPUFamily::Apple2;
- }
- if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
- return MTLGPUFamily::Apple1;
- }
- }
-
-#if TARGET_OS_OSX
- if (@available(macOS 10.14, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
- return MTLGPUFamily::Mac2;
- }
- }
- if (@available(macOS 10.11, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
- return MTLGPUFamily::Mac1;
- }
- }
-#elif TARGET_OS_IOS
- if (@available(iOS 10.11, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
- return MTLGPUFamily::Apple4;
- }
- }
- if (@available(iOS 9.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
- return MTLGPUFamily::Apple3;
- }
- }
- if (@available(iOS 8.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
- return MTLGPUFamily::Apple2;
- }
- }
- if (@available(iOS 8.0, *)) {
- if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
- return MTLGPUFamily::Apple1;
- }
- }
-#endif
- return DAWN_INTERNAL_ERROR("Unsupported Metal device");
- }
-
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
- struct MTLDeviceLimits {
- uint32_t maxVertexAttribsPerDescriptor;
- uint32_t maxBufferArgumentEntriesPerFunc;
- uint32_t maxTextureArgumentEntriesPerFunc;
- uint32_t maxSamplerStateArgumentEntriesPerFunc;
- uint32_t maxThreadsPerThreadgroup;
- uint32_t maxTotalThreadgroupMemory;
- uint32_t maxFragmentInputComponents;
- uint32_t max1DTextureSize;
- uint32_t max2DTextureSize;
- uint32_t max3DTextureSize;
- uint32_t maxTextureArrayLayers;
- uint32_t minBufferOffsetAlignment;
- };
-
- struct LimitsForFamily {
- uint32_t MTLDeviceLimits::*limit;
- ityp::array<MTLGPUFamily, uint32_t, 9> values;
- };
-
- // clang-format off
- // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
- // Apple Mac
- // 1, 2, 3, 4, 5, 6, 7, 1, 2
- constexpr LimitsForFamily kMTLLimits[12] = {
- {&MTLDeviceLimits::maxVertexAttribsPerDescriptor, { 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u }},
- {&MTLDeviceLimits::maxBufferArgumentEntriesPerFunc, { 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u, 31u }},
- {&MTLDeviceLimits::maxTextureArgumentEntriesPerFunc, { 31u, 31u, 31u, 96u, 96u, 128u, 128u, 128u, 128u }},
- {&MTLDeviceLimits::maxSamplerStateArgumentEntriesPerFunc, { 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u }},
- {&MTLDeviceLimits::maxThreadsPerThreadgroup, { 512u, 512u, 512u, 1024u, 1024u, 1024u, 1024u, 1024u, 1024u }},
- {&MTLDeviceLimits::maxTotalThreadgroupMemory, { 16352u, 16352u, 16384u, 32768u, 32768u, 32768u, 32768u, 32768u, 32768u }},
- {&MTLDeviceLimits::maxFragmentInputComponents, { 60u, 60u, 60u, 124u, 124u, 124u, 124u, 124u, 124u }},
- {&MTLDeviceLimits::max1DTextureSize, { 8192u, 8192u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u }},
- {&MTLDeviceLimits::max2DTextureSize, { 8192u, 8192u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u, 16384u }},
- {&MTLDeviceLimits::max3DTextureSize, { 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u }},
- {&MTLDeviceLimits::maxTextureArrayLayers, { 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u, 2048u }},
- {&MTLDeviceLimits::minBufferOffsetAlignment, { 4u, 4u, 4u, 4u, 4u, 4u, 4u, 256u, 256u }},
- };
- // clang-format on
-
- MTLGPUFamily mtlGPUFamily;
- DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
-
- MTLDeviceLimits mtlLimits;
- for (const auto& limitsForFamily : kMTLLimits) {
- mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
- }
-
- GetDefaultLimits(&limits->v1);
-
- limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
- limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
- limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
- limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
-
- uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
- maxBuffersPerStage -= 1; // One slot is reserved to store buffer lengths.
-
- uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
- limits->v1.maxUniformBuffersPerShaderStage +
- limits->v1.maxVertexBuffers;
-
- ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
- {
- uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
- limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
- limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
- limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
- }
-
- uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
- limits->v1.maxStorageTexturesPerShaderStage;
-
- ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
- {
- uint32_t additional =
- mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
- limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
- limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
- }
-
- limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
-
- // Metal limits are per-function, so the layout limits are the same as the stage
- // limits. Note: this should likely change if the implementation uses Metal argument
- // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
- // buffers may be set directly.
- // Mac GPU families with tier 1 argument buffers support 64
- // buffers, 128 textures, and 16 samplers. Mac GPU families
- // with tier 2 argument buffers support 500000 buffers and
- // textures, and 1024 unique samplers
- limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
- limits->v1.maxUniformBuffersPerShaderStage;
- limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
- limits->v1.maxStorageBuffersPerShaderStage;
-
- // The WebGPU limit is the limit across all vertex buffers, combined.
- limits->v1.maxVertexAttributes =
- limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
-
- limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
-
- limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
- limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
- limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
-
- limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
- limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
-
- uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
-
- // Metal has no documented limit on the size of a binding. Use the maximum
- // buffer size.
- limits->v1.maxUniformBufferBindingSize = maxBufferSize;
- limits->v1.maxStorageBufferBindingSize = maxBufferSize;
-
- // TODO(crbug.com/dawn/685):
- // LIMITS NOT SET:
- // - maxBindGroups
- // - maxVertexBufferArrayStride
-
- return {};
- }
-
- NSPRef<id<MTLDevice>> mDevice;
- };
-
- // Implementation of the Metal backend's BackendConnection
-
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::Metal) {
- if (GetInstance()->IsBackendValidationEnabled()) {
- setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
- }
- }
-
- std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
- }
-
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
-
- std::vector<std::unique_ptr<AdapterBase>> adapters;
- BOOL supportedVersion = NO;
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macOS 10.11, *)) {
- supportedVersion = YES;
-
- NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
-
- for (id<MTLDevice> device in devices.Get()) {
- std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(GetInstance(), device);
- if (!GetInstance()->ConsumedError(adapter->Initialize())) {
- adapters.push_back(std::move(adapter));
- }
- }
- }
-#endif
-
-#if defined(DAWN_PLATFORM_IOS)
- if (@available(iOS 8.0, *)) {
- supportedVersion = YES;
- // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
- std::unique_ptr<Adapter> adapter =
- std::make_unique<Adapter>(GetInstance(), MTLCreateSystemDefaultDevice());
- if (!GetInstance()->ConsumedError(adapter->Initialize())) {
- adapters.push_back(std::move(adapter));
- }
- }
-#endif
- if (!supportedVersion) {
- UNREACHABLE();
- }
- return adapters;
- }
-
- BackendConnection* Connect(InstanceBase* instance) {
- if (!IsMetalSupported()) {
- return nullptr;
- }
- return new Backend(instance);
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
deleted file mode 100644
index 1d2c2a93342..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
-#define DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
-
-#include "common/SlabAllocator.h"
-#include "dawn_native/BindGroupLayout.h"
-
-namespace dawn_native { namespace metal {
-
- class BindGroup;
- class Device;
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static Ref<BindGroupLayout> Create(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup);
-
- private:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
- ~BindGroupLayout() override = default;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
deleted file mode 100644
index 5d748c1f787..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/BindGroupLayoutMTL.h"
-
-#include "dawn_native/metal/BindGroupMTL.h"
-
-namespace dawn_native { namespace metal {
-
- // static
- Ref<BindGroupLayout> BindGroupLayout::Create(
- DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
- }
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
-
- Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
- mBindGroupAllocator.Deallocate(bindGroup);
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
deleted file mode 100644
index 9c875833c74..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_BINDGROUPMTL_H_
-#define DAWNNATIVE_METAL_BINDGROUPMTL_H_
-
-#include "common/PlacementAllocated.h"
-#include "dawn_native/BindGroup.h"
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
-
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
-
- private:
- ~BindGroup() override;
-
- void DestroyImpl() override;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_BINDGROUPMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
deleted file mode 100644
index bbb5827fa07..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/BindGroupMTL.h"
-
-#include "dawn_native/metal/BindGroupLayoutMTL.h"
-#include "dawn_native/metal/DeviceMTL.h"
-namespace dawn_native { namespace metal {
-
- BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(this, device, descriptor) {
- }
-
- BindGroup::~BindGroup() = default;
-
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this);
- }
-
- // static
- Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
deleted file mode 100644
index 61747bedecb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_BUFFERMTL_H_
-#define DAWNNATIVE_METAL_BUFFERMTL_H_
-
-#include "common/NSRef.h"
-#include "common/SerialQueue.h"
-#include "dawn_native/Buffer.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
- id<MTLBuffer> GetMTLBuffer() const;
-
- bool EnsureDataInitialized(CommandRecordingContext* commandContext);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy);
-
- static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
-
- private:
- using BufferBase::BufferBase;
- MaybeError Initialize(bool mappedAtCreation);
-
- ~Buffer() override;
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- void* GetMappedPointerImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
-
- void InitializeToZero(CommandRecordingContext* commandContext);
- void ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- NSPRef<id<MTLBuffer>> mMtlBuffer;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_BUFFERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
deleted file mode 100644
index 6907c9407cc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/BufferMTL.h"
-
-#include "common/Math.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/metal/CommandRecordingContext.h"
-#include "dawn_native/metal/DeviceMTL.h"
-
-#include <limits>
-
-namespace dawn_native { namespace metal {
- // The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
- // largest alignment of supported data types
- static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return std::move(buffer);
- }
-
- // static
- uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
- if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
- return [mtlDevice maxBufferLength];
- }
-
- // Earlier versions of Metal had maximums defined in the Metal feature set tables
- // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
-#if defined(DAWN_PLATFORM_MACOS)
- // 10.12 and 10.13 have a 1Gb limit.
- if (@available(macOS 10.12, *)) {
- // |maxBufferLength| isn't always available on older systems. If available, use
- // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
- // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
- return 1024 * 1024 * 1024;
- }
- // 10.11 has a 256Mb limit
- if (@available(maxOS 10.11, *)) {
- return 256 * 1024 * 1024;
- }
-#else
- // macOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
- return 256 * 1024 * 1024;
-#endif
- }
-
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- MTLResourceOptions storageMode;
- if (GetUsage() & kMappableBufferUsages) {
- storageMode = MTLResourceStorageModeShared;
- } else {
- storageMode = MTLResourceStorageModePrivate;
- }
-
- uint32_t alignment = 1;
-#ifdef DAWN_PLATFORM_MACOS
- // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
- alignment = 4;
-#endif
-
- // Metal validation layer requires the size of uniform buffer and storage buffer to be no
- // less than the size of the buffer block defined in shader, and the overall size of the
- // buffer must be aligned to the largest alignment of its members.
- if (GetUsage() &
- (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
- alignment = kMinUniformOrStorageBufferAlignment;
- }
-
- // The vertex pulling transform requires at least 4 bytes in the buffer.
- // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
- // after the end.
- NSUInteger extraBytes = 0u;
- if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
- extraBytes = 4u;
- }
-
- if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- NSUInteger currentSize =
- std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
-
- if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
- // Alignment would overlow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- currentSize = Align(currentSize, alignment);
-
- uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
- if (currentSize > maxBufferSize) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
-
- mAllocatedSize = currentSize;
- mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice()
- newBufferWithLength:currentSize
- options:storageMode]);
- if (mMtlBuffer == nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
- }
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
- CommandRecordingContext* commandContext =
- ToBackend(GetDevice())->GetPendingCommandContext();
- ClearBuffer(commandContext, uint8_t(1u));
- }
-
- // Initialize the padding bytes to zero.
- if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
- !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- uint32_t clearSize = Align(paddingBytes, 4);
- uint64_t clearOffset = GetAllocatedSize() - clearSize;
-
- CommandRecordingContext* commandContext =
- ToBackend(GetDevice())->GetPendingCommandContext();
- ClearBuffer(commandContext, 0, clearOffset, clearSize);
- }
- }
- return {};
- }
-
- Buffer::~Buffer() = default;
-
- id<MTLBuffer> Buffer::GetMTLBuffer() const {
- return mMtlBuffer.Get();
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): Handle CPU-visible memory on UMA
- return GetUsage() & kMappableBufferUsages;
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- CommandRecordingContext* commandContext =
- ToBackend(GetDevice())->GetPendingCommandContext();
- EnsureDataInitialized(commandContext);
-
- return {};
- }
-
- void* Buffer::GetMappedPointerImpl() {
- return [*mMtlBuffer contents];
- }
-
- void Buffer::UnmapImpl() {
- // Nothing to do, Metal StorageModeShared buffers are always mapped.
- }
-
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- mMtlBuffer = nullptr;
- }
-
- bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- InitializeToZero(commandContext);
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero(commandContext);
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero(commandContext);
- return true;
- }
-
- void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
- ASSERT(NeedsInitialization());
-
- ClearBuffer(commandContext, uint8_t(0u));
-
- SetIsDataInitialized();
- GetDevice()->IncrementLazyClearCountForTesting();
- }
-
- void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
- uint8_t clearValue,
- uint64_t offset,
- uint64_t size) {
- ASSERT(commandContext != nullptr);
- size = size > 0 ? size : GetAllocatedSize();
- ASSERT(size > 0);
- [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
- range:NSMakeRange(offset, size)
- value:clearValue];
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
deleted file mode 100644
index 328ac8400d3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
-#define DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
-
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/Error.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native {
- class CommandEncoder;
-}
-
-namespace dawn_native { namespace metal {
-
- class CommandRecordingContext;
- class Device;
- class Texture;
-
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- id<MTLBuffer> mtlBuffer,
- uint64_t bufferSize,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Texture* texture,
- uint32_t mipLevel,
- const Origin3D& origin,
- Aspect aspect,
- const Extent3D& copySize);
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
-
- MaybeError FillCommands(CommandRecordingContext* commandContext);
-
- private:
- using CommandBufferBase::CommandBufferBase;
-
- MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
- MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
-
- MaybeError EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
deleted file mode 100644
index 45ff8d97e32..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ /dev/null
@@ -1,1563 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/CommandBufferMTL.h"
-
-#include "dawn_native/BindGroupTracker.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/metal/BindGroupMTL.h"
-#include "dawn_native/metal/BufferMTL.h"
-#include "dawn_native/metal/ComputePipelineMTL.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/PipelineLayoutMTL.h"
-#include "dawn_native/metal/QuerySetMTL.h"
-#include "dawn_native/metal/RenderPipelineMTL.h"
-#include "dawn_native/metal/SamplerMTL.h"
-#include "dawn_native/metal/StagingBufferMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-
-#include <tint/tint.h>
-
-namespace dawn_native { namespace metal {
-
- namespace {
-
- // Allows this file to use MTLStoreActionStoreAndMultismapleResolve because the logic is
- // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
- // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability"
- constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
- MTLStoreActionStoreAndMultisampleResolve;
-#pragma clang diagnostic pop
-
- MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return MTLIndexTypeUInt16;
- case wgpu::IndexFormat::Uint32:
- return MTLIndexTypeUInt32;
- case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
- }
- }
-
- NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(
- BeginRenderPassCmd* renderPass) {
- // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
- NSRef<MTLRenderPassDescriptor> descriptorRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
-
- for (ColorAttachmentIndex attachment :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(attachment);
- auto& attachmentInfo = renderPass->colorAttachments[attachment];
-
- switch (attachmentInfo.loadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
- descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
- attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
- attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
- break;
-
- case wgpu::LoadOp::Load:
- descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
- break;
- }
-
- descriptor.colorAttachments[i].texture =
- ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
- descriptor.colorAttachments[i].level = attachmentInfo.view->GetBaseMipLevel();
- descriptor.colorAttachments[i].slice = attachmentInfo.view->GetBaseArrayLayer();
-
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
- if (hasResolveTarget) {
- descriptor.colorAttachments[i].resolveTexture =
- ToBackend(attachmentInfo.resolveTarget->GetTexture())->GetMTLTexture();
- descriptor.colorAttachments[i].resolveLevel =
- attachmentInfo.resolveTarget->GetBaseMipLevel();
- descriptor.colorAttachments[i].resolveSlice =
- attachmentInfo.resolveTarget->GetBaseArrayLayer();
-
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- descriptor.colorAttachments[i].storeAction =
- kMTLStoreActionStoreAndMultisampleResolve;
- break;
- case wgpu::StoreOp::Discard:
- descriptor.colorAttachments[i].storeAction =
- MTLStoreActionMultisampleResolve;
- break;
- }
- } else {
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store:
- descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
- break;
- case wgpu::StoreOp::Discard:
- descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
- break;
- }
- }
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
-
- id<MTLTexture> texture =
- ToBackend(attachmentInfo.view->GetTexture())->GetMTLTexture();
- const Format& format = attachmentInfo.view->GetTexture()->GetFormat();
-
- if (format.HasDepth()) {
- descriptor.depthAttachment.texture = texture;
- descriptor.depthAttachment.level = attachmentInfo.view->GetBaseMipLevel();
- descriptor.depthAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
-
- switch (attachmentInfo.depthStoreOp) {
- case wgpu::StoreOp::Store:
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- break;
-
- case wgpu::StoreOp::Discard:
- descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
- break;
- }
-
- switch (attachmentInfo.depthLoadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
- break;
-
- case wgpu::LoadOp::Load:
- descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
- break;
- }
- }
-
- if (format.HasStencil()) {
- descriptor.stencilAttachment.texture = texture;
- descriptor.stencilAttachment.level = attachmentInfo.view->GetBaseMipLevel();
- descriptor.stencilAttachment.slice = attachmentInfo.view->GetBaseArrayLayer();
-
- switch (attachmentInfo.stencilStoreOp) {
- case wgpu::StoreOp::Store:
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- break;
-
- case wgpu::StoreOp::Discard:
- descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
- break;
- }
-
- switch (attachmentInfo.stencilLoadOp) {
- case wgpu::LoadOp::Clear:
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
- break;
-
- case wgpu::LoadOp::Load:
- descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
- break;
- }
- }
- }
-
- if (renderPass->occlusionQuerySet.Get() != nullptr) {
- descriptor.visibilityResultBuffer =
- ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
- }
-
- return descriptorRef;
- }
-
- // Helper function for Toggle EmulateStoreAndMSAAResolve
- void ResolveInAnotherRenderPass(
- CommandRecordingContext* commandContext,
- const MTLRenderPassDescriptor* mtlRenderPass,
- const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
- // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
- NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
-
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (resolveTextures[i] == nullptr) {
- continue;
- }
-
- mtlRenderPassForResolve.colorAttachments[i].texture =
- mtlRenderPass.colorAttachments[i].texture;
- mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
- mtlRenderPassForResolve.colorAttachments[i].storeAction =
- MTLStoreActionMultisampleResolve;
- mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
- mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
- mtlRenderPass.colorAttachments[i].resolveLevel;
- mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
- mtlRenderPass.colorAttachments[i].resolveSlice;
- }
-
- commandContext->BeginRender(mtlRenderPassForResolve);
- commandContext->EndRender();
- }
-
- // Helper functions for Toggle AlwaysResolveIntoZeroLevelAndLayer
- ResultOrError<NSPRef<id<MTLTexture>>> CreateResolveTextureForWorkaround(
- Device* device,
- MTLPixelFormat mtlFormat,
- uint32_t width,
- uint32_t height) {
- NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
- MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.textureType = MTLTextureType2D;
- mtlDesc.usage = MTLTextureUsageRenderTarget;
- mtlDesc.pixelFormat = mtlFormat;
- mtlDesc.width = width;
- mtlDesc.height = height;
- mtlDesc.depth = 1;
- mtlDesc.mipmapLevelCount = 1;
- mtlDesc.arrayLength = 1;
- mtlDesc.storageMode = MTLStorageModePrivate;
- mtlDesc.sampleCount = 1;
-
- id<MTLTexture> texture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc];
- if (texture == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
- }
-
- return AcquireNSPRef(texture);
- }
-
- void CopyIntoTrueResolveTarget(CommandRecordingContext* commandContext,
- id<MTLTexture> mtlTrueResolveTexture,
- uint32_t trueResolveLevel,
- uint32_t trueResolveSlice,
- id<MTLTexture> temporaryResolveTexture,
- uint32_t width,
- uint32_t height) {
- [commandContext->EnsureBlit() copyFromTexture:temporaryResolveTexture
- sourceSlice:0
- sourceLevel:0
- sourceOrigin:MTLOriginMake(0, 0, 0)
- sourceSize:MTLSizeMake(width, height, 1)
- toTexture:mtlTrueResolveTexture
- destinationSlice:trueResolveSlice
- destinationLevel:trueResolveLevel
- destinationOrigin:MTLOriginMake(0, 0, 0)];
- }
-
- // Metal uses a physical addressing mode which means buffers in the shading language are
- // just pointers to the virtual address of their start. This means there is no way to know
- // the length of a buffer to compute the length() of unsized arrays at the end of storage
- // buffers. SPIRV-Cross implements the length() of unsized arrays by requiring an extra
- // buffer that contains the length of other buffers. This structure that keeps track of the
- // length of storage buffers and can apply them to the reserved "buffer length buffer" when
- // needed for a draw or a dispatch.
- struct StorageBufferLengthTracker {
- wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
-
- // The lengths of buffers are stored as 32bit integers because that is the width the
- // MSL code generated by SPIRV-Cross expects.
- // UBOs require we align the max buffer count to 4 elements (16 bytes).
- static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
- PerStage<std::array<uint32_t, MaxBufferCount>> data;
-
- void Apply(id<MTLRenderCommandEncoder> render,
- RenderPipeline* pipeline,
- bool enableVertexPulling) {
- wgpu::ShaderStage stagesToApply =
- dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
-
- if (stagesToApply == wgpu::ShaderStage::None) {
- return;
- }
-
- if (stagesToApply & wgpu::ShaderStage::Vertex) {
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Vertex);
-
- if (enableVertexPulling) {
- bufferCount += pipeline->GetVertexBufferCount();
- }
-
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
-
- [render setVertexBytes:data[SingleShaderStage::Vertex].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
- }
-
- if (stagesToApply & wgpu::ShaderStage::Fragment) {
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Fragment);
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
-
- [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
- }
-
- // Only mark clean stages that were actually applied.
- dirtyStages ^= stagesToApply;
- }
-
- void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
- if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
- return;
- }
-
- if (!pipeline->RequiresStorageBufferLength()) {
- return;
- }
-
- uint32_t bufferCount = ToBackend(pipeline->GetLayout())
- ->GetBufferBindingCount(SingleShaderStage::Compute);
- bufferCount = Align(bufferCount, 4);
- ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
-
- [compute setBytes:data[SingleShaderStage::Compute].data()
- length:sizeof(uint32_t) * bufferCount
- atIndex:kBufferLengthBufferSlot];
-
- dirtyStages ^= wgpu::ShaderStage::Compute;
- }
- };
-
- // Keeps track of the dirty bind groups so they can be lazily applied when we know the
- // pipeline state.
- // Bind groups may be inherited because bind groups are packed in the buffer /
- // texture tables in contiguous order.
- class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
- public:
- explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
- : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
- }
-
- template <typename Encoder>
- void Apply(Encoder encoder) {
- BeforeApply();
- for (BindGroupIndex index :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
- mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
- ToBackend(mPipelineLayout));
- }
- AfterApply();
- }
-
- private:
- // Handles a call to SetBindGroup, directing the commands to the correct encoder.
- // There is a single function that takes both encoders to factor code. Other approaches
- // like templates wouldn't work because the name of methods are different between the
- // two encoder types.
- void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
- id<MTLComputeCommandEncoder> compute,
- BindGroupIndex index,
- BindGroup* group,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets,
- PipelineLayout* pipelineLayout) {
- uint32_t currentDynamicBufferIndex = 0;
-
- // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
- // so that we only have to do one setVertexBuffers and one setFragmentBuffers
- // call here.
- for (BindingIndex bindingIndex{0};
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
-
- bool hasVertStage =
- bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
- bool hasFragStage =
- bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
- bool hasComputeStage =
- bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
-
- uint32_t vertIndex = 0;
- uint32_t fragIndex = 0;
- uint32_t computeIndex = 0;
-
- if (hasVertStage) {
- vertIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Vertex)[index][bindingIndex];
- }
- if (hasFragStage) {
- fragIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Fragment)[index][bindingIndex];
- }
- if (hasComputeStage) {
- computeIndex = pipelineLayout->GetBindingIndexInfo(
- SingleShaderStage::Compute)[index][bindingIndex];
- }
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- const BufferBinding& binding =
- group->GetBindingAsBufferBinding(bindingIndex);
- const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
- NSUInteger offset = binding.offset;
-
- // TODO(crbug.com/dawn/854): Record bound buffer status to use
- // setBufferOffset to achieve better performance.
- if (bindingInfo.buffer.hasDynamicOffset) {
- offset += dynamicOffsets[currentDynamicBufferIndex];
- currentDynamicBufferIndex++;
- }
-
- if (hasVertStage) {
- mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
- [render setVertexBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(vertIndex, 1)];
- }
- if (hasFragStage) {
- mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
- [render setFragmentBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(fragIndex, 1)];
- }
- if (hasComputeStage) {
- mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
- binding.size;
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
- [compute setBuffers:&buffer
- offsets:&offset
- withRange:NSMakeRange(computeIndex, 1)];
- }
-
- break;
- }
-
- case BindingInfoType::Sampler: {
- auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- if (hasVertStage) {
- [render setVertexSamplerState:sampler->GetMTLSamplerState()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentSamplerState:sampler->GetMTLSamplerState()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setSamplerState:sampler->GetMTLSamplerState()
- atIndex:computeIndex];
- }
- break;
- }
-
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture: {
- auto textureView =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- if (hasVertStage) {
- [render setVertexTexture:textureView->GetMTLTexture()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentTexture:textureView->GetMTLTexture()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setTexture:textureView->GetMTLTexture()
- atIndex:computeIndex];
- }
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& views =
- group->GetBindingAsExternalTexture(bindingIndex)->GetTextureViews();
-
- // Only single-plane formats are supported right now, so assert only one
- // view exists.
- ASSERT(views[1].Get() == nullptr);
- ASSERT(views[2].Get() == nullptr);
-
- TextureView* textureView = ToBackend(views[0].Get());
-
- if (hasVertStage) {
- [render setVertexTexture:textureView->GetMTLTexture()
- atIndex:vertIndex];
- }
- if (hasFragStage) {
- [render setFragmentTexture:textureView->GetMTLTexture()
- atIndex:fragIndex];
- }
- if (hasComputeStage) {
- [compute setTexture:textureView->GetMTLTexture()
- atIndex:computeIndex];
- }
- break;
- }
- }
- }
- }
-
- template <typename... Args>
- void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
- ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
- }
-
- template <typename... Args>
- void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
- ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
- }
-
- StorageBufferLengthTracker* mLengthTracker;
- };
-
- // Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
- // all the relevant state.
- class VertexBufferTracker {
- public:
- explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
- : mLengthTracker(lengthTracker) {
- }
-
- void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
- mVertexBuffers[slot] = buffer->GetMTLBuffer();
- mVertexBufferOffsets[slot] = offset;
-
- ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
- mVertexBufferBindingSizes[slot] =
- static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
- mDirtyVertexBuffers.set(slot);
- }
-
- void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
- // When a new pipeline is bound we must set all the vertex buffers again because
- // they might have been offset by the pipeline layout, and they might be packed
- // differently from the previous pipeline.
- mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
- }
-
- void Apply(id<MTLRenderCommandEncoder> encoder,
- RenderPipeline* pipeline,
- bool enableVertexPulling) {
- const auto& vertexBuffersToApply =
- mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
-
- for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
- uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
-
- if (enableVertexPulling) {
- // Insert lengths for vertex buffers bound as storage buffers
- mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
- mVertexBufferBindingSizes[slot];
- mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
- }
-
- [encoder setVertexBuffers:&mVertexBuffers[slot]
- offsets:&mVertexBufferOffsets[slot]
- withRange:NSMakeRange(metalIndex, 1)];
- }
-
- mDirtyVertexBuffers.reset();
- }
-
- private:
- // All the indices in these arrays are Dawn vertex buffer indices
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
- ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
- ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
- ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
-
- StorageBufferLengthTracker* mLengthTracker;
- };
-
- } // anonymous namespace
-
- void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
- id<MTLBuffer> mtlBuffer,
- uint64_t bufferSize,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Texture* texture,
- uint32_t mipLevel,
- const Origin3D& origin,
- Aspect aspect,
- const Extent3D& copySize) {
- TextureBufferCopySplit splitCopies =
- ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
- bytesPerRow, rowsPerImage, aspect);
-
- MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
-
- for (const auto& copyInfo : splitCopies) {
- uint64_t bufferOffset = copyInfo.bufferOffset;
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D: {
- const MTLOrigin textureOrigin =
- MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent =
- MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
- for (uint32_t z = copyInfo.textureOrigin.z;
- z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
- ++z) {
- [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:copyExtent
- toTexture:texture->GetMTLTexture()
- destinationSlice:z
- destinationLevel:mipLevel
- destinationOrigin:textureOrigin
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
- }
- break;
- }
- case wgpu::TextureDimension::e3D: {
- [commandContext->EnsureBlit()
- copyFromBuffer:mtlBuffer
- sourceOffset:bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
- copyInfo.copyExtent.height,
- copyInfo.copyExtent.depthOrArrayLayers)
- toTexture:texture->GetMTLTexture()
- destinationSlice:0
- destinationLevel:mipLevel
- destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
- copyInfo.textureOrigin.y,
- copyInfo.textureOrigin.z)
- options:blitOption];
- break;
- }
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
- }
- }
-
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
-
- MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
- CommandRecordingContext* commandContext) {
- for (size_t i = 0; i < scope.textures.size(); ++i) {
- Texture* texture = ToBackend(scope.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- });
- }
- for (BufferBase* bufferBase : scope.buffers) {
- ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
- }
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
-
- for (const SyncScopeResourceUsage& scope :
- GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope, commandContext);
- }
- commandContext->EndBlit();
-
- DAWN_TRY(EncodeComputePass(commandContext));
-
- nextComputePassNumber++;
- break;
- }
-
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
- commandContext);
- commandContext->EndBlit();
-
- LazyClearRenderPassAttachments(cmd);
- NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
- DAWN_TRY(EncodeRenderPass(commandContext, descriptor.Get(), cmd->width,
- cmd->height));
-
- nextRenderPassNumber++;
- break;
- }
-
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
-
- ToBackend(copy->source)->EnsureDataInitialized(commandContext);
- ToBackend(copy->destination)
- ->EnsureDataInitializedAsDestination(commandContext,
- copy->destinationOffset, copy->size);
-
- [commandContext->EnsureBlit()
- copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
- sourceOffset:copy->sourceOffset
- toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
- destinationOffset:copy->destinationOffset
- size:copy->size];
- break;
- }
-
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Buffer* buffer = ToBackend(src.buffer.Get());
- Texture* texture = ToBackend(dst.texture.Get());
-
- buffer->EnsureDataInitialized(commandContext);
- EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
-
- RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
- buffer->GetSize(), src.offset, src.bytesPerRow,
- src.rowsPerImage, texture, dst.mipLevel, dst.origin,
- dst.aspect, copySize);
- break;
- }
-
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Texture* texture = ToBackend(src.texture.Get());
- Buffer* buffer = ToBackend(dst.buffer.Get());
-
- buffer->EnsureDataInitializedAsDestination(commandContext, copy);
-
- texture->EnsureSubresourceContentInitialized(
- commandContext, GetSubresourcesAffectedByCopy(src, copySize));
-
- TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
- texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
- dst.bytesPerRow, dst.rowsPerImage, src.aspect);
-
- for (const auto& copyInfo : splitCopies) {
- MTLBlitOption blitOption =
- ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
- uint64_t bufferOffset = copyInfo.bufferOffset;
-
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D: {
- const MTLOrigin textureOrigin = MTLOriginMake(
- copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
- const MTLSize copyExtent = MTLSizeMake(
- copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
- for (uint32_t z = copyInfo.textureOrigin.z;
- z < copyInfo.textureOrigin.z +
- copyInfo.copyExtent.depthOrArrayLayers;
- ++z) {
- [commandContext->EnsureBlit()
- copyFromTexture:texture->GetMTLTexture()
- sourceSlice:z
- sourceLevel:src.mipLevel
- sourceOrigin:textureOrigin
- sourceSize:copyExtent
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage
- options:blitOption];
- bufferOffset += copyInfo.bytesPerImage;
- }
- break;
- }
- case wgpu::TextureDimension::e3D: {
- [commandContext->EnsureBlit()
- copyFromTexture:texture->GetMTLTexture()
- sourceSlice:0
- sourceLevel:src.mipLevel
- sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
- copyInfo.textureOrigin.y,
- copyInfo.textureOrigin.z)
- sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
- copyInfo.copyExtent.height,
- copyInfo.copyExtent
- .depthOrArrayLayers)
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage
- options:blitOption];
- break;
- }
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
- }
- break;
- }
-
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- Texture* srcTexture = ToBackend(copy->source.texture.Get());
- Texture* dstTexture = ToBackend(copy->destination.texture.Get());
-
- srcTexture->EnsureSubresourceContentInitialized(
- commandContext,
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
- EnsureDestinationTextureInitialized(commandContext, dstTexture,
- copy->destination, copy->copySize);
-
- // TODO(crbug.com/dawn/814): support copies with 1D textures.
- ASSERT(srcTexture->GetDimension() != wgpu::TextureDimension::e1D &&
- dstTexture->GetDimension() != wgpu::TextureDimension::e1D);
-
- const MTLSize sizeOneSlice =
- MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
-
- uint32_t sourceLayer = 0;
- uint32_t sourceOriginZ = 0;
-
- uint32_t destinationLayer = 0;
- uint32_t destinationOriginZ = 0;
-
- uint32_t* sourceZPtr;
- if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- sourceZPtr = &sourceLayer;
- } else {
- sourceZPtr = &sourceOriginZ;
- }
-
- uint32_t* destinationZPtr;
- if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- destinationZPtr = &destinationLayer;
- } else {
- destinationZPtr = &destinationOriginZ;
- }
-
- // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 3D.
- for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
- *sourceZPtr = copy->source.origin.z + z;
- *destinationZPtr = copy->destination.origin.z + z;
-
- [commandContext->EnsureBlit()
- copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:sourceLayer
- sourceLevel:copy->source.mipLevel
- sourceOrigin:MTLOriginMake(copy->source.origin.x,
- copy->source.origin.y, sourceOriginZ)
- sourceSize:sizeOneSlice
- toTexture:dstTexture->GetMTLTexture()
- destinationSlice:destinationLayer
- destinationLevel:copy->destination.mipLevel
- destinationOrigin:MTLOriginMake(copy->destination.origin.x,
- copy->destination.origin.y,
- destinationOriginZ)];
- }
- break;
- }
-
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op copies.
- break;
- }
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
- bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
- commandContext, cmd->offset, cmd->size);
-
- if (!clearedToZero) {
- [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
- range:NSMakeRange(cmd->offset, cmd->size)
- value:0u];
- }
-
- break;
- }
-
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- Buffer* destination = ToBackend(cmd->destination.Get());
-
- destination->EnsureDataInitializedAsDestination(
- commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
-
- if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
- [commandContext->EnsureBlit()
- copyFromBuffer:querySet->GetVisibilityBuffer()
- sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
- toBuffer:destination->GetMTLBuffer()
- destinationOffset:NSUInteger(cmd->destinationOffset)
- size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
- } else {
- if (@available(macos 10.15, iOS 14.0, *)) {
- [commandContext->EnsureBlit()
- resolveCounters:querySet->GetCounterSampleBuffer()
- inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
- destinationBuffer:destination->GetMTLBuffer()
- destinationOffset:NSUInteger(cmd->destinationOffset)];
- } else {
- UNREACHABLE();
- }
- }
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
- if (@available(macos 10.15, iOS 14.0, *)) {
- [commandContext->EnsureBlit()
- sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
- } else {
- UNREACHABLE();
- }
- break;
- }
-
- case Command::InsertDebugMarker: {
- // MTLCommandBuffer does not implement insertDebugSignpost
- SkipCommand(&mCommands, type);
- break;
- }
-
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
-
- if (@available(macos 10.13, *)) {
- [commandContext->GetCommands() popDebugGroup];
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
-
- if (@available(macos 10.13, *)) {
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
- }
-
- break;
- }
-
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
-
- [commandContext->EnsureBlit()
- copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
- sourceOffset:uploadHandle.startOffset
- toBuffer:dstBuffer->GetMTLBuffer()
- destinationOffset:offset
- size:size];
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- commandContext->EndBlit();
- return {};
- }
-
- MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
- ComputePipeline* lastPipeline = nullptr;
- StorageBufferLengthTracker storageBufferLengths = {};
- BindGroupTracker bindGroups(&storageBufferLengths);
-
- id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- commandContext->EndCompute();
- return {};
- }
-
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
- // Skip noop dispatches, it can causes issues on some systems.
- if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
- break;
- }
-
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
-
- [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
- threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
- break;
- }
-
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
-
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
-
- Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
- indirectBufferOffset:dispatch->indirectOffset
- threadsPerThreadgroup:lastPipeline
- ->GetLocalWorkGroupSize()];
- break;
- }
-
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
-
- bindGroups.OnSetPipeline(lastPipeline);
-
- lastPipeline->Encode(encoder);
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder insertDebugSignpost:mtlLabel.Get()];
- break;
- }
-
- case Command::PopDebugGroup: {
- mCommands.NextCommand<PopDebugGroupCmd>();
-
- [encoder popDebugGroup];
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- char* label = mCommands.NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder pushDebugGroup:mtlLabel.Get()];
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
- if (@available(macos 10.15, iOS 14.0, *)) {
- [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
- } else {
- UNREACHABLE();
- }
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
-
- // EndComputePass should have been called
- UNREACHABLE();
- }
-
- MaybeError CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
- ASSERT(mtlRenderPass);
-
- Device* device = ToBackend(GetDevice());
-
- // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
- // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
- // the resolve texture is removed when applying the store + MSAA resolve workaround.
- if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
- std::array<id<MTLTexture>, kMaxColorAttachments> trueResolveTextures = {};
- std::array<uint32_t, kMaxColorAttachments> trueResolveLevels = {};
- std::array<uint32_t, kMaxColorAttachments> trueResolveSlices = {};
-
- // Use temporary resolve texture on the resolve targets with non-zero resolveLevel or
- // resolveSlice.
- bool useTemporaryResolveTexture = false;
- std::array<NSPRef<id<MTLTexture>>, kMaxColorAttachments> temporaryResolveTextures = {};
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
- continue;
- }
-
- if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
- mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
- continue;
- }
-
- trueResolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
- trueResolveLevels[i] = mtlRenderPass.colorAttachments[i].resolveLevel;
- trueResolveSlices[i] = mtlRenderPass.colorAttachments[i].resolveSlice;
-
- const MTLPixelFormat mtlFormat = trueResolveTextures[i].pixelFormat;
- DAWN_TRY_ASSIGN(temporaryResolveTextures[i], CreateResolveTextureForWorkaround(
- device, mtlFormat, width, height));
-
- mtlRenderPass.colorAttachments[i].resolveTexture =
- temporaryResolveTextures[i].Get();
- mtlRenderPass.colorAttachments[i].resolveLevel = 0;
- mtlRenderPass.colorAttachments[i].resolveSlice = 0;
- useTemporaryResolveTexture = true;
- }
-
- // If we need to use a temporary resolve texture we need to copy the result of MSAA
- // resolve back to the true resolve targets.
- if (useTemporaryResolveTexture) {
- DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (trueResolveTextures[i] == nullptr) {
- continue;
- }
-
- ASSERT(temporaryResolveTextures[i] != nullptr);
- CopyIntoTrueResolveTarget(commandContext, trueResolveTextures[i],
- trueResolveLevels[i], trueResolveSlices[i],
- temporaryResolveTextures[i].Get(), width, height);
- }
- return {};
- }
- }
-
- // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
- if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
- bool hasStoreAndMSAAResolve = false;
-
- // Remove any store + MSAA resolve and remember them.
- std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- if (mtlRenderPass.colorAttachments[i].storeAction ==
- kMTLStoreActionStoreAndMultisampleResolve) {
- hasStoreAndMSAAResolve = true;
- resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
-
- mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
- mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
- }
- }
-
- // If we found a store + MSAA resolve we need to resolve in a different render pass.
- if (hasStoreAndMSAAResolve) {
- DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
- ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
- return {};
- }
- }
-
- DAWN_TRY(EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height));
- return {};
- }
-
- MaybeError CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
- bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
- RenderPipeline* lastPipeline = nullptr;
- id<MTLBuffer> indexBuffer = nullptr;
- uint32_t indexBufferBaseOffset = 0;
- MTLIndexType indexBufferType;
- uint64_t indexFormatSize = 0;
-
- StorageBufferLengthTracker storageBufferLengths = {};
- VertexBufferTracker vertexBuffers(&storageBufferLengths);
- BindGroupTracker bindGroups(&storageBufferLengths);
-
- id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
-
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- // The instance count must be non-zero, otherwise no-op
- if (draw->instanceCount != 0) {
- // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
- if (draw->firstInstance == 0) {
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- vertexStart:draw->firstVertex
- vertexCount:draw->vertexCount
- instanceCount:draw->instanceCount];
- } else {
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- vertexStart:draw->firstVertex
- vertexCount:draw->vertexCount
- instanceCount:draw->instanceCount
- baseInstance:draw->firstInstance];
- }
- }
- break;
- }
-
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- // The index and instance count must be non-zero, otherwise no-op
- if (draw->indexCount != 0 && draw->instanceCount != 0) {
- // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
- // baseVertex.
- if (draw->baseVertex == 0 && draw->firstInstance == 0) {
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexCount:draw->indexCount
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset +
- draw->firstIndex * indexFormatSize
- instanceCount:draw->instanceCount];
- } else {
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexCount:draw->indexCount
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset +
- draw->firstIndex * indexFormatSize
- instanceCount:draw->instanceCount
- baseVertex:draw->baseVertex
- baseInstance:draw->firstInstance];
- }
- }
- break;
- }
-
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indirectBuffer:indirectBuffer
- indirectBufferOffset:draw->indirectOffset];
- break;
- }
-
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-
- vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
- bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
-
- id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexType:indexBufferType
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset
- indirectBuffer:indirectBuffer
- indirectBufferOffset:draw->indirectOffset];
- break;
- }
-
- case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- char* label = iter->NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder insertDebugSignpost:mtlLabel.Get()];
- break;
- }
-
- case Command::PopDebugGroup: {
- iter->NextCommand<PopDebugGroupCmd>();
-
- [encoder popDebugGroup];
- break;
- }
-
- case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- char* label = iter->NextData<char>(cmd->length + 1);
- NSRef<NSString> mtlLabel =
- AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
- [encoder pushDebugGroup:mtlLabel.Get()];
- break;
- }
-
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
-
- vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
- bindGroups.OnSetPipeline(newPipeline);
-
- [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
- [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
- [encoder setCullMode:newPipeline->GetMTLCullMode()];
- [encoder setDepthBias:newPipeline->GetDepthBias()
- slopeScale:newPipeline->GetDepthBiasSlopeScale()
- clamp:newPipeline->GetDepthBiasClamp()];
- if (@available(macOS 10.11, iOS 11.0, *)) {
- MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth() ?
- MTLDepthClipModeClamp : MTLDepthClipModeClip;
- [encoder setDepthClipMode:clipMode];
- }
- newPipeline->Encode(encoder);
-
- lastPipeline = newPipeline;
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
-
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- auto b = ToBackend(cmd->buffer.Get());
- indexBuffer = b->GetMTLBuffer();
- indexBufferBaseOffset = cmd->offset;
- indexBufferType = MTLIndexFormat(cmd->format);
- indexFormatSize = IndexFormatSize(cmd->format);
- break;
- }
-
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-
- vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
- cmd->offset);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- commandContext->EndRender();
- return {};
- }
-
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- [encoder setStencilReferenceValue:cmd->reference];
- break;
- }
-
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- MTLViewport viewport;
- viewport.originX = cmd->x;
- viewport.originY = cmd->y;
- viewport.width = cmd->width;
- viewport.height = cmd->height;
- viewport.znear = cmd->minDepth;
- viewport.zfar = cmd->maxDepth;
-
- [encoder setViewport:viewport];
- break;
- }
-
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- MTLScissorRect rect;
- rect.x = cmd->x;
- rect.y = cmd->y;
- rect.width = cmd->width;
- rect.height = cmd->height;
-
- [encoder setScissorRect:rect];
- break;
- }
-
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- [encoder setBlendColorRed:cmd->color.r
- green:cmd->color.g
- blue:cmd->color.b
- alpha:cmd->color.a];
- break;
- }
-
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- EncodeRenderBundleCommand(iter, type);
- }
- }
- break;
- }
-
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
-
- [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
- offset:cmd->queryIndex * sizeof(uint64_t)];
- break;
- }
-
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
-
- [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
- offset:cmd->queryIndex * sizeof(uint64_t)];
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
- if (@available(macos 10.15, iOS 14.0, *)) {
- [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
- atSampleIndex:NSUInteger(cmd->queryIndex)
- withBarrier:YES];
- } else {
- UNREACHABLE();
- }
- break;
- }
-
- default: {
- EncodeRenderBundleCommand(&mCommands, type);
- break;
- }
- }
- }
-
- // EndRenderPass should have been called
- UNREACHABLE();
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h
deleted file mode 100644
index 5189a53e745..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
-#define DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
-
-#include "common/NSRef.h"
-#include "common/NonCopyable.h"
-#include "dawn_native/Error.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
- // Only one encoder may be open at a time.
- class CommandRecordingContext : NonMovable {
- public:
- CommandRecordingContext();
- ~CommandRecordingContext();
-
- id<MTLCommandBuffer> GetCommands();
- void MarkUsed();
- bool WasUsed() const;
-
- MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
- NSPRef<id<MTLCommandBuffer>> AcquireCommands();
-
- id<MTLBlitCommandEncoder> EnsureBlit();
- void EndBlit();
-
- id<MTLComputeCommandEncoder> BeginCompute();
- void EndCompute();
-
- id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
- void EndRender();
-
- private:
- NSPRef<id<MTLCommandBuffer>> mCommands;
- NSPRef<id<MTLBlitCommandEncoder>> mBlit;
- NSPRef<id<MTLComputeCommandEncoder>> mCompute;
- NSPRef<id<MTLRenderCommandEncoder>> mRender;
- bool mInEncoder = false;
- bool mUsed = false;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
deleted file mode 100644
index f07c48c7ae7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/CommandRecordingContext.h"
-
-#include "common/Assert.h"
-
-namespace dawn_native { namespace metal {
-
- CommandRecordingContext::CommandRecordingContext() = default;
-
- CommandRecordingContext::~CommandRecordingContext() {
- // Commands must be acquired.
- ASSERT(mCommands == nullptr);
- }
-
- id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
- return mCommands.Get();
- }
-
- void CommandRecordingContext::MarkUsed() {
- mUsed = true;
- }
- bool CommandRecordingContext::WasUsed() const {
- return mUsed;
- }
-
- MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
- ASSERT(mCommands == nil);
- ASSERT(!mUsed);
-
- // The MTLCommandBuffer will be autoreleased by default.
- // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
- // alive.
- mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
- if (mCommands == nil) {
- return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
- }
-
- return {};
- }
-
- NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
- // A blit encoder can be left open from WriteBuffer, make sure we close it.
- if (mCommands != nullptr) {
- EndBlit();
- }
-
- ASSERT(!mInEncoder);
- mUsed = false;
- return std::move(mCommands);
- }
-
- id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
- ASSERT(mCommands != nullptr);
-
- if (mBlit == nullptr) {
- ASSERT(!mInEncoder);
- mInEncoder = true;
-
- // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
- // draining from under us.
- mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
- }
- return mBlit.Get();
- }
-
- void CommandRecordingContext::EndBlit() {
- ASSERT(mCommands != nullptr);
-
- if (mBlit != nullptr) {
- [*mBlit endEncoding];
- mBlit = nullptr;
- mInEncoder = false;
- }
- }
-
- id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
- ASSERT(mCommands != nullptr);
- ASSERT(mCompute == nullptr);
- ASSERT(!mInEncoder);
-
- mInEncoder = true;
- // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
- // draining from under us.
- mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
- return mCompute.Get();
- }
-
- void CommandRecordingContext::EndCompute() {
- ASSERT(mCommands != nullptr);
- ASSERT(mCompute != nullptr);
-
- [*mCompute endEncoding];
- mCompute = nullptr;
- mInEncoder = false;
- }
-
- id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
- MTLRenderPassDescriptor* descriptor) {
- ASSERT(mCommands != nullptr);
- ASSERT(mRender == nullptr);
- ASSERT(!mInEncoder);
-
- mInEncoder = true;
- // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
- // draining from under us.
- mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
- return mRender.Get();
- }
-
- void CommandRecordingContext::EndRender() {
- ASSERT(mCommands != nullptr);
- ASSERT(mRender != nullptr);
-
- [*mRender endEncoding];
- mRender = nullptr;
- mInEncoder = false;
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
deleted file mode 100644
index 20b2080ee49..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
-#define DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
-
-#include "dawn_native/ComputePipeline.h"
-
-#include "common/NSRef.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- void Encode(id<MTLComputeCommandEncoder> encoder);
- MTLSize GetLocalWorkGroupSize() const;
- bool RequiresStorageBufferLength() const;
-
- private:
- using ComputePipelineBase::ComputePipelineBase;
- MaybeError Initialize() override;
-
- NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
- MTLSize mLocalWorkgroupSize;
- bool mRequiresStorageBufferLength;
- std::vector<uint32_t> mWorkgroupAllocations;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_COMPUTEPIPELINEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
deleted file mode 100644
index 48d36ae14a6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/ComputePipelineMTL.h"
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/ShaderModuleMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-
-namespace dawn_native { namespace metal {
-
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
-
- MaybeError ComputePipeline::Initialize() {
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
-
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- ShaderModule::MetalFunctionData computeData;
-
- DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
- &computeData));
-
- NSError* error = nullptr;
- mMtlComputePipelineState.Acquire([mtlDevice
- newComputePipelineStateWithFunction:computeData.function.Get()
- error:&error]);
- if (error != nullptr) {
- return DAWN_INTERNAL_ERROR("Error creating pipeline state" +
- std::string([error.localizedDescription UTF8String]));
- }
- ASSERT(mMtlComputePipelineState != nil);
-
- // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
- Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
- mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
-
- mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
- mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
- return {};
- }
-
- void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
- [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
- for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
- if (mWorkgroupAllocations[i] == 0) {
- continue;
- }
- [encoder setThreadgroupMemoryLength:mWorkgroupAllocations[i] atIndex:i];
- }
- }
-
- MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
- return mLocalWorkgroupSize;
- }
-
- bool ComputePipeline::RequiresStorageBufferLength() const {
- return mRequiresStorageBufferLength;
- }
-
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
deleted file mode 100644
index 6f5153bc870..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_DEVICEMTL_H_
-#define DAWNNATIVE_METAL_DEVICEMTL_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/metal/CommandRecordingContext.h"
-#include "dawn_native/metal/Forward.h"
-
-#import <IOSurface/IOSurfaceRef.h>
-#import <Metal/Metal.h>
-#import <QuartzCore/QuartzCore.h>
-
-#include <atomic>
-#include <memory>
-#include <mutex>
-
-namespace dawn_native { namespace metal {
-
- namespace {
- struct KalmanInfo;
- }
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Device*> Create(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DawnDeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize();
-
- MaybeError TickImpl() override;
-
- id<MTLDevice> GetMTLDevice();
- id<MTLCommandQueue> GetMTLQueue();
-
- CommandRecordingContext* GetPendingCommandContext();
- MaybeError SubmitPendingCommandBuffer();
-
- Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane);
- void WaitForCommandsToBeScheduled();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& dataLayout,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- Device(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DawnDeviceDescriptor* descriptor);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- void InitTogglesFromDriver();
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- NSPRef<id<MTLDevice>> mMtlDevice;
- NSPRef<id<MTLCommandQueue>> mCommandQueue;
-
- CommandRecordingContext mCommandContext;
-
- // The completed serial is updated in a Metal completion handler that can be fired on a
- // different thread, so it needs to be atomic.
- std::atomic<uint64_t> mCompletedSerial;
-
- // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
- // a different thread so we guard access to it with a mutex.
- std::mutex mLastSubmittedCommandsMutex;
- NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
-
- // The current estimation of timestamp period
- float mTimestampPeriod = 1.0f;
- // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
- // and CPU timestamps.
- MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
- MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
- // The parameters for kalman filter
- std::unique_ptr<KalmanInfo> mKalmanInfo;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_DEVICEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
deleted file mode 100644
index c79cff412c9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ /dev/null
@@ -1,506 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/DeviceMTL.h"
-
-#include "common/GPUInfo.h"
-#include "common/Platform.h"
-#include "dawn_native/BackendConnection.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/metal/BindGroupLayoutMTL.h"
-#include "dawn_native/metal/BindGroupMTL.h"
-#include "dawn_native/metal/BufferMTL.h"
-#include "dawn_native/metal/CommandBufferMTL.h"
-#include "dawn_native/metal/ComputePipelineMTL.h"
-#include "dawn_native/metal/PipelineLayoutMTL.h"
-#include "dawn_native/metal/QuerySetMTL.h"
-#include "dawn_native/metal/QueueMTL.h"
-#include "dawn_native/metal/RenderPipelineMTL.h"
-#include "dawn_native/metal/SamplerMTL.h"
-#include "dawn_native/metal/ShaderModuleMTL.h"
-#include "dawn_native/metal/StagingBufferMTL.h"
-#include "dawn_native/metal/SwapChainMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-#include <type_traits>
-
-namespace dawn_native { namespace metal {
-
- namespace {
-
- // The time interval for each round of kalman filter
- static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
-
- struct KalmanInfo {
- float filterValue; // The estimation value
- float kalmanGain; // The kalman gain
- float R; // The covariance of the observation noise
- float P; // The a posteriori estimate covariance
- };
-
- // A simplified kalman filter for estimating timestamp period based on measured values
- float KalmanFilter(KalmanInfo* info, float measuredValue) {
- // Optimize kalman gain
- info->kalmanGain = info->P / (info->P + info->R);
-
- // Correct filter value
- info->filterValue =
- info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
- // Update estimate covariance
- info->P = (1.0f - info->kalmanGain) * info->P;
- return info->filterValue;
- }
-
- void API_AVAILABLE(macos(10.15), ios(14))
- UpdateTimestampPeriod(id<MTLDevice> device,
- KalmanInfo* info,
- MTLTimestamp* cpuTimestampStart,
- MTLTimestamp* gpuTimestampStart,
- float* timestampPeriod) {
- // The filter value is converged to an optimal value when the kalman gain is less than
- // 0.01. At this time, the weight of the measured value is too small to change the next
- // filter value, the sampling and calculations do not need to continue anymore.
- if (info->kalmanGain < 0.01f) {
- return;
- }
-
- MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
- [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
-
- // Update the timestamp start values when timestamp reset happens
- if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
- *cpuTimestampStart = cpuTimestampEnd;
- *gpuTimestampStart = gpuTimestampEnd;
- return;
- }
-
- if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
- // The measured timestamp period
- float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
- static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
-
- // Measurement update
- *timestampPeriod = KalmanFilter(info, measurement);
-
- *cpuTimestampStart = cpuTimestampEnd;
- *gpuTimestampStart = gpuTimestampEnd;
- }
- }
-
- } // namespace
-
- // static
- ResultOrError<Device*> Device::Create(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DawnDeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
- DAWN_TRY(device->Initialize());
- return device.Detach();
- }
-
- Device::Device(AdapterBase* adapter,
- NSPRef<id<MTLDevice>> mtlDevice,
- const DawnDeviceDescriptor* descriptor)
- : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {
- }
-
- Device::~Device() {
- Destroy();
- }
-
- MaybeError Device::Initialize() {
- InitTogglesFromDriver();
-
- mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
- if (mCommandQueue == nil) {
- return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
- }
-
- DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
-
- if (IsFeatureEnabled(Feature::TimestampQuery)) {
- // Make a best guess of timestamp period based on device vendor info, and converge it to
- // an accurate value by the following calculations.
- mTimestampPeriod =
- gpu_info::IsIntel(GetAdapter()->GetPCIInfo().vendorId) ? 83.333f : 1.0f;
-
- // Initialize kalman filter parameters
- mKalmanInfo = std::make_unique<KalmanInfo>();
- mKalmanInfo->filterValue = 0.0f;
- mKalmanInfo->kalmanGain = 0.5f;
- mKalmanInfo->R =
- 0.0001f; // The smaller this value is, the smaller the error of measured value is,
- // the more we can trust the measured value.
- mKalmanInfo->P = 1.0f;
-
- if (@available(macos 10.15, iOS 14.0, *)) {
- // Sample CPU timestamp and GPU timestamp for first time at device creation
- [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
- }
- }
-
- return DeviceBase::Initialize(new Queue(this));
- }
-
- void Device::InitTogglesFromDriver() {
- {
- bool haveStoreAndMSAAResolve = false;
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macOS 10.12, *)) {
- haveStoreAndMSAAResolve =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
- }
-#elif defined(DAWN_PLATFORM_IOS)
- haveStoreAndMSAAResolve =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
-#endif
- // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
- SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
-
- bool haveSamplerCompare = true;
-#if defined(DAWN_PLATFORM_IOS)
- haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
-#endif
- // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
- SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
-
- bool haveBaseVertexBaseInstance = true;
-#if defined(DAWN_PLATFORM_IOS)
- haveBaseVertexBaseInstance =
- [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
-#endif
- // TODO(crbug.com/dawn/343): Investigate emulation.
- SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
- SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
- }
-
- // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
- // that code path if it isn't explicitly disabled.
- if (IsRobustnessEnabled()) {
- SetToggle(Toggle::MetalEnableVertexPulling, true);
- }
-
- // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
- SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
-
- const PCIInfo& pciInfo = GetAdapter()->GetPCIInfo();
-
- // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
- // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
- // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
- if (@available(macOS 10.15, iOS 14.0, *)) {
- bool useSharedMode = gpu_info::IsIntel(pciInfo.vendorId);
- SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
- }
-
- // TODO(crbug.com/dawn/1071): r8unorm and rg8unorm textures with multiple mip levels don't
- // clear properly on Intel Macs.
- if (gpu_info::IsIntel(pciInfo.vendorId)) {
- SetToggle(Toggle::DisableR8RG8Mipmaps, true);
- }
-
- // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
- // shader provided. Create a dummy fragment shader module to work around this issue.
- if (gpu_info::IsIntel(this->GetAdapter()->GetPCIInfo().vendorId)) {
- bool useDummyFragmentShader = true;
- if (gpu_info::IsSkylake(this->GetAdapter()->GetPCIInfo().deviceId)) {
- useDummyFragmentShader = false;
- }
- SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
- }
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
- if (frontendCompletedSerial > mCompletedSerial) {
- // sometimes we increase the serials, in which case the completed serial in
- // the device base will surpass the completed serial we have in the metal backend, so we
- // must update ours when we see that the completed serial from device base has
- // increased.
- mCompletedSerial = frontendCompletedSerial;
- }
- return ExecutionSerial(mCompletedSerial.load());
- }
-
- MaybeError Device::TickImpl() {
- DAWN_TRY(SubmitPendingCommandBuffer());
-
- // Just run timestamp period calculation when timestamp feature is enabled.
- if (IsFeatureEnabled(Feature::TimestampQuery)) {
- if (@available(macos 10.15, iOS 14.0, *)) {
- UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
- &mGpuTimestamp, &mTimestampPeriod);
- }
- }
-
- return {};
- }
-
- id<MTLDevice> Device::GetMTLDevice() {
- return mMtlDevice.Get();
- }
-
- id<MTLCommandQueue> Device::GetMTLQueue() {
- return mCommandQueue.Get();
- }
-
- CommandRecordingContext* Device::GetPendingCommandContext() {
- mCommandContext.MarkUsed();
- return &mCommandContext;
- }
-
- MaybeError Device::SubmitPendingCommandBuffer() {
- if (!mCommandContext.WasUsed()) {
- return {};
- }
-
- IncrementLastSubmittedCommandSerial();
-
- // Acquire the pending command buffer, which is retained. It must be released later.
- NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
-
- // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
- // schedule handler and this code.
- {
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- mLastSubmittedCommands = pendingCommands;
- }
-
- // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
- // handle types with copy / move constructors being referenced in the block..
- id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
- [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
- // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
- // is a local value (and not the member itself).
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
- this->mLastSubmittedCommands = nullptr;
- }
- }];
-
- // Update the completed serial once the completed handler is fired. Make a local copy of
- // mLastSubmittedSerial so it is captured by value.
- ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
- // this ObjC block runs on a different thread
- [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
- TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
- uint64_t(pendingSerial));
- ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
- this->mCompletedSerial = uint64_t(pendingSerial);
- }];
-
- TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
- uint64_t(pendingSerial));
- [*pendingCommands commit];
-
- return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- // Metal validation layers forbid 0-sized copies, assert it is skipped prior to calling
- // this function.
- ASSERT(size != 0);
-
- ToBackend(destination)
- ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
- size);
-
- id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
- id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
- [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
- sourceOffset:sourceOffset
- toBuffer:buffer
- destinationOffset:destinationOffset
- size:size];
- return {};
- }
-
- // In Metal we don't write from the CPU to the texture directly which can be done using the
- // replaceRegion function, because the function requires a non-private storage mode and Dawn
- // sets the private storage mode by default for all textures except IOSurfaces on macOS.
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& dataLayout,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- Texture* texture = ToBackend(dst->texture.Get());
- EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
- copySizePixels);
-
- RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
- source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
- dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
- dst->aspect, copySizePixels);
- return {};
- }
-
- Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
- }
- if (ConsumedError(
- ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface, plane))) {
- return nullptr;
- }
-
- Ref<Texture> result;
- if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface, plane),
- &result)) {
- return nullptr;
- }
- return result;
- }
-
- void Device::WaitForCommandsToBeScheduled() {
- if (ConsumedError(SubmitPendingCommandBuffer())) {
- return;
- }
-
- // Only lock the object while we take a reference to it, otherwise we could block further
- // progress if the driver calls the scheduled handler (which also acquires the lock) before
- // finishing the waitUntilScheduled.
- NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
- {
- std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- lastSubmittedCommands = mLastSubmittedCommands;
- }
- [*lastSubmittedCommands waitUntilScheduled];
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- // Forget all pending commands.
- mCommandContext.AcquireCommands();
- DAWN_TRY(CheckPassedSerials());
-
- // Wait for all commands to be finished so we can free resources
- while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
- usleep(100);
- DAWN_TRY(CheckPassedSerials());
- }
-
- return {};
- }
-
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
-
- // Forget all pending commands.
- mCommandContext.AcquireCommands();
-
- mCommandQueue = nullptr;
- mMtlDevice = nullptr;
- }
-
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
-
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
-
- float Device::GetTimestampPeriodInNS() const {
- return mTimestampPeriod;
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h b/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
deleted file mode 100644
index 9481348f520..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_FORWARD_H_
-#define DAWNNATIVE_METAL_FORWARD_H_
-
-#include "dawn_native/ToBackend.h"
-
-namespace dawn_native { namespace metal {
-
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class Framebuffer;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
-
- struct MetalBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
- return ToBackendBase<MetalBackendTraits>(common);
- }
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm b/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
deleted file mode 100644
index 4d0824d2f51..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// MetalBackend.cpp: contains the definition of symbols exported by MetalBackend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-#include "dawn_native/MetalBackend.h"
-
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-
-namespace dawn_native { namespace metal {
-
- id<MTLDevice> GetMetalDevice(WGPUDevice device) {
- return ToBackend(FromAPI(device))->GetMTLDevice();
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {
- }
-
- ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
- : ExternalImageDescriptor(ExternalImageType::IOSurface) {
- }
-
- WGPUTexture WrapIOSurface(WGPUDevice device,
- const ExternalImageDescriptorIOSurface* cDescriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
- Ref<TextureBase> texture = backendDevice->CreateTextureWrappingIOSurface(
- cDescriptor, cDescriptor->ioSurface, cDescriptor->plane);
- return ToAPI(texture.Detach());
- }
-
- void WaitForCommandsToBeScheduled(WGPUDevice device) {
- ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
deleted file mode 100644
index ae803e1d7bb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
-#define DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
-
-#include "common/ityp_stack_vec.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/PipelineLayout.h"
-
-#include "dawn_native/PerStage.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- // The number of Metal buffers usable by applications in general
- static constexpr size_t kMetalBufferTableSize = 31;
- // The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
- static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
- // The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
- static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
-
- static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static Ref<PipelineLayout> Create(Device* device,
- const PipelineLayoutDescriptor* descriptor);
-
- using BindingIndexInfo =
- ityp::array<BindGroupIndex,
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
- kMaxBindGroups>;
- const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
-
- // The number of Metal vertex stage buffers used for the whole pipeline layout.
- uint32_t GetBufferBindingCount(SingleShaderStage stage);
-
- private:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
- ~PipelineLayout() override = default;
- PerStage<BindingIndexInfo> mIndexInfo;
- PerStage<uint32_t> mBufferBindingCount;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
deleted file mode 100644
index 4faf5db1f69..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/PipelineLayoutMTL.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/metal/DeviceMTL.h"
-
-namespace dawn_native { namespace metal {
-
- // static
- Ref<PipelineLayout> PipelineLayout::Create(Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(device, descriptor));
- }
-
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
- // Each stage has its own numbering namespace in CompilerMSL.
- for (auto stage : IterateStages(kAllStages)) {
- uint32_t bufferIndex = 0;
- uint32_t samplerIndex = 0;
- uint32_t textureIndex = 0;
-
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
-
- for (BindingIndex bindingIndex{0};
- bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
- if (!(bindingInfo.visibility & StageBit(stage))) {
- continue;
- }
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- mIndexInfo[stage][group][bindingIndex] = bufferIndex;
- bufferIndex++;
- break;
-
- case BindingInfoType::Sampler:
- mIndexInfo[stage][group][bindingIndex] = samplerIndex;
- samplerIndex++;
- break;
-
- case BindingInfoType::Texture:
- case BindingInfoType::StorageTexture:
- case BindingInfoType::ExternalTexture:
- mIndexInfo[stage][group][bindingIndex] = textureIndex;
- textureIndex++;
- break;
- }
- }
- }
-
- mBufferBindingCount[stage] = bufferIndex;
- }
- }
-
- const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
- SingleShaderStage stage) const {
- return mIndexInfo[stage];
- }
-
- uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
- return mBufferBindingCount[stage];
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h
deleted file mode 100644
index a7b1ad7fa34..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_QUERYSETMTL_H_
-#define DAWNNATIVE_METAL_QUERYSETMTL_H_
-
-#include "dawn_native/QuerySet.h"
-
-#include "common/NSRef.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class QuerySet final : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
-
- id<MTLBuffer> GetVisibilityBuffer() const;
- id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
- API_AVAILABLE(macos(10.15), ios(14.0));
-
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
-
- // Dawn API
- void DestroyImpl() override;
-
- NSPRef<id<MTLBuffer>> mVisibilityBuffer;
- // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
- // propagate nicely through templates.
- id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
- ios(14.0)) = nullptr;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_QUERYSETMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm
deleted file mode 100644
index a8b53afb503..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/QuerySetMTL.h"
-
-#include "common/Math.h"
-#include "common/Platform.h"
-#include "dawn_native/metal/DeviceMTL.h"
-
-namespace dawn_native { namespace metal {
-
- namespace {
-
- ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(
- Device* device,
- MTLCommonCounterSet counterSet,
- uint32_t count) API_AVAILABLE(macos(10.15), ios(14.0)) {
- NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
- AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
- MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
-
- // To determine which counters are available from a device, we need to iterate through
- // the counterSets property of a MTLDevice. Then configure which counters will be
- // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
- // property to the matched one of the available set.
- for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
- if ([set.name isEqualToString:counterSet]) {
- descriptor.counterSet = set;
- break;
- }
- }
- ASSERT(descriptor.counterSet != nullptr);
-
- descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
- descriptor.storageMode = MTLStorageModePrivate;
- if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
- descriptor.storageMode = MTLStorageModeShared;
- }
-
- NSError* error = nullptr;
- id<MTLCounterSampleBuffer> counterSampleBuffer =
- [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor
- error:&error];
- if (error != nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
- [error.localizedDescription UTF8String]);
- }
-
- return counterSampleBuffer;
- }
- }
-
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(queryset->Initialize());
- return queryset;
- }
-
- MaybeError QuerySet::Initialize() {
- Device* device = ToBackend(GetDevice());
-
- switch (GetQueryType()) {
- case wgpu::QueryType::Occlusion: {
- // Create buffer for writing 64-bit results.
- NSUInteger bufferSize = static_cast<NSUInteger>(
- std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
- mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
- newBufferWithLength:bufferSize
- options:MTLResourceStorageModePrivate]);
-
- if (mVisibilityBuffer == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
- }
- break;
- }
- case wgpu::QueryType::PipelineStatistics:
- if (@available(macOS 10.15, iOS 14.0, *)) {
- DAWN_TRY_ASSIGN(mCounterSampleBuffer,
- CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
- GetQueryCount()));
- } else {
- UNREACHABLE();
- }
- break;
- case wgpu::QueryType::Timestamp:
- if (@available(macOS 10.15, iOS 14.0, *)) {
- DAWN_TRY_ASSIGN(mCounterSampleBuffer,
- CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
- GetQueryCount()));
- } else {
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- return {};
- }
-
- id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
- return mVisibilityBuffer.Get();
- }
-
- id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
- API_AVAILABLE(macos(10.15), ios(14.0)) {
- return mCounterSampleBuffer;
- }
-
- QuerySet::~QuerySet() = default;
-
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
-
- mVisibilityBuffer = nullptr;
-
- // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work will with
- // templates.
- if (@available(macOS 10.15, iOS 14.0, *)) {
- [mCounterSampleBuffer release];
- mCounterSampleBuffer = nullptr;
- }
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
deleted file mode 100644
index 38e79eddc45..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_QUEUEMTL_H_
-#define DAWNNATIVE_METAL_QUEUEMTL_H_
-
-#include "dawn_native/Queue.h"
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device);
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_QUEUEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
deleted file mode 100644
index ad1fad6f0e9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/QueueMTL.h"
-
-#include "common/Math.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/metal/CommandBufferMTL.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-namespace dawn_native { namespace metal {
-
- Queue::Queue(Device* device) : QueueBase(device) {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
-
- DAWN_TRY(device->Tick());
-
- CommandRecordingContext* commandContext = device->GetPendingCommandContext();
-
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
-
- return device->SubmitPendingCommandBuffer();
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
deleted file mode 100644
index d6bedfaedb1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
-#define DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
-
-#include "dawn_native/RenderPipeline.h"
-
-#include "common/NSRef.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipelineBase> CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- MTLPrimitiveType GetMTLPrimitiveTopology() const;
- MTLWinding GetMTLFrontFace() const;
- MTLCullMode GetMTLCullMode() const;
-
- void Encode(id<MTLRenderCommandEncoder> encoder);
-
- id<MTLDepthStencilState> GetMTLDepthStencilState();
-
- // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
- // vertex buffer table.
- uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
-
- wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
-
- MaybeError Initialize() override;
-
- private:
- using RenderPipelineBase::RenderPipelineBase;
-
- MTLVertexDescriptor* MakeVertexDesc();
-
- MTLPrimitiveType mMtlPrimitiveTopology;
- MTLWinding mMtlFrontFace;
- MTLCullMode mMtlCullMode;
- NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
- NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
- ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
-
- wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_RENDERPIPELINEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
deleted file mode 100644
index a4ca812e980..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ /dev/null
@@ -1,506 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/RenderPipelineMTL.h"
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/VertexFormat.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/PipelineLayoutMTL.h"
-#include "dawn_native/metal/ShaderModuleMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-
-namespace dawn_native { namespace metal {
-
- namespace {
- MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return MTLVertexFormatUChar2;
- case wgpu::VertexFormat::Uint8x4:
- return MTLVertexFormatUChar4;
- case wgpu::VertexFormat::Sint8x2:
- return MTLVertexFormatChar2;
- case wgpu::VertexFormat::Sint8x4:
- return MTLVertexFormatChar4;
- case wgpu::VertexFormat::Unorm8x2:
- return MTLVertexFormatUChar2Normalized;
- case wgpu::VertexFormat::Unorm8x4:
- return MTLVertexFormatUChar4Normalized;
- case wgpu::VertexFormat::Snorm8x2:
- return MTLVertexFormatChar2Normalized;
- case wgpu::VertexFormat::Snorm8x4:
- return MTLVertexFormatChar4Normalized;
- case wgpu::VertexFormat::Uint16x2:
- return MTLVertexFormatUShort2;
- case wgpu::VertexFormat::Uint16x4:
- return MTLVertexFormatUShort4;
- case wgpu::VertexFormat::Sint16x2:
- return MTLVertexFormatShort2;
- case wgpu::VertexFormat::Sint16x4:
- return MTLVertexFormatShort4;
- case wgpu::VertexFormat::Unorm16x2:
- return MTLVertexFormatUShort2Normalized;
- case wgpu::VertexFormat::Unorm16x4:
- return MTLVertexFormatUShort4Normalized;
- case wgpu::VertexFormat::Snorm16x2:
- return MTLVertexFormatShort2Normalized;
- case wgpu::VertexFormat::Snorm16x4:
- return MTLVertexFormatShort4Normalized;
- case wgpu::VertexFormat::Float16x2:
- return MTLVertexFormatHalf2;
- case wgpu::VertexFormat::Float16x4:
- return MTLVertexFormatHalf4;
- case wgpu::VertexFormat::Float32:
- return MTLVertexFormatFloat;
- case wgpu::VertexFormat::Float32x2:
- return MTLVertexFormatFloat2;
- case wgpu::VertexFormat::Float32x3:
- return MTLVertexFormatFloat3;
- case wgpu::VertexFormat::Float32x4:
- return MTLVertexFormatFloat4;
- case wgpu::VertexFormat::Uint32:
- return MTLVertexFormatUInt;
- case wgpu::VertexFormat::Uint32x2:
- return MTLVertexFormatUInt2;
- case wgpu::VertexFormat::Uint32x3:
- return MTLVertexFormatUInt3;
- case wgpu::VertexFormat::Uint32x4:
- return MTLVertexFormatUInt4;
- case wgpu::VertexFormat::Sint32:
- return MTLVertexFormatInt;
- case wgpu::VertexFormat::Sint32x2:
- return MTLVertexFormatInt2;
- case wgpu::VertexFormat::Sint32x3:
- return MTLVertexFormatInt3;
- case wgpu::VertexFormat::Sint32x4:
- return MTLVertexFormatInt4;
- default:
- UNREACHABLE();
- }
- }
-
- MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
- switch (mode) {
- case wgpu::VertexStepMode::Vertex:
- return MTLVertexStepFunctionPerVertex;
- case wgpu::VertexStepMode::Instance:
- return MTLVertexStepFunctionPerInstance;
- }
- }
-
- MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return MTLPrimitiveTypePoint;
- case wgpu::PrimitiveTopology::LineList:
- return MTLPrimitiveTypeLine;
- case wgpu::PrimitiveTopology::LineStrip:
- return MTLPrimitiveTypeLineStrip;
- case wgpu::PrimitiveTopology::TriangleList:
- return MTLPrimitiveTypeTriangle;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return MTLPrimitiveTypeTriangleStrip;
- }
- }
-
- MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
- wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return MTLPrimitiveTopologyClassPoint;
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::LineStrip:
- return MTLPrimitiveTopologyClassLine;
- case wgpu::PrimitiveTopology::TriangleList:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return MTLPrimitiveTopologyClassTriangle;
- }
- }
-
- MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return MTLBlendFactorZero;
- case wgpu::BlendFactor::One:
- return MTLBlendFactorOne;
- case wgpu::BlendFactor::Src:
- return MTLBlendFactorSourceColor;
- case wgpu::BlendFactor::OneMinusSrc:
- return MTLBlendFactorOneMinusSourceColor;
- case wgpu::BlendFactor::SrcAlpha:
- return MTLBlendFactorSourceAlpha;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return MTLBlendFactorOneMinusSourceAlpha;
- case wgpu::BlendFactor::Dst:
- return MTLBlendFactorDestinationColor;
- case wgpu::BlendFactor::OneMinusDst:
- return MTLBlendFactorOneMinusDestinationColor;
- case wgpu::BlendFactor::DstAlpha:
- return MTLBlendFactorDestinationAlpha;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return MTLBlendFactorOneMinusDestinationAlpha;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return MTLBlendFactorSourceAlphaSaturated;
- case wgpu::BlendFactor::Constant:
- return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
- case wgpu::BlendFactor::OneMinusConstant:
- return alpha ? MTLBlendFactorOneMinusBlendAlpha
- : MTLBlendFactorOneMinusBlendColor;
- }
- }
-
- MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return MTLBlendOperationAdd;
- case wgpu::BlendOperation::Subtract:
- return MTLBlendOperationSubtract;
- case wgpu::BlendOperation::ReverseSubtract:
- return MTLBlendOperationReverseSubtract;
- case wgpu::BlendOperation::Min:
- return MTLBlendOperationMin;
- case wgpu::BlendOperation::Max:
- return MTLBlendOperationMax;
- }
- }
-
- MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
- bool isDeclaredInFragmentShader) {
- if (!isDeclaredInFragmentShader) {
- return MTLColorWriteMaskNone;
- }
-
- MTLColorWriteMask mask = MTLColorWriteMaskNone;
-
- if (writeMask & wgpu::ColorWriteMask::Red) {
- mask |= MTLColorWriteMaskRed;
- }
- if (writeMask & wgpu::ColorWriteMask::Green) {
- mask |= MTLColorWriteMaskGreen;
- }
- if (writeMask & wgpu::ColorWriteMask::Blue) {
- mask |= MTLColorWriteMaskBlue;
- }
- if (writeMask & wgpu::ColorWriteMask::Alpha) {
- mask |= MTLColorWriteMaskAlpha;
- }
-
- return mask;
- }
-
- void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
- const ColorTargetState* state,
- bool isDeclaredInFragmentShader) {
- attachment.blendingEnabled = state->blend != nullptr;
- if (attachment.blendingEnabled) {
- attachment.sourceRGBBlendFactor =
- MetalBlendFactor(state->blend->color.srcFactor, false);
- attachment.destinationRGBBlendFactor =
- MetalBlendFactor(state->blend->color.dstFactor, false);
- attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
- attachment.sourceAlphaBlendFactor =
- MetalBlendFactor(state->blend->alpha.srcFactor, true);
- attachment.destinationAlphaBlendFactor =
- MetalBlendFactor(state->blend->alpha.dstFactor, true);
- attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
- }
- attachment.writeMask =
- MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
- }
-
- MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
- switch (stencilOperation) {
- case wgpu::StencilOperation::Keep:
- return MTLStencilOperationKeep;
- case wgpu::StencilOperation::Zero:
- return MTLStencilOperationZero;
- case wgpu::StencilOperation::Replace:
- return MTLStencilOperationReplace;
- case wgpu::StencilOperation::Invert:
- return MTLStencilOperationInvert;
- case wgpu::StencilOperation::IncrementClamp:
- return MTLStencilOperationIncrementClamp;
- case wgpu::StencilOperation::DecrementClamp:
- return MTLStencilOperationDecrementClamp;
- case wgpu::StencilOperation::IncrementWrap:
- return MTLStencilOperationIncrementWrap;
- case wgpu::StencilOperation::DecrementWrap:
- return MTLStencilOperationDecrementWrap;
- }
- }
-
- NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
- NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
- AcquireNSRef([MTLDepthStencilDescriptor new]);
- MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
-
- mtlDepthStencilDescriptor.depthCompareFunction =
- ToMetalCompareFunction(descriptor->depthCompare);
- mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
-
- if (StencilTestEnabled(descriptor)) {
- NSRef<MTLStencilDescriptor> backFaceStencilRef =
- AcquireNSRef([MTLStencilDescriptor new]);
- MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
- NSRef<MTLStencilDescriptor> frontFaceStencilRef =
- AcquireNSRef([MTLStencilDescriptor new]);
- MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
-
- backFaceStencil.stencilCompareFunction =
- ToMetalCompareFunction(descriptor->stencilBack.compare);
- backFaceStencil.stencilFailureOperation =
- MetalStencilOperation(descriptor->stencilBack.failOp);
- backFaceStencil.depthFailureOperation =
- MetalStencilOperation(descriptor->stencilBack.depthFailOp);
- backFaceStencil.depthStencilPassOperation =
- MetalStencilOperation(descriptor->stencilBack.passOp);
- backFaceStencil.readMask = descriptor->stencilReadMask;
- backFaceStencil.writeMask = descriptor->stencilWriteMask;
-
- frontFaceStencil.stencilCompareFunction =
- ToMetalCompareFunction(descriptor->stencilFront.compare);
- frontFaceStencil.stencilFailureOperation =
- MetalStencilOperation(descriptor->stencilFront.failOp);
- frontFaceStencil.depthFailureOperation =
- MetalStencilOperation(descriptor->stencilFront.depthFailOp);
- frontFaceStencil.depthStencilPassOperation =
- MetalStencilOperation(descriptor->stencilFront.passOp);
- frontFaceStencil.readMask = descriptor->stencilReadMask;
- frontFaceStencil.writeMask = descriptor->stencilWriteMask;
-
- mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
- mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
- }
-
- return mtlDepthStencilDescRef;
- }
-
- MTLWinding MTLFrontFace(wgpu::FrontFace face) {
- switch (face) {
- case wgpu::FrontFace::CW:
- return MTLWindingClockwise;
- case wgpu::FrontFace::CCW:
- return MTLWindingCounterClockwise;
- }
- }
-
- MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return MTLCullModeNone;
- case wgpu::CullMode::Front:
- return MTLCullModeFront;
- case wgpu::CullMode::Back:
- return MTLCullModeBack;
- }
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
- }
-
- MaybeError RenderPipeline::Initialize() {
- mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
- mMtlFrontFace = MTLFrontFace(GetFrontFace());
- mMtlCullMode = ToMTLCullMode(GetCullMode());
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
-
- NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
- AcquireNSRef([MTLRenderPipelineDescriptor new]);
- MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
-
- // TODO: MakeVertexDesc should be const in the future, so we don't need to call it here when
- // vertex pulling is enabled
- NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
-
- // Calling MakeVertexDesc first is important since it sets indices for packed bindings
- if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
- vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
- }
- descriptorMTL.vertexDescriptor = vertexDesc.Get();
-
- const PerStage<ProgrammableStage>& allStages = GetAllStages();
- const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
- ShaderModule::MetalFunctionData vertexData;
- DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
- &vertexData, 0xFFFFFFFF, this));
-
- descriptorMTL.vertexFunction = vertexData.function.Get();
- if (vertexData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
- }
-
- if (GetStageMask() & wgpu::ShaderStage::Fragment) {
- const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
- ShaderModule::MetalFunctionData fragmentData;
- DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
- ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
-
- descriptorMTL.fragmentFunction = fragmentData.function.Get();
- if (fragmentData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
- }
-
- const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
- MetalPixelFormat(GetColorAttachmentFormat(i));
- const ColorTargetState* descriptor = GetColorTargetState(i);
- ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
- descriptor, fragmentOutputsWritten[i]);
- }
- }
-
- if (HasDepthStencilAttachment()) {
- wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
- const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
- MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
-
- if (internalFormat.HasDepth()) {
- descriptorMTL.depthAttachmentPixelFormat = metalFormat;
- }
- if (internalFormat.HasStencil()) {
- descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
- }
- }
-
- descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
- descriptorMTL.sampleCount = GetSampleCount();
- descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
-
- NSError* error = nullptr;
- mMtlRenderPipelineState =
- AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
- error:&error]);
- if (error != nullptr) {
- return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state") +
- [error.localizedDescription UTF8String]);
- }
- ASSERT(mMtlRenderPipelineState != nil);
-
- // Create depth stencil state and cache it, fetch the cached depth stencil state when we
- // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
- // to improve performance.
- NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
- MakeDepthStencilDesc(GetDepthStencilState());
- mMtlDepthStencilState =
- AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
-
- return {};
- }
-
- MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
- return mMtlPrimitiveTopology;
- }
-
- MTLWinding RenderPipeline::GetMTLFrontFace() const {
- return mMtlFrontFace;
- }
-
- MTLCullMode RenderPipeline::GetMTLCullMode() const {
- return mMtlCullMode;
- }
-
- void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
- [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
- }
-
- id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
- return mMtlDepthStencilState.Get();
- }
-
- uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
- ASSERT(slot < kMaxVertexBuffersTyped);
- return mMtlVertexBufferIndices[slot];
- }
-
- wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
- return mStagesRequiringStorageBufferLength;
- }
-
- MTLVertexDescriptor* RenderPipeline::MakeVertexDesc() {
- MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
-
- // Vertex buffers are packed after all the buffers for the bind groups.
- uint32_t mtlVertexBufferIndex =
- ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
-
- for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& info = GetVertexBuffer(slot);
-
- MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
- if (info.arrayStride == 0) {
- // For MTLVertexStepFunctionConstant, the stepRate must be 0,
- // but the arrayStride must NOT be 0, so we made up it with
- // max(attrib.offset + sizeof(attrib) for each attrib)
- size_t maxArrayStride = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& attrib = GetAttribute(loc);
- // Only use the attributes that use the current input
- if (attrib.vertexBufferSlot != slot) {
- continue;
- }
- maxArrayStride =
- std::max(maxArrayStride, GetVertexFormatInfo(attrib.format).byteSize +
- size_t(attrib.offset));
- }
- layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
- layoutDesc.stepRate = 0;
- // Metal requires the stride must be a multiple of 4 bytes, align it with next
- // multiple of 4 if it's not.
- layoutDesc.stride = Align(maxArrayStride, 4);
- } else {
- layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
- layoutDesc.stepRate = 1;
- layoutDesc.stride = info.arrayStride;
- }
-
- mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
- [layoutDesc release];
-
- mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
- mtlVertexBufferIndex++;
- }
-
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& info = GetAttribute(loc);
-
- auto attribDesc = [MTLVertexAttributeDescriptor new];
- attribDesc.format = VertexFormatType(info.format);
- attribDesc.offset = info.offset;
- attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
- mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
- [attribDesc release];
- }
-
- return mtlVertexDescriptor;
- }
-
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
deleted file mode 100644
index 274ba2033f2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_SAMPLERMTL_H_
-#define DAWNNATIVE_METAL_SAMPLERMTL_H_
-
-#include "dawn_native/Sampler.h"
-
-#include "common/NSRef.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class Sampler final : public SamplerBase {
- public:
- static ResultOrError<Ref<Sampler>> Create(Device* device,
- const SamplerDescriptor* descriptor);
-
- id<MTLSamplerState> GetMTLSamplerState();
-
- private:
- using SamplerBase::SamplerBase;
- MaybeError Initialize(const SamplerDescriptor* descriptor);
-
- NSPRef<id<MTLSamplerState>> mMtlSamplerState;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_SAMPLERMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
deleted file mode 100644
index 608c6bbebe9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/SamplerMTL.h"
-
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-
-namespace dawn_native { namespace metal {
-
- namespace {
- MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
- switch (mode) {
- case wgpu::FilterMode::Nearest:
- return MTLSamplerMinMagFilterNearest;
- case wgpu::FilterMode::Linear:
- return MTLSamplerMinMagFilterLinear;
- }
- }
-
- MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
- switch (mode) {
- case wgpu::FilterMode::Nearest:
- return MTLSamplerMipFilterNearest;
- case wgpu::FilterMode::Linear:
- return MTLSamplerMipFilterLinear;
- }
- }
-
- MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return MTLSamplerAddressModeRepeat;
- case wgpu::AddressMode::MirrorRepeat:
- return MTLSamplerAddressModeMirrorRepeat;
- case wgpu::AddressMode::ClampToEdge:
- return MTLSamplerAddressModeClampToEdge;
- }
- }
- }
-
- // static
- ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
- const SamplerDescriptor* descriptor) {
- DAWN_INVALID_IF(
- descriptor->compare != wgpu::CompareFunction::Undefined &&
- device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
- "Sampler compare function (%s) not supported. Compare functions are disabled with the "
- "Metal backend.",
- descriptor->compare);
-
- Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
- DAWN_TRY(sampler->Initialize(descriptor));
- return sampler;
- }
-
- MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
- NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
- MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
- mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
- mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
-
- mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
- mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
- mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
-
- mtlDesc.lodMinClamp = descriptor->lodMinClamp;
- mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
- // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
- mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- // Sampler compare is unsupported before A9, which we validate in
- // Sampler::Create.
- mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
- // The value is default-initialized in the else-case, and we don't set it or the
- // Metal debug device errors.
- }
-
- mMtlSamplerState = AcquireNSPRef(
- [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
-
- if (mMtlSamplerState == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
- }
- return {};
- }
-
- id<MTLSamplerState> Sampler::GetMTLSamplerState() {
- return mMtlSamplerState.Get();
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
deleted file mode 100644
index e82ffad3f81..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_SHADERMODULEMTL_H_
-#define DAWNNATIVE_METAL_SHADERMODULEMTL_H_
-
-#include "dawn_native/ShaderModule.h"
-
-#include "common/NSRef.h"
-#include "dawn_native/Error.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
- class PipelineLayout;
- class RenderPipeline;
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- struct MetalFunctionData {
- NSPRef<id<MTLFunction>> function;
- bool needsStorageBufferLength;
- std::vector<uint32_t> workgroupAllocations;
- };
-
- // MTLFunctionConstantValues needs @available tag to compile
- // Use id (like void*) in function signature as workaround and do static cast inside
- MaybeError CreateFunction(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- MetalFunctionData* out,
- id constantValues = nil,
- uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr);
-
- private:
- ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- std::string* remappedEntryPointName,
- bool* needsStorageBufferLength,
- bool* hasInvariantAttribute,
- std::vector<uint32_t>* workgroupAllocations);
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_SHADERMODULEMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
deleted file mode 100644
index 9189cf84c6c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/ShaderModuleMTL.h"
-
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/TintUtils.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/PipelineLayoutMTL.h"
-#include "dawn_native/metal/RenderPipelineMTL.h"
-
-#include <tint/tint.h>
-
-#include <sstream>
-
-namespace dawn_native { namespace metal {
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
- }
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
- return InitializeBase(parseResult);
- }
-
- ResultOrError<std::string> ShaderModule::TranslateToMSL(
- const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- std::string* remappedEntryPointName,
- bool* needsStorageBufferLength,
- bool* hasInvariantAttribute,
- std::vector<uint32_t>* workgroupAllocations) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- std::ostringstream errorStream;
- errorStream << "Tint MSL failure:" << std::endl;
-
- // Remap BindingNumber to BindingIndex in WGSL shader
- using BindingRemapper = tint::transform::BindingRemapper;
- using BindingPoint = tint::transform::BindingPoint;
- BindingRemapper::BindingPoints bindingPoints;
- BindingRemapper::AccessControls accessControls;
-
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase::BindingMap& bindingMap =
- layout->GetBindGroupLayout(group)->GetBindingMap();
- for (const auto& it : bindingMap) {
- BindingNumber bindingNumber = it.first;
- BindingIndex bindingIndex = it.second;
-
- const BindingInfo& bindingInfo =
- layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
-
- if (!(bindingInfo.visibility & StageBit(stage))) {
- continue;
- }
-
- uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
-
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(bindingNumber)};
- BindingPoint dstBindingPoint{0, shaderIndex};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
- }
- }
-
- tint::transform::Manager transformManager;
- tint::transform::DataMap transformInputs;
-
- // We only remap bindings for the target entry point, so we need to strip all other entry
- // points to avoid generating invalid bindings for them.
- transformManager.Add<tint::transform::SingleEntryPoint>();
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
-
- if (stage == SingleShaderStage::Vertex &&
- GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
- transformManager.Add<tint::transform::VertexPulling>();
- AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
- kPullingBufferBindingSet, &transformInputs);
-
- for (VertexBufferSlot slot :
- IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
- uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
-
- // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
- BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
- static_cast<uint8_t>(slot)};
- BindingPoint dstBindingPoint{0, metalIndex};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
- }
- }
- if (GetDevice()->IsRobustnessEnabled()) {
- transformManager.Add<tint::transform::Robustness>();
- }
- transformManager.Add<tint::transform::BindingRemapper>();
- transformManager.Add<tint::transform::Renamer>();
-
- if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
- // We still need to rename MSL reserved keywords
- transformInputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kMslKeywords);
- }
-
- transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls),
- /* mayCollide */ true);
-
- tint::Program program;
- tint::transform::DataMap transformOutputs;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
- &transformOutputs, nullptr));
-
- if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
- auto it = data->remappings.find(entryPointName);
- if (it != data->remappings.end()) {
- *remappedEntryPointName = it->second;
- } else {
- DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
- "Could not find remapped name for entry point.");
-
- *remappedEntryPointName = entryPointName;
- }
- } else {
- return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
- }
-
- tint::writer::msl::Options options;
- options.buffer_size_ubo_index = kBufferLengthBufferSlot;
- options.fixed_sample_mask = sampleMask;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- options.emit_vertex_point_size =
- stage == SingleShaderStage::Vertex &&
- renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
- auto result = tint::writer::msl::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating MSL: %s.",
- result.error);
-
- *needsStorageBufferLength = result.needs_storage_buffer_sizes;
- *hasInvariantAttribute = result.has_invariant_attribute;
- *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
-
- return std::move(result.msl);
- }
-
- MaybeError ShaderModule::CreateFunction(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- ShaderModule::MetalFunctionData* out,
- id constantValuesPointer,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline) {
- ASSERT(!IsError());
- ASSERT(out);
-
- // Vertex stages must specify a renderPipeline
- if (stage == SingleShaderStage::Vertex) {
- ASSERT(renderPipeline != nullptr);
- }
-
- std::string remappedEntryPointName;
- std::string msl;
- bool hasInvariantAttribute = false;
- DAWN_TRY_ASSIGN(msl,
- TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
- &remappedEntryPointName, &out->needsStorageBufferLength,
- &hasInvariantAttribute, &out->workgroupAllocations));
-
- // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
- // category. -Wunused-variable in particular comes up a lot in generated code, and some
- // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
- // of a warning.
- msl = R"(
-#ifdef __clang__
-#pragma clang diagnostic ignored "-Wall"
-#endif
-)" + msl;
-
- if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
- GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
-
- NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
- if (hasInvariantAttribute) {
- if (@available(macOS 11.0, iOS 13.0, *)) {
- (*compileOptions).preserveInvariance = true;
- }
- }
- auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- NSError* error = nullptr;
- NSPRef<id<MTLLibrary>> library =
- AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
- options:compileOptions.Get()
- error:&error]);
- if (error != nullptr) {
- DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
- "Unable to create library object: %s.",
- [error.localizedDescription UTF8String]);
- }
- ASSERT(library != nil);
-
- NSRef<NSString> name =
- AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
-
- if (constantValuesPointer != nil) {
- if (@available(macOS 10.12, *)) {
- MTLFunctionConstantValues* constantValues = constantValuesPointer;
- out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
- constantValues:constantValues
- error:&error]);
- if (error != nullptr) {
- if (error.code != MTLLibraryErrorCompileWarning) {
- return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
- [error.localizedDescription UTF8String]);
- }
- }
- ASSERT(out->function != nil);
- } else {
- UNREACHABLE();
- }
- } else {
- out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
- }
-
- if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
- GetEntryPoint(entryPointName).usedVertexInputs.any()) {
- out->needsStorageBufferLength = true;
- }
-
- return {};
- }
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.h
deleted file mode 100644
index b2d6551cd5f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_STAGINGBUFFERMETAL_H_
-#define DAWNNATIVE_STAGINGBUFFERMETAL_H_
-
-#include "dawn_native/StagingBuffer.h"
-
-#include "common/NSRef.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class Device;
-
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
-
- id<MTLBuffer> GetBufferHandle() const;
-
- MaybeError Initialize() override;
-
- private:
- Device* mDevice;
- NSPRef<id<MTLBuffer>> mBuffer;
- };
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_STAGINGBUFFERMETAL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm
deleted file mode 100644
index af06b3548de..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/StagingBufferMTL.h"
-#include "dawn_native/metal/DeviceMTL.h"
-
-namespace dawn_native { namespace metal {
-
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
-
- MaybeError StagingBuffer::Initialize() {
- const size_t bufferSize = GetSize();
- mBuffer = AcquireNSPRef([mDevice->GetMTLDevice()
- newBufferWithLength:bufferSize
- options:MTLResourceStorageModeShared]);
-
- if (mBuffer == nullptr) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
-
- mMappedPointer = [*mBuffer contents];
- if (mMappedPointer == nullptr) {
- return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
- }
-
- return {};
- }
-
- id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
- return mBuffer.Get();
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
deleted file mode 100644
index 3b72163a6ed..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_SWAPCHAINMTL_H_
-#define DAWNNATIVE_METAL_SWAPCHAINMTL_H_
-
-#include "dawn_native/SwapChain.h"
-
-#include "common/NSRef.h"
-
-@class CAMetalLayer;
-@protocol CAMetalDrawable;
-
-namespace dawn_native { namespace metal {
-
- class Device;
- class Texture;
-
- class OldSwapChain final : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* deivce, const SwapChainDescriptor* descriptor);
-
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
- };
-
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- void DestroyImpl() override;
-
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- NSRef<CAMetalLayer> mLayer;
-
- NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
- Ref<Texture> mTexture;
-
- MaybeError PresentImpl() override;
- ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_SWAPCHAINMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
deleted file mode 100644
index d932986aad4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/SwapChainMTL.h"
-
-#include "dawn_native/Surface.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-
-#include <dawn/dawn_wsi.h>
-
-#import <QuartzCore/CAMetalLayer.h>
-
-namespace dawn_native { namespace metal {
-
- // OldSwapChain
-
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
- }
-
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextMetal wsiContext = {};
- wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
- wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
- im.Init(im.userData, &wsiContext);
- }
-
- OldSwapChain::~OldSwapChain() {
- }
-
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
-
- id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
-
- return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
- }
-
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
- }
-
- // SwapChain
-
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
-
- SwapChain::~SwapChain() = default;
-
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
- }
-
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
-
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
- "Metal SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
-
- previousSwapChain->DetachFromSurface();
- }
-
- mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
- ASSERT(mLayer != nullptr);
-
- CGSize size = {};
- size.width = GetWidth();
- size.height = GetHeight();
- [*mLayer setDrawableSize:size];
-
- [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
- [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
- [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
-
-#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macos 10.13, *)) {
- [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
- }
-#endif // defined(DAWN_PLATFORM_MACOS)
-
- // There is no way to control Fifo vs. Mailbox in Metal.
-
- return {};
- }
-
- MaybeError SwapChain::PresentImpl() {
- ASSERT(mCurrentDrawable != nullptr);
- [*mCurrentDrawable present];
-
- mTexture->APIDestroy();
- mTexture = nullptr;
-
- mCurrentDrawable = nullptr;
-
- return {};
- }
-
- ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
- ASSERT(mCurrentDrawable == nullptr);
- mCurrentDrawable = [*mLayer nextDrawable];
-
- TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
-
- mTexture = Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc,
- [*mCurrentDrawable texture]);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- return mTexture->APICreateView();
- }
-
- void SwapChain::DetachFromSurfaceImpl() {
- ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
-
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
-
- mCurrentDrawable = nullptr;
- }
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
deleted file mode 100644
index bfcf02f8fb3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_TEXTUREMTL_H_
-#define DAWNNATIVE_METAL_TEXTUREMTL_H_
-
-#include "dawn_native/Texture.h"
-
-#include "common/NSRef.h"
-#include "dawn_native/DawnNative.h"
-
-#include <IOSurface/IOSurfaceRef.h>
-#import <Metal/Metal.h>
-
-namespace dawn_native { namespace metal {
-
- class CommandRecordingContext;
- class Device;
-
- MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
- MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
- const TextureDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane);
-
- class Texture final : public TextureBase {
- public:
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> CreateFromIOSurface(
- Device* device,
- const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane);
- static Ref<Texture> CreateWrapping(Device* device,
- const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped);
-
- id<MTLTexture> GetMTLTexture();
-
- void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range);
-
- private:
- using TextureBase::TextureBase;
- ~Texture() override;
-
- NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
-
- MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
- MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane);
- void InitializeAsWrapping(const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped);
-
- void DestroyImpl() override;
-
- MaybeError ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue);
-
- NSPRef<id<MTLTexture>> mMtlTexture;
- MTLTextureUsage mMtlUsage;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
-
- id<MTLTexture> GetMTLTexture();
-
- private:
- using TextureViewBase::TextureViewBase;
- MaybeError Initialize(const TextureViewDescriptor* descriptor);
-
- NSPRef<id<MTLTexture>> mMtlTextureView;
- };
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_TEXTUREMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
deleted file mode 100644
index 37eb9bc3fd9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ /dev/null
@@ -1,784 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/TextureMTL.h"
-
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "common/Platform.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/StagingBufferMTL.h"
-#include "dawn_native/metal/UtilsMetal.h"
-
-#include <CoreVideo/CVPixelBuffer.h>
-
-namespace dawn_native { namespace metal {
-
- namespace {
- bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
- constexpr wgpu::TextureUsage kUsageNeedsTextureView =
- wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
- return usage & kUsageNeedsTextureView;
- }
-
- MTLTextureUsage MetalTextureUsage(const Format& format,
- wgpu::TextureUsage usage,
- uint32_t sampleCount) {
- MTLTextureUsage result = MTLTextureUsageUnknown; // This is 0
-
- if (usage & (wgpu::TextureUsage::StorageBinding)) {
- result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
- }
-
- if (usage & (wgpu::TextureUsage::TextureBinding)) {
- result |= MTLTextureUsageShaderRead;
-
- // For sampling stencil aspect of combined depth/stencil. See TextureView
- // constructor.
- if (@available(macOS 10.12, iOS 10.0, *)) {
- if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
- result |= MTLTextureUsagePixelFormatView;
- }
- }
- }
-
- // MTLTextureUsageRenderTarget is needed to clear multisample textures.
- if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
- result |= MTLTextureUsageRenderTarget;
- }
-
- return result;
- }
-
- MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
- unsigned int sampleCount) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
- case wgpu::TextureViewDimension::e2DArray:
- return MTLTextureType2DArray;
- case wgpu::TextureViewDimension::Cube:
- return MTLTextureTypeCube;
- case wgpu::TextureViewDimension::CubeArray:
- return MTLTextureTypeCubeArray;
- case wgpu::TextureViewDimension::e3D:
- return MTLTextureType3D;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
-
- bool RequiresCreatingNewTextureView(const TextureBase* texture,
- const TextureViewDescriptor* textureViewDescriptor) {
- if (texture->GetFormat().format != textureViewDescriptor->format) {
- return true;
- }
-
- if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount) {
- return true;
- }
-
- if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
- return true;
- }
-
- if (IsSubset(Aspect::Depth | Aspect::Stencil, texture->GetFormat().aspects) &&
- textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
- return true;
- }
-
- switch (textureViewDescriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return true;
- default:
- break;
- }
-
- return false;
- }
-
- ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
- switch (format) {
- case kCVPixelFormatType_32RGBA:
- return wgpu::TextureFormat::RGBA8Unorm;
- case kCVPixelFormatType_32BGRA:
- return wgpu::TextureFormat::BGRA8Unorm;
- case kCVPixelFormatType_TwoComponent8:
- return wgpu::TextureFormat::RG8Unorm;
- case kCVPixelFormatType_OneComponent8:
- return wgpu::TextureFormat::R8Unorm;
- default:
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).",
- format);
- }
- }
-
-#if defined(DAWN_PLATFORM_MACOS)
- MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
-#elif defined(DAWN_PLATFORM_IOS)
- MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
-#else
-# error "Unsupported Apple platform."
-#endif
- }
-
- MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return MTLPixelFormatR8Unorm;
- case wgpu::TextureFormat::R8Snorm:
- return MTLPixelFormatR8Snorm;
- case wgpu::TextureFormat::R8Uint:
- return MTLPixelFormatR8Uint;
- case wgpu::TextureFormat::R8Sint:
- return MTLPixelFormatR8Sint;
-
- case wgpu::TextureFormat::R16Uint:
- return MTLPixelFormatR16Uint;
- case wgpu::TextureFormat::R16Sint:
- return MTLPixelFormatR16Sint;
- case wgpu::TextureFormat::R16Float:
- return MTLPixelFormatR16Float;
- case wgpu::TextureFormat::RG8Unorm:
- return MTLPixelFormatRG8Unorm;
- case wgpu::TextureFormat::RG8Snorm:
- return MTLPixelFormatRG8Snorm;
- case wgpu::TextureFormat::RG8Uint:
- return MTLPixelFormatRG8Uint;
- case wgpu::TextureFormat::RG8Sint:
- return MTLPixelFormatRG8Sint;
-
- case wgpu::TextureFormat::R32Uint:
- return MTLPixelFormatR32Uint;
- case wgpu::TextureFormat::R32Sint:
- return MTLPixelFormatR32Sint;
- case wgpu::TextureFormat::R32Float:
- return MTLPixelFormatR32Float;
- case wgpu::TextureFormat::RG16Uint:
- return MTLPixelFormatRG16Uint;
- case wgpu::TextureFormat::RG16Sint:
- return MTLPixelFormatRG16Sint;
- case wgpu::TextureFormat::RG16Float:
- return MTLPixelFormatRG16Float;
- case wgpu::TextureFormat::RGBA8Unorm:
- return MTLPixelFormatRGBA8Unorm;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return MTLPixelFormatRGBA8Unorm_sRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return MTLPixelFormatRGBA8Snorm;
- case wgpu::TextureFormat::RGBA8Uint:
- return MTLPixelFormatRGBA8Uint;
- case wgpu::TextureFormat::RGBA8Sint:
- return MTLPixelFormatRGBA8Sint;
- case wgpu::TextureFormat::BGRA8Unorm:
- return MTLPixelFormatBGRA8Unorm;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return MTLPixelFormatBGRA8Unorm_sRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return MTLPixelFormatRGB10A2Unorm;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return MTLPixelFormatRG11B10Float;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return MTLPixelFormatRGB9E5Float;
-
- case wgpu::TextureFormat::RG32Uint:
- return MTLPixelFormatRG32Uint;
- case wgpu::TextureFormat::RG32Sint:
- return MTLPixelFormatRG32Sint;
- case wgpu::TextureFormat::RG32Float:
- return MTLPixelFormatRG32Float;
- case wgpu::TextureFormat::RGBA16Uint:
- return MTLPixelFormatRGBA16Uint;
- case wgpu::TextureFormat::RGBA16Sint:
- return MTLPixelFormatRGBA16Sint;
- case wgpu::TextureFormat::RGBA16Float:
- return MTLPixelFormatRGBA16Float;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return MTLPixelFormatRGBA32Uint;
- case wgpu::TextureFormat::RGBA32Sint:
- return MTLPixelFormatRGBA32Sint;
- case wgpu::TextureFormat::RGBA32Float:
- return MTLPixelFormatRGBA32Float;
-
- case wgpu::TextureFormat::Depth32Float:
- return MTLPixelFormatDepth32Float;
- case wgpu::TextureFormat::Depth24Plus:
- return MTLPixelFormatDepth32Float;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- return MTLPixelFormatDepth32Float_Stencil8;
- case wgpu::TextureFormat::Depth16Unorm:
- if (@available(macOS 10.12, iOS 13.0, *)) {
- return MTLPixelFormatDepth16Unorm;
- } else {
- // TODO (dawn:1181): Allow non-conformant implementation on macOS 10.11
- UNREACHABLE();
- }
-
-#if defined(DAWN_PLATFORM_MACOS)
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return MTLPixelFormatBC1_RGBA;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return MTLPixelFormatBC1_RGBA_sRGB;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return MTLPixelFormatBC2_RGBA;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return MTLPixelFormatBC2_RGBA_sRGB;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return MTLPixelFormatBC3_RGBA;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return MTLPixelFormatBC3_RGBA_sRGB;
- case wgpu::TextureFormat::BC4RSnorm:
- return MTLPixelFormatBC4_RSnorm;
- case wgpu::TextureFormat::BC4RUnorm:
- return MTLPixelFormatBC4_RUnorm;
- case wgpu::TextureFormat::BC5RGSnorm:
- return MTLPixelFormatBC5_RGSnorm;
- case wgpu::TextureFormat::BC5RGUnorm:
- return MTLPixelFormatBC5_RGUnorm;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return MTLPixelFormatBC6H_RGBFloat;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return MTLPixelFormatBC6H_RGBUfloat;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return MTLPixelFormatBC7_RGBAUnorm;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return MTLPixelFormatBC7_RGBAUnorm_sRGB;
-#else
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-#endif
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- // TODO(dawn:690): implement depth24unorm-stencil8
- case wgpu::TextureFormat::Depth24UnormStencil8:
- // TODO(dawn:690): implement depth32float-stencil8
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
- }
- }
-
- MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
- const TextureDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane) {
- // IOSurfaceGetPlaneCount can return 0 for non-planar IOSurfaces but we will treat
- // non-planar like it is a single plane.
- size_t surfacePlaneCount = std::max(size_t(1), IOSurfaceGetPlaneCount(ioSurface));
- DAWN_INVALID_IF(plane >= surfacePlaneCount,
- "IOSurface plane (%u) exceeds the surface's plane count (%u).", plane,
- surfacePlaneCount);
-
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- uint32_t surfaceWidth = IOSurfaceGetWidthOfPlane(ioSurface, plane);
- uint32_t surfaceHeight = IOSurfaceGetHeightOfPlane(ioSurface, plane);
-
- DAWN_INVALID_IF(
- descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
- descriptor->size.depthOrArrayLayers != 1,
- "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
- surfaceWidth, surfaceHeight, &descriptor->size);
-
- wgpu::TextureFormat ioSurfaceFormat;
- DAWN_TRY_ASSIGN(ioSurfaceFormat,
- GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
- DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
- "IOSurface format (%s) doesn't match the descriptor format (%s).",
- ioSurfaceFormat, descriptor->format);
-
- return {};
- }
-
- NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
- NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
- MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
- mtlDesc.width = GetWidth();
- mtlDesc.height = GetHeight();
- mtlDesc.sampleCount = GetSampleCount();
- // TODO: add MTLTextureUsagePixelFormatView when needed when we support format
- // reinterpretation.
- mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
- mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
- mtlDesc.mipmapLevelCount = GetNumMipLevels();
- mtlDesc.storageMode = MTLStorageModePrivate;
-
- // Choose the correct MTLTextureType and paper over differences in how the array layer count
- // is specified.
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- mtlDesc.depth = 1;
- mtlDesc.arrayLength = GetArrayLayers();
- if (mtlDesc.arrayLength > 1) {
- ASSERT(mtlDesc.sampleCount == 1);
- mtlDesc.textureType = MTLTextureType2DArray;
- } else if (mtlDesc.sampleCount > 1) {
- mtlDesc.textureType = MTLTextureType2DMultisample;
- } else {
- mtlDesc.textureType = MTLTextureType2D;
- }
- break;
- case wgpu::TextureDimension::e3D:
- mtlDesc.depth = GetDepth();
- mtlDesc.arrayLength = 1;
- ASSERT(mtlDesc.sampleCount == 1);
- mtlDesc.textureType = MTLTextureType3D;
- break;
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
-
- return mtlDescRef;
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
- return texture;
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(
- Device* device,
- const ExternalImageDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- Ref<Texture> texture =
- AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface, plane));
- return texture;
- }
-
- // static
- Ref<Texture> Texture::CreateWrapping(Device* device,
- const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- texture->InitializeAsWrapping(descriptor, std::move(wrapped));
- return texture;
- }
-
- MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
- Device* device = ToBackend(GetDevice());
-
- NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
- mMtlUsage = [*mtlDesc usage];
- mMtlTexture =
- AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
-
- if (mMtlTexture == nil) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
- }
-
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
- TextureBase::ClearValue::NonZero));
- }
-
- return {};
- }
-
- void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
- NSPRef<id<MTLTexture>> wrapped) {
- NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
- mMtlUsage = [*mtlDesc usage];
- mMtlTexture = std::move(wrapped);
- }
-
- MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane) {
- Device* device = ToBackend(GetDevice());
-
- NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
- [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
-
- mMtlUsage = [*mtlDesc usage];
- mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
- iosurface:ioSurface
- plane:plane]);
-
- SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
-
- return {};
- }
-
- Texture::~Texture() {
- }
-
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
- mMtlTexture = nullptr;
- }
-
- id<MTLTexture> Texture::GetMTLTexture() {
- return mMtlTexture.Get();
- }
-
- MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- Device* device = ToBackend(GetDevice());
-
- const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
-
- if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
- ASSERT(GetFormat().isRenderable);
-
- // End the blit encoder if it is open.
- commandContext->EndBlit();
-
- if (GetFormat().HasDepthOrStencil()) {
- // Create a render pass to clear each subresource.
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, range.aspects))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- // Note that this creates a descriptor that's autoreleased so we don't use
- // AcquireNSRef
- NSRef<MTLRenderPassDescriptor> descriptorRef =
- [MTLRenderPassDescriptor renderPassDescriptor];
- MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
-
- // At least one aspect needs clearing. Iterate the aspects individually to
- // determine which to clear.
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- switch (aspect) {
- case Aspect::Depth:
- descriptor.depthAttachment.texture = GetMTLTexture();
- descriptor.depthAttachment.level = level;
- descriptor.depthAttachment.slice = arrayLayer;
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- descriptor.depthAttachment.clearDepth = dClearColor;
- break;
- case Aspect::Stencil:
- descriptor.stencilAttachment.texture = GetMTLTexture();
- descriptor.stencilAttachment.level = level;
- descriptor.stencilAttachment.slice = arrayLayer;
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- descriptor.stencilAttachment.clearStencil =
- static_cast<uint32_t>(clearColor);
- break;
- default:
- UNREACHABLE();
- }
- }
-
- commandContext->BeginRender(descriptor);
- commandContext->EndRender();
- }
- }
- } else {
- ASSERT(GetFormat().IsColor());
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- // Create multiple render passes with each subresource as a color attachment to
- // clear them all. Only do this for array layers to ensure all attachments have
- // the same size.
- NSRef<MTLRenderPassDescriptor> descriptor;
- uint32_t attachment = 0;
-
- uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
-
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
- level, arrayLayer, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- for (uint32_t z = 0; z < numZSlices; ++z) {
- if (descriptor == nullptr) {
- // Note that this creates a descriptor that's autoreleased so we
- // don't use AcquireNSRef
- descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
- }
-
- [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
- [*descriptor colorAttachments][attachment].loadAction =
- MTLLoadActionClear;
- [*descriptor colorAttachments][attachment].storeAction =
- MTLStoreActionStore;
- [*descriptor colorAttachments][attachment].clearColor =
- MTLClearColorMake(dClearColor, dClearColor, dClearColor,
- dClearColor);
- [*descriptor colorAttachments][attachment].level = level;
- [*descriptor colorAttachments][attachment].slice = arrayLayer;
- [*descriptor colorAttachments][attachment].depthPlane = z;
-
- attachment++;
-
- if (attachment == kMaxColorAttachments) {
- attachment = 0;
- commandContext->BeginRender(descriptor.Get());
- commandContext->EndRender();
- descriptor = nullptr;
- }
- }
- }
-
- if (descriptor != nullptr) {
- commandContext->BeginRender(descriptor.Get());
- commandContext->EndRender();
- }
- }
- }
- } else {
- Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
-
- // Encode a buffer to texture copy to clear each subresource.
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- // Compute the buffer size big enough to fill the largest mip.
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
-
- // Metal validation layers: sourceBytesPerRow must be at least 64.
- uint32_t largestMipBytesPerRow =
- std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
-
- // Metal validation layers: sourceBytesPerImage must be at least 512.
- uint64_t largestMipBytesPerImage =
- std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
- (largestMipSize.height / blockInfo.height),
- 512llu);
-
- uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
-
- if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
-
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
- id<MTLBuffer> uploadBuffer =
- ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
-
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- Extent3D virtualSize = GetMipLevelVirtualSize(level);
-
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
- [commandContext->EnsureBlit()
- copyFromBuffer:uploadBuffer
- sourceOffset:uploadHandle.startOffset
- sourceBytesPerRow:largestMipBytesPerRow
- sourceBytesPerImage:largestMipBytesPerImage
- sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
- virtualSize.depthOrArrayLayers)
- toTexture:GetMTLTexture()
- destinationSlice:arrayLayer
- destinationLevel:level
- destinationOrigin:MTLOriginMake(0, 0, 0)
- options:blitOption];
- }
- }
- }
- }
-
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
- }
- return {};
- }
-
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
- const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could
- // contain dirty bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
- }
- }
-
- // static
- ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
- DAWN_TRY(view->Initialize(descriptor));
- return view;
- }
-
- MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
- Texture* texture = ToBackend(GetTexture());
-
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return {};
- }
-
- id<MTLTexture> mtlTexture = texture->GetMTLTexture();
-
- if (!UsageNeedsTextureView(texture->GetInternalUsage())) {
- mMtlTextureView = nullptr;
- } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
- mMtlTextureView = mtlTexture;
- } else {
- MTLPixelFormat format = MetalPixelFormat(descriptor->format);
- if (descriptor->aspect == wgpu::TextureAspect::StencilOnly) {
- if (@available(macOS 10.12, iOS 10.0, *)) {
- ASSERT(format == MTLPixelFormatDepth32Float_Stencil8);
- format = MTLPixelFormatX32_Stencil8;
- } else {
- // TODO(enga): Add a workaround to back combined depth/stencil textures
- // with Sampled usage using two separate textures.
- // Or, consider always using the workaround for D32S8.
- GetDevice()->ConsumedError(
- DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
- "combined depth/stencil format."));
- }
- }
-
- MTLTextureType textureViewType =
- MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
- auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
- auto arrayLayerRange =
- NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
-
- mMtlTextureView =
- AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:format
- textureType:textureViewType
- levels:mipLevelRange
- slices:arrayLayerRange]);
- if (mMtlTextureView == nil) {
- return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
- }
- }
-
- return {};
- }
-
- id<MTLTexture> TextureView::GetMTLTexture() {
- ASSERT(mMtlTextureView != nullptr);
- return mMtlTextureView.Get();
- }
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
deleted file mode 100644
index 3a17c99b274..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METAL_UTILSMETAL_H_
-#define DAWNNATIVE_METAL_UTILSMETAL_H_
-
-#include "dawn_native/dawn_platform.h"
-#include "dawn_native/metal/DeviceMTL.h"
-#include "dawn_native/metal/ShaderModuleMTL.h"
-#include "dawn_native/metal/TextureMTL.h"
-
-#import <Metal/Metal.h>
-
-namespace dawn_native {
- struct ProgrammableStage;
- struct EntryPointMetadata;
- enum class SingleShaderStage;
-}
-
-namespace dawn_native { namespace metal {
-
- MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
-
- struct TextureBufferCopySplit {
- static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
-
- struct CopyInfo {
- NSUInteger bufferOffset;
- NSUInteger bytesPerRow;
- NSUInteger bytesPerImage;
- Origin3D textureOrigin;
- Extent3D copyExtent;
- };
-
- uint32_t count = 0;
- std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
-
- auto begin() const {
- return copies.begin();
- }
-
- auto end() const {
- return copies.begin() + count;
- }
- };
-
- TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
- uint32_t mipLevel,
- Origin3D origin,
- Extent3D copyExtent,
- uint64_t bufferSize,
- uint64_t bufferOffset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Aspect aspect);
-
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
- Texture* texture,
- const TextureCopy& dst,
- const Extent3D& size);
-
- MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
-
- // Helper function to create function with constant values wrapped in
- // if available branch
- MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
- SingleShaderStage singleShaderStage,
- PipelineLayout* pipelineLayout,
- ShaderModule::MetalFunctionData* functionData,
- uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr);
-
-}} // namespace dawn_native::metal
-
-#endif // DAWNNATIVE_METAL_UTILSMETAL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
deleted file mode 100644
index 1a7962afde0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/metal/UtilsMetal.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/ShaderModule.h"
-
-#include "common/Assert.h"
-
-namespace dawn_native { namespace metal {
-
- MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
- switch (compareFunction) {
- case wgpu::CompareFunction::Never:
- return MTLCompareFunctionNever;
- case wgpu::CompareFunction::Less:
- return MTLCompareFunctionLess;
- case wgpu::CompareFunction::LessEqual:
- return MTLCompareFunctionLessEqual;
- case wgpu::CompareFunction::Greater:
- return MTLCompareFunctionGreater;
- case wgpu::CompareFunction::GreaterEqual:
- return MTLCompareFunctionGreaterEqual;
- case wgpu::CompareFunction::NotEqual:
- return MTLCompareFunctionNotEqual;
- case wgpu::CompareFunction::Equal:
- return MTLCompareFunctionEqual;
- case wgpu::CompareFunction::Always:
- return MTLCompareFunctionAlways;
-
- case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
- }
- }
-
- TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
- uint32_t mipLevel,
- Origin3D origin,
- Extent3D copyExtent,
- uint64_t bufferSize,
- uint64_t bufferOffset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- Aspect aspect) {
- TextureBufferCopySplit copy;
- const Format textureFormat = texture->GetFormat();
- const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
-
- // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
- // compute the correct range when checking if the buffer is big enough to contain the
- // data for the whole copy. Instead of looking at the position of the last texel in the
- // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
- // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
- // buffer below where in memory, each row data (D) of the texture is followed by some
- // padding data (P):
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDA|PP|
- // The last pixel read will be A, but the driver will think it is the whole last padding
- // row, causing it to generate an error when the pixel buffer is just big enough.
-
- // We work around this limitation by detecting when Metal would complain and copy the
- // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
- uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
-
- // Metal validation layer requires that if the texture's pixel format is a compressed
- // format, the sourceSize must be a multiple of the pixel format's block size or be
- // clamped to the edge of the texture if the block extends outside the bounds of a
- // texture.
- const Extent3D clampedCopyExtent =
- texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
- // Check whether buffer size is big enough.
- bool needWorkaround =
- bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
- if (!needWorkaround) {
- copy.count = 1;
- copy.copies[0].bufferOffset = bufferOffset;
- copy.copies[0].bytesPerRow = bytesPerRow;
- copy.copies[0].bytesPerImage = bytesPerImage;
- copy.copies[0].textureOrigin = origin;
- copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depthOrArrayLayers};
- return copy;
- }
-
- uint64_t currentOffset = bufferOffset;
-
- // Doing all the copy except the last image.
- if (copyExtent.depthOrArrayLayers > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerImage;
- copy.copies[copy.count].textureOrigin = origin;
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depthOrArrayLayers - 1};
-
- ++copy.count;
-
- // Update offset to copy to the last image.
- currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
- }
-
- // Doing all the copy in last image except the last row.
- uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
- if (copyBlockRowCount > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
- copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
- origin.z + copyExtent.depthOrArrayLayers - 1};
-
- ASSERT(copyExtent.height - blockInfo.height <
- texture->GetMipLevelVirtualSize(mipLevel).height);
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
- copyExtent.height - blockInfo.height, 1};
-
- ++copy.count;
-
- // Update offset to copy to the last row.
- currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
- }
-
- // Doing the last row copy with the exact number of bytes in last row.
- // Workaround this issue in a way just like the copy to a 1D texture.
- uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
- uint32_t lastRowCopyExtentHeight =
- blockInfo.height + clampedCopyExtent.height - copyExtent.height;
- ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
-
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = lastRowDataSize;
- copy.copies[copy.count].bytesPerImage = lastRowDataSize;
- copy.copies[copy.count].textureOrigin = {origin.x,
- origin.y + copyExtent.height - blockInfo.height,
- origin.z + copyExtent.depthOrArrayLayers - 1};
- copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
- ++copy.count;
-
- return copy;
- }
-
- void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
- Texture* texture,
- const TextureCopy& dst,
- const Extent3D& size) {
- ASSERT(texture == dst.texture.Get());
- SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(commandContext, range);
- }
- }
-
- MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
- ASSERT(HasOneBit(aspect));
- ASSERT(format.aspects & aspect);
-
- if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
- // We only provide a blit option if the format has both depth and stencil.
- // It is invalid to provide a blit option otherwise.
- switch (aspect) {
- case Aspect::Depth:
- return MTLBlitOptionDepthFromDepthStencil;
- case Aspect::Stencil:
- return MTLBlitOptionStencilFromDepthStencil;
- default:
- UNREACHABLE();
- }
- }
- return MTLBlitOptionNone;
- }
-
- MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
- SingleShaderStage singleShaderStage,
- PipelineLayout* pipelineLayout,
- ShaderModule::MetalFunctionData* functionData,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline) {
- ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
- const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
- const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
- if (entryPointMetadata.overridableConstants.size() == 0) {
- DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage,
- pipelineLayout, functionData, nil, sampleMask,
- renderPipeline));
- return {};
- }
-
- if (@available(macOS 10.12, *)) {
- // MTLFunctionConstantValues can only be created within the if available branch
- NSRef<MTLFunctionConstantValues> constantValues =
- AcquireNSRef([MTLFunctionConstantValues new]);
-
- std::unordered_set<std::string> overriddenConstants;
-
- auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
- MTLDataType* type, OverridableConstantScalar* entry,
- double value = 0) {
- switch (dawnType) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- *type = MTLDataTypeBool;
- if (entry) {
- entry->b = static_cast<int32_t>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- *type = MTLDataTypeFloat;
- if (entry) {
- entry->f32 = static_cast<float>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- *type = MTLDataTypeInt;
- if (entry) {
- entry->i32 = static_cast<int32_t>(value);
- }
- break;
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- *type = MTLDataTypeUInt;
- if (entry) {
- entry->u32 = static_cast<uint32_t>(value);
- }
- break;
- default:
- UNREACHABLE();
- }
- };
-
- for (const auto& pipelineConstant : programmableStage.constants) {
- const std::string& name = pipelineConstant.first;
- double value = pipelineConstant.second;
-
- overriddenConstants.insert(name);
-
- // This is already validated so `name` must exist
- const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
-
- MTLDataType type;
- OverridableConstantScalar entry{};
-
- switchType(moduleConstant.type, &type, &entry, value);
-
- [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
- }
-
- // Set shader initialized default values because MSL function_constant
- // has no default value
- for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
- if (overriddenConstants.count(name) != 0) {
- // This constant already has overridden value
- continue;
- }
-
- // Must exist because it is validated
- const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
- ASSERT(moduleConstant.isInitialized);
- MTLDataType type;
-
- switchType(moduleConstant.type, &type, nullptr);
-
- [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
- type:type
- atIndex:moduleConstant.id];
- }
-
- DAWN_TRY(shaderModule->CreateFunction(
- shaderEntryPoint, singleShaderStage, pipelineLayout, functionData,
- constantValues.Get(), sampleMask, renderPipeline));
- } else {
- UNREACHABLE();
- }
- return {};
- }
-
-}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
deleted file mode 100644
index b7146b91473..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ /dev/null
@@ -1,515 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/null/DeviceNull.h"
-
-#include "dawn_native/BackendConnection.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/Surface.h"
-
-namespace dawn_native { namespace null {
-
- // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
-
- Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
- mPCIInfo.name = "Null backend";
- mAdapterType = wgpu::AdapterType::CPU;
- MaybeError err = Initialize();
- ASSERT(err.IsSuccess());
- }
-
- Adapter::~Adapter() = default;
-
- bool Adapter::SupportsExternalImages() const {
- return false;
- }
-
- // Used for the tests that intend to use an adapter without all features enabled.
- void Adapter::SetSupportedFeatures(const std::vector<const char*>& requiredFeatures) {
- mSupportedFeatures = GetInstance()->FeatureNamesToFeaturesSet(requiredFeatures);
- }
-
- MaybeError Adapter::InitializeImpl() {
- return {};
- }
-
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- // Enable all features by default for the convenience of tests.
- mSupportedFeatures.featuresBitSet.set();
- return {};
- }
-
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- GetDefaultLimits(&limits->v1);
- return {};
- }
-
- ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DawnDeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
- }
-
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
- }
-
- std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
- // There is always a single Null adapter because it is purely CPU based and doesn't
- // depend on the system.
- std::vector<std::unique_ptr<AdapterBase>> adapters;
- std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(GetInstance());
- adapters.push_back(std::move(adapter));
- return adapters;
- }
- };
-
- BackendConnection* Connect(InstanceBase* instance) {
- return new Backend(instance);
- }
-
- struct CopyFromStagingToBufferOperation : PendingOperation {
- virtual void Execute() {
- destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
- }
-
- StagingBufferBase* staging;
- Ref<Buffer> destination;
- uint64_t sourceOffset;
- uint64_t destinationOffset;
- uint64_t size;
- };
-
- // Device
-
- // static
- ResultOrError<Device*> Device::Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize());
- return device.Detach();
- }
-
- Device::~Device() {
- Destroy();
- }
-
- MaybeError Device::Initialize() {
- return DeviceBase::Initialize(new Queue(this));
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(new BindGroup(this, descriptor));
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- DAWN_TRY(IncrementMemoryUsage(descriptor->size));
- return AcquireRef(new Buffer(this, descriptor));
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(this, descriptor));
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(this, descriptor));
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return AcquireRef(new QuerySet(this, descriptor));
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(this, descriptor));
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(this, descriptor));
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(this, descriptor));
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
-
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
-
- // Clear pending operations before checking mMemoryUsage because some operations keep a
- // reference to Buffers.
- mPendingOperations.clear();
- ASSERT(mMemoryUsage == 0);
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- mPendingOperations.clear();
- return {};
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- destination->SetIsDataInitialized();
- }
-
- auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
- operation->staging = source;
- operation->destination = ToBackend(destination);
- operation->sourceOffset = sourceOffset;
- operation->destinationOffset = destinationOffset;
- operation->size = size;
-
- AddPendingOperation(std::move(operation));
-
- return {};
- }
-
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- return {};
- }
-
- MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
- static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
- if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
- }
- mMemoryUsage += bytes;
- return {};
- }
-
- void Device::DecrementMemoryUsage(uint64_t bytes) {
- ASSERT(mMemoryUsage >= bytes);
- mMemoryUsage -= bytes;
- }
-
- MaybeError Device::TickImpl() {
- return SubmitPendingOperations();
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- return GetLastSubmittedCommandSerial();
- }
-
- void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
- mPendingOperations.emplace_back(std::move(operation));
- }
-
- MaybeError Device::SubmitPendingOperations() {
- for (auto& operation : mPendingOperations) {
- operation->Execute();
- }
- mPendingOperations.clear();
-
- DAWN_TRY(CheckPassedSerials());
- IncrementLastSubmittedCommandSerial();
-
- return {};
- }
-
- // BindGroupDataHolder
-
- BindGroupDataHolder::BindGroupDataHolder(size_t size)
- : mBindingDataAllocation(malloc(size)) // malloc is guaranteed to return a
- // pointer aligned enough for the allocation
- {
- }
-
- BindGroupDataHolder::~BindGroupDataHolder() {
- free(mBindingDataAllocation);
- }
-
- // BindGroup
-
- BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
- : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
- BindGroupBase(device, descriptor, mBindingDataAllocation) {
- }
-
- // BindGroupLayout
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
- }
-
- // Buffer
-
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
- mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
- mAllocatedSize = GetSize();
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // Only return true for mappable buffers so we can test cases that need / don't need a
- // staging buffer.
- return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
- }
-
- void Buffer::CopyFromStaging(StagingBufferBase* staging,
- uint64_t sourceOffset,
- uint64_t destinationOffset,
- uint64_t size) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
- memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
- }
-
- void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
- ASSERT(bufferOffset + size <= GetSize());
- ASSERT(mBackingData);
- memcpy(mBackingData.get() + bufferOffset, data, size);
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- return {};
- }
-
- void* Buffer::GetMappedPointerImpl() {
- return mBackingData.get();
- }
-
- void Buffer::UnmapImpl() {
- }
-
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
- }
-
- // CommandBuffer
-
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
-
- // QuerySet
-
- QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
- : QuerySetBase(device, descriptor) {
- }
-
- // Queue
-
- Queue::Queue(Device* device) : QueueBase(device) {
- }
-
- Queue::~Queue() {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
- Device* device = ToBackend(GetDevice());
-
- // The Vulkan, D3D12 and Metal implementation all tick the device here,
- // for testing purposes we should also tick in the null implementation.
- DAWN_TRY(device->Tick());
-
- return device->SubmitPendingOperations();
- }
-
- MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
- return {};
- }
-
- // ComputePipeline
- MaybeError ComputePipeline::Initialize() {
- return {};
- }
-
- // RenderPipeline
- MaybeError RenderPipeline::Initialize() {
- return {};
- }
-
- // SwapChain
-
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
-
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
- return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
- }
- }
-
- return {};
- }
-
- SwapChain::~SwapChain() = default;
-
- MaybeError SwapChain::PresentImpl() {
- mTexture->APIDestroy();
- mTexture = nullptr;
- return {};
- }
-
- ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
- TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- mTexture = AcquireRef(
- new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- return mTexture->APICreateView();
- }
-
- void SwapChain::DetachFromSurfaceImpl() {
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
- }
- }
-
- // ShaderModule
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- return InitializeBase(parseResult);
- }
-
- // OldSwapChain
-
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- im.Init(im.userData, nullptr);
- }
-
- OldSwapChain::~OldSwapChain() {
- }
-
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- return GetDevice()->APICreateTexture(descriptor);
- }
-
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
- }
-
- // NativeSwapChainImpl
-
- void NativeSwapChainImpl::Init(WSIContext* context) {
- }
-
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height) {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::Present() {
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
- }
-
- // StagingBuffer
-
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
-
- StagingBuffer::~StagingBuffer() {
- if (mBuffer) {
- mDevice->DecrementMemoryUsage(GetSize());
- }
- }
-
- MaybeError StagingBuffer::Initialize() {
- DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
- mBuffer = std::make_unique<uint8_t[]>(GetSize());
- mMappedPointer = mBuffer.get();
- return {};
- }
-
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
-
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
-
- float Device::GetTimestampPeriodInNS() const {
- return 1.0f;
- }
-
-}} // namespace dawn_native::null
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
deleted file mode 100644
index 380295bba7c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_NULL_DEVICENULL_H_
-#define DAWNNATIVE_NULL_DEVICENULL_H_
-
-#include "dawn_native/Adapter.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/ComputePipeline.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/RenderPipeline.h"
-#include "dawn_native/RingBufferAllocator.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/ShaderModule.h"
-#include "dawn_native/StagingBuffer.h"
-#include "dawn_native/SwapChain.h"
-#include "dawn_native/Texture.h"
-#include "dawn_native/ToBackend.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native { namespace null {
-
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- using PipelineLayout = PipelineLayoutBase;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- using Sampler = SamplerBase;
- class ShaderModule;
- class SwapChain;
- using Texture = TextureBase;
- using TextureView = TextureViewBase;
-
- struct NullBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
- return ToBackendBase<NullBackendTraits>(common);
- }
-
- struct PendingOperation {
- virtual ~PendingOperation() = default;
- virtual void Execute() = 0;
- };
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Device*> Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize();
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
- MaybeError SubmitPendingOperations();
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- MaybeError IncrementMemoryUsage(uint64_t bytes);
- void DecrementMemoryUsage(uint64_t bytes);
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- using DeviceBase::DeviceBase;
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
-
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
-
- static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
- size_t mMemoryUsage = 0;
- };
-
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance);
- ~Adapter() override;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
-
- // Used for the tests that intend to use an adapter without all features enabled.
- void SetSupportedFeatures(const std::vector<const char*>& requiredFeatures);
-
- private:
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
-
- ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) override;
- };
-
- // Helper class so |BindGroup| can allocate memory for its binding data,
- // before calling the BindGroupBase base class constructor.
- class BindGroupDataHolder {
- protected:
- explicit BindGroupDataHolder(size_t size);
- ~BindGroupDataHolder();
-
- void* mBindingDataAllocation;
- };
-
- // We don't have the complexity of placement-allocation of bind group data in
- // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
- class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
- public:
- BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
-
- private:
- ~BindGroup() override = default;
- };
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- private:
- ~BindGroupLayout() override = default;
- };
-
- class Buffer final : public BufferBase {
- public:
- Buffer(Device* device, const BufferDescriptor* descriptor);
-
- void CopyFromStaging(StagingBufferBase* staging,
- uint64_t sourceOffset,
- uint64_t destinationOffset,
- uint64_t size);
-
- void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
-
- private:
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- std::unique_ptr<uint8_t[]> mBackingData;
- };
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- };
-
- class QuerySet final : public QuerySetBase {
- public:
- QuerySet(Device* device, const QuerySetDescriptor* descriptor);
- };
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device);
-
- private:
- ~Queue() override;
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) override;
- };
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- using ComputePipelineBase::ComputePipelineBase;
-
- MaybeError Initialize() override;
- };
-
- class RenderPipeline final : public RenderPipelineBase {
- public:
- using RenderPipelineBase::RenderPipelineBase;
-
- MaybeError Initialize() override;
- };
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- using ShaderModuleBase::ShaderModuleBase;
-
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- };
-
- class SwapChain final : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-
- Ref<Texture> mTexture;
-
- MaybeError PresentImpl() override;
- ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
- };
-
- class OldSwapChain final : public OldSwapChainBase {
- public:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
-
- protected:
- ~OldSwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase*) override;
- };
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = struct {};
- void Init(WSIContext* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
- wgpu::TextureFormat GetPreferredFormat() const;
- };
-
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
- MaybeError Initialize() override;
-
- private:
- Device* mDevice;
- std::unique_ptr<uint8_t[]> mBuffer;
- };
-
-}} // namespace dawn_native::null
-
-#endif // DAWNNATIVE_NULL_DEVICENULL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp b/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp
deleted file mode 100644
index a48dcdccc4e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// NullBackend.cpp: contains the definition of symbols exported by NullBackend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-#include "dawn_native/NullBackend.h"
-
-#include "common/SwapChainUtils.h"
-#include "dawn_native/null/DeviceNull.h"
-
-namespace dawn_native { namespace null {
-
- DawnSwapChainImplementation CreateNativeSwapChainImpl() {
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
- impl.textureUsage = WGPUTextureUsage_Present;
- return impl;
- }
-
-}} // namespace dawn_native::null
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
deleted file mode 100644
index 3c57200b597..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/BackendGL.h"
-
-#include "common/GPUInfo.h"
-#include "common/Log.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/OpenGLBackend.h"
-#include "dawn_native/opengl/DeviceGL.h"
-
-#include <cstring>
-
-namespace dawn_native { namespace opengl {
-
- namespace {
-
- struct Vendor {
- const char* vendorName;
- uint32_t vendorId;
- };
-
- const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
- {"ARM", gpu_info::kVendorID_ARM},
- {"Imagination", gpu_info::kVendorID_ImgTec},
- {"Intel", gpu_info::kVendorID_Intel},
- {"NVIDIA", gpu_info::kVendorID_Nvidia},
- {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
-
- uint32_t GetVendorIdFromVendors(const char* vendor) {
- uint32_t vendorId = 0;
- for (const auto& it : kVendors) {
- // Matching vendor name with vendor string
- if (strstr(vendor, it.vendorName) != nullptr) {
- vendorId = it.vendorId;
- break;
- }
- }
- return vendorId;
- }
-
- void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
- GLenum type,
- GLuint id,
- GLenum severity,
- GLsizei length,
- const GLchar* message,
- const void* userParam) {
- const char* sourceText;
- switch (source) {
- case GL_DEBUG_SOURCE_API:
- sourceText = "OpenGL";
- break;
- case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
- sourceText = "Window System";
- break;
- case GL_DEBUG_SOURCE_SHADER_COMPILER:
- sourceText = "Shader Compiler";
- break;
- case GL_DEBUG_SOURCE_THIRD_PARTY:
- sourceText = "Third Party";
- break;
- case GL_DEBUG_SOURCE_APPLICATION:
- sourceText = "Application";
- break;
- case GL_DEBUG_SOURCE_OTHER:
- sourceText = "Other";
- break;
- default:
- sourceText = "UNKNOWN";
- break;
- }
-
- const char* severityText;
- switch (severity) {
- case GL_DEBUG_SEVERITY_HIGH:
- severityText = "High";
- break;
- case GL_DEBUG_SEVERITY_MEDIUM:
- severityText = "Medium";
- break;
- case GL_DEBUG_SEVERITY_LOW:
- severityText = "Low";
- break;
- case GL_DEBUG_SEVERITY_NOTIFICATION:
- severityText = "Notification";
- break;
- default:
- severityText = "UNKNOWN";
- break;
- }
-
- if (type == GL_DEBUG_TYPE_ERROR) {
- dawn::WarningLog() << "OpenGL error:"
- << "\n Source: " << sourceText //
- << "\n ID: " << id //
- << "\n Severity: " << severityText //
- << "\n Message: " << message;
-
- // Abort on an error when in Debug mode.
- UNREACHABLE();
- }
- }
-
- } // anonymous namespace
-
- // The OpenGL backend's Adapter.
-
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance, wgpu::BackendType backendType)
- : AdapterBase(instance, backendType) {
- }
-
- MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
- // Use getProc to populate the dispatch table
- return mFunctions.Initialize(getProc);
- }
-
- ~Adapter() override = default;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override {
- // Via dawn_native::opengl::WrapExternalEGLImage
- return GetBackendType() == wgpu::BackendType::OpenGLES;
- }
-
- private:
- MaybeError InitializeImpl() override {
- if (mFunctions.GetVersion().IsES()) {
- ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
- } else {
- ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
- }
-
- // Use the debug output functionality to get notified about GL errors
- // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
- // extensions
- bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
-
- if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
- mFunctions.Enable(GL_DEBUG_OUTPUT);
- mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
-
- // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH,
- 0, nullptr, GL_TRUE);
-
- // Severe performance warnings; GLSL or other shader compiler and linker warnings;
- // use of currently deprecated behavior
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM,
- 0, nullptr, GL_TRUE);
-
- // Performance warnings from redundant state changes; trivial undefined behavior
- // This is disabled because we do an incredible amount of redundant state changes.
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
- nullptr, GL_FALSE);
-
- // Any message which is not an error or performance concern
- mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
- GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr,
- GL_FALSE);
- mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
- }
-
- // Set state that never changes between devices.
- mFunctions.Enable(GL_DEPTH_TEST);
- mFunctions.Enable(GL_SCISSOR_TEST);
- mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
- if (mFunctions.GetVersion().IsDesktop()) {
- // These are not necessary on GLES. The functionality is enabled by default, and
- // works by specifying sample counts and SRGB textures, respectively.
- mFunctions.Enable(GL_MULTISAMPLE);
- mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
- }
- mFunctions.Enable(GL_SAMPLE_MASK);
-
- mPCIInfo.name = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
-
- // Workaroud to find vendor id from vendor name
- const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
- mPCIInfo.vendorId = GetVendorIdFromVendors(vendor);
-
- mDriverDescription = std::string("OpenGL version ") +
- reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
-
- if (mPCIInfo.name.find("SwiftShader") != std::string::npos) {
- mAdapterType = wgpu::AdapterType::CPU;
- }
-
- return {};
- }
-
- MaybeError InitializeSupportedFeaturesImpl() override {
- // TextureCompressionBC
- {
- // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
- bool supportsS3TC =
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
- (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
- mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
- mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
-
- // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
- // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
- // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
- // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
- bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
-
- // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
- // NVidia GLES drivers don't support this extension, but they do support
- // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
- // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
- // SRGB support even if S3TC is supported; see
- // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
- bool supportsS3TCSRGB =
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
- mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
-
- // BC4 and BC5
- bool supportsRGTC =
- mFunctions.IsAtLeastGL(3, 0) ||
- mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
-
- // BC6 and BC7
- bool supportsBPTC =
- mFunctions.IsAtLeastGL(4, 2) ||
- mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
- mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
-
- if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
- supportsBPTC) {
- mSupportedFeatures.EnableFeature(dawn_native::Feature::TextureCompressionBC);
- }
- }
-
- return {};
- }
-
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
- GetDefaultLimits(&limits->v1);
- return {};
- }
-
- ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) override {
- // There is no limit on the number of devices created from this adapter because they can
- // all share the same backing OpenGL context.
- return Device::Create(this, descriptor, mFunctions);
- }
-
- OpenGLFunctions mFunctions;
- };
-
- // Implementation of the OpenGL backend's BackendConnection
-
- Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
- : BackendConnection(instance, backendType) {
- }
-
- std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
- return {};
- }
-
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
- // know how to handle MakeCurrent.
- DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
-
- ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
- DAWN_INVALID_IF(options->getProc == nullptr,
- "AdapterDiscoveryOptions::getProc must be set");
-
- std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(
- GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType));
- DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
- DAWN_TRY(adapter->Initialize());
-
- mCreatedAdapter = true;
- std::vector<std::unique_ptr<AdapterBase>> adapters;
- adapters.push_back(std::unique_ptr<AdapterBase>(adapter.release()));
- return std::move(adapters);
- }
-
- BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
- return new Backend(instance, backendType);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.h
deleted file mode 100644
index a586a850c76..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_BACKENDGL_H_
-#define DAWNNATIVE_OPENGL_BACKENDGL_H_
-
-#include "dawn_native/BackendConnection.h"
-
-namespace dawn_native { namespace opengl {
-
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance, wgpu::BackendType backendType);
-
- std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* options) override;
-
- private:
- bool mCreatedAdapter = false;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_BACKENDGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
deleted file mode 100644
index b77e0ffc1d5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/BindGroupGL.h"
-
-#include "dawn_native/Texture.h"
-#include "dawn_native/opengl/BindGroupLayoutGL.h"
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
- const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
- for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
- const BindGroupEntry& entry = descriptor->entries[i];
-
- const auto& it = bindingMap.find(BindingNumber(entry.binding));
- BindingIndex bindingIndex = it->second;
- ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
-
- const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
- if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
- ASSERT(entry.textureView != nullptr);
- const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
- DAWN_INVALID_IF(
- textureViewLayerCount != 1 &&
- textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
- "%s binds %u layers. Currently the OpenGL backend only supports either binding "
- "1 layer or the all layers (%u) for storage texture.",
- entry.textureView, textureViewLayerCount,
- entry.textureView->GetTexture()->GetArrayLayers());
- }
- }
-
- return {};
- }
-
- BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(this, device, descriptor) {
- }
-
- BindGroup::~BindGroup() = default;
-
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this);
- }
-
- // static
- Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
deleted file mode 100644
index 994795a3e91..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_BINDGROUPGL_H_
-#define DAWNNATIVE_OPENGL_BINDGROUPGL_H_
-
-#include "common/PlacementAllocated.h"
-#include "dawn_native/BindGroup.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
-
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
-
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
-
- private:
- ~BindGroup() override;
-
- void DestroyImpl() override;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_BINDGROUPGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
deleted file mode 100644
index d008b1d48a4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/BindGroupLayoutGL.h"
-
-#include "dawn_native/opengl/BindGroupGL.h"
-
-namespace dawn_native { namespace opengl {
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
-
- Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
- mBindGroupAllocator.Deallocate(bindGroup);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
deleted file mode 100644
index 136bd0a7e5a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
-#define DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
-
-#include "common/SlabAllocator.h"
-#include "dawn_native/BindGroupLayout.h"
-
-namespace dawn_native { namespace opengl {
-
- class BindGroup;
- class Device;
-
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup);
-
- private:
- ~BindGroupLayout() override = default;
- SlabAllocator<BindGroup> mBindGroupAllocator;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
deleted file mode 100644
index a23556bc039..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/BufferGL.h"
-
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- // Buffer
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
- const BufferDescriptor* descriptor,
- bool shouldLazyClear) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
- if (descriptor->mappedAtCreation) {
- DAWN_TRY(buffer->MapAtCreationInternal());
- }
-
- return std::move(buffer);
- }
-
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
- : BufferBase(device, descriptor) {
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- mAllocatedSize = std::max(GetSize(), uint64_t(4u));
-
- device->gl.GenBuffers(1, &mBuffer);
- device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !descriptor->mappedAtCreation) {
- std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
- device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(),
- GL_STATIC_DRAW);
- } else {
- // Buffers start zeroed if you pass nullptr to glBufferData.
- device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
- }
- }
-
- Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
- : Buffer(device, descriptor) {
- if (!shouldLazyClear) {
- SetIsDataInitialized();
- }
- }
-
- Buffer::~Buffer() = default;
-
- GLuint Buffer::GetHandle() const {
- return mBuffer;
- }
-
- bool Buffer::EnsureDataInitialized() {
- if (!NeedsInitialization()) {
- return false;
- }
-
- InitializeToZero();
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero();
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero();
- return true;
- }
-
- void Buffer::InitializeToZero() {
- ASSERT(NeedsInitialization());
-
- const uint64_t size = GetAllocatedSize();
- Device* device = ToBackend(GetDevice());
-
- const std::vector<uint8_t> clearValues(size, 0u);
- device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
- device->IncrementLazyClearCountForTesting();
-
- SetIsDataInitialized();
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
- // driver to migrate it to shared memory.
- return true;
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
- return {};
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
- // so we extend the range to be 4 bytes.
- if (size == 0) {
- if (offset != 0) {
- offset -= 4;
- }
- size = 4;
- }
-
- EnsureDataInitialized();
-
- // This does GPU->CPU synchronization, we could require a high
- // version of OpenGL that would let us map the buffer unsynchronized.
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- void* mappedData = nullptr;
- if (mode & wgpu::MapMode::Read) {
- mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
- }
-
- // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
- // the resource but OpenGL gives us the pointer at offset. Remove the offset.
- mMappedData = static_cast<uint8_t*>(mappedData) - offset;
- return {};
- }
-
- void* Buffer::GetMappedPointerImpl() {
- // The mapping offset has already been removed.
- return mMappedData;
- }
-
- void Buffer::UnmapImpl() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
- gl.UnmapBuffer(GL_ARRAY_BUFFER);
- mMappedData = nullptr;
- }
-
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
- ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
- mBuffer = 0;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
deleted file mode 100644
index 596b229f22b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_BUFFERGL_H_
-#define DAWNNATIVE_OPENGL_BUFFERGL_H_
-
-#include "dawn_native/Buffer.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
- const BufferDescriptor* descriptor,
- bool shouldLazyClear);
-
- Buffer(Device* device, const BufferDescriptor* descriptor);
-
- GLuint GetHandle() const;
-
- bool EnsureDataInitialized();
- bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
- bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
-
- private:
- Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
- ~Buffer() override;
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- void InitializeToZero();
-
- GLuint mBuffer = 0;
- void* mMappedData = nullptr;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_BUFFERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
deleted file mode 100644
index f94fa50ea2d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ /dev/null
@@ -1,1491 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/CommandBufferGL.h"
-
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupTracker.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/VertexFormat.h"
-#include "dawn_native/opengl/BufferGL.h"
-#include "dawn_native/opengl/ComputePipelineGL.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/Forward.h"
-#include "dawn_native/opengl/PersistentPipelineStateGL.h"
-#include "dawn_native/opengl/PipelineLayoutGL.h"
-#include "dawn_native/opengl/RenderPipelineGL.h"
-#include "dawn_native/opengl/SamplerGL.h"
-#include "dawn_native/opengl/TextureGL.h"
-#include "dawn_native/opengl/UtilsGL.h"
-
-#include <cstring>
-
-namespace dawn_native { namespace opengl {
-
- namespace {
-
- GLenum IndexFormatType(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return GL_UNSIGNED_SHORT;
- case wgpu::IndexFormat::Uint32:
- return GL_UNSIGNED_INT;
- case wgpu::IndexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- GLenum VertexFormatType(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Unorm8x4:
- return GL_UNSIGNED_BYTE;
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Snorm8x2:
- case wgpu::VertexFormat::Snorm8x4:
- return GL_BYTE;
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Unorm16x4:
- return GL_UNSIGNED_SHORT;
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Snorm16x4:
- return GL_SHORT;
- case wgpu::VertexFormat::Float16x2:
- case wgpu::VertexFormat::Float16x4:
- return GL_HALF_FLOAT;
- case wgpu::VertexFormat::Float32:
- case wgpu::VertexFormat::Float32x2:
- case wgpu::VertexFormat::Float32x3:
- case wgpu::VertexFormat::Float32x4:
- return GL_FLOAT;
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Uint32x4:
- return GL_UNSIGNED_INT;
- case wgpu::VertexFormat::Sint32:
- case wgpu::VertexFormat::Sint32x2:
- case wgpu::VertexFormat::Sint32x3:
- case wgpu::VertexFormat::Sint32x4:
- return GL_INT;
- default:
- UNREACHABLE();
- }
- }
-
- GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Unorm8x4:
- case wgpu::VertexFormat::Snorm8x2:
- case wgpu::VertexFormat::Snorm8x4:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Unorm16x4:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Snorm16x4:
- return GL_TRUE;
- default:
- return GL_FALSE;
- }
- }
-
- bool VertexFormatIsInt(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Uint32x4:
- case wgpu::VertexFormat::Sint32:
- case wgpu::VertexFormat::Sint32x2:
- case wgpu::VertexFormat::Sint32x3:
- case wgpu::VertexFormat::Sint32x4:
- return true;
- default:
- return false;
- }
- }
-
- // Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
- // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
- // This means that we have to re-apply these buffers on a VertexState change.
- class VertexStateBufferBindingTracker {
- public:
- void OnSetIndexBuffer(BufferBase* buffer) {
- mIndexBufferDirty = true;
- mIndexBuffer = ToBackend(buffer);
- }
-
- void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
- mVertexBuffers[slot] = ToBackend(buffer);
- mVertexBufferOffsets[slot] = offset;
- mDirtyVertexBuffers.set(slot);
- }
-
- void OnSetPipeline(RenderPipelineBase* pipeline) {
- if (mLastPipeline == pipeline) {
- return;
- }
-
- mIndexBufferDirty = true;
- mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
-
- mLastPipeline = pipeline;
- }
-
- void Apply(const OpenGLFunctions& gl) {
- if (mIndexBufferDirty && mIndexBuffer != nullptr) {
- gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
- mIndexBufferDirty = false;
- }
-
- for (VertexBufferSlot slot : IterateBitSet(
- mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
- for (VertexAttributeLocation location : IterateBitSet(
- ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
- const VertexAttributeInfo& attribute =
- mLastPipeline->GetAttribute(location);
-
- GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
- GLuint buffer = mVertexBuffers[slot]->GetHandle();
- uint64_t offset = mVertexBufferOffsets[slot];
-
- const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
- uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
- GLenum formatType = VertexFormatType(attribute.format);
-
- GLboolean normalized = VertexFormatIsNormalized(attribute.format);
- gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
- if (VertexFormatIsInt(attribute.format)) {
- gl.VertexAttribIPointer(
- attribIndex, components, formatType, vertexBuffer.arrayStride,
- reinterpret_cast<void*>(
- static_cast<intptr_t>(offset + attribute.offset)));
- } else {
- gl.VertexAttribPointer(attribIndex, components, formatType, normalized,
- vertexBuffer.arrayStride,
- reinterpret_cast<void*>(static_cast<intptr_t>(
- offset + attribute.offset)));
- }
- }
- }
-
- mDirtyVertexBuffers.reset();
- }
-
- private:
- bool mIndexBufferDirty = false;
- Buffer* mIndexBuffer = nullptr;
-
- ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
- ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
- ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
-
- RenderPipelineBase* mLastPipeline = nullptr;
- };
-
- class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
- public:
- void OnSetPipeline(RenderPipeline* pipeline) {
- BindGroupTrackerBase::OnSetPipeline(pipeline);
- mPipeline = pipeline;
- }
-
- void OnSetPipeline(ComputePipeline* pipeline) {
- BindGroupTrackerBase::OnSetPipeline(pipeline);
- mPipeline = pipeline;
- }
-
- void Apply(const OpenGLFunctions& gl) {
- BeforeApply();
- for (BindGroupIndex index :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
- mDynamicOffsets[index].data());
- }
- AfterApply();
- }
-
- private:
- void ApplyBindGroup(const OpenGLFunctions& gl,
- BindGroupIndex index,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
- const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
- uint32_t currentDynamicOffsetIndex = 0;
-
- for (BindingIndex bindingIndex{0};
- bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo =
- group->GetLayout()->GetBindingInfo(bindingIndex);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
- GLuint buffer = ToBackend(binding.buffer)->GetHandle();
- GLuint index = indices[bindingIndex];
- GLuint offset = binding.offset;
-
- if (bindingInfo.buffer.hasDynamicOffset) {
- offset += dynamicOffsets[currentDynamicOffsetIndex];
- ++currentDynamicOffsetIndex;
- }
-
- GLenum target;
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- target = GL_UNIFORM_BUFFER;
- break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- target = GL_SHADER_STORAGE_BUFFER;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
-
- gl.BindBufferRange(target, index, buffer, offset, binding.size);
- break;
- }
-
- case BindingInfoType::Sampler: {
- Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
- GLuint samplerIndex = indices[bindingIndex];
-
- for (PipelineGL::SamplerUnit unit :
- mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
- // Only use filtering for certain texture units, because int
- // and uint texture are only complete without filtering
- if (unit.shouldUseFiltering) {
- gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
- } else {
- gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
- }
- }
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureView* view =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- GLuint handle = view->GetHandle();
- GLenum target = view->GetGLTarget();
- GLuint viewIndex = indices[bindingIndex];
-
- for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
- gl.ActiveTexture(GL_TEXTURE0 + unit);
- gl.BindTexture(target, handle);
- if (ToBackend(view->GetTexture())->GetGLFormat().format ==
- GL_DEPTH_STENCIL) {
- Aspect aspect = view->GetAspects();
- ASSERT(HasOneBit(aspect));
- switch (aspect) {
- case Aspect::None:
- case Aspect::Color:
- case Aspect::CombinedDepthStencil:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- case Aspect::Depth:
- gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
- GL_DEPTH_COMPONENT);
- break;
- case Aspect::Stencil:
- gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
- GL_STENCIL_INDEX);
- break;
- }
- }
- }
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureView* view =
- ToBackend(group->GetBindingAsTextureView(bindingIndex));
- Texture* texture = ToBackend(view->GetTexture());
- GLuint handle = texture->GetHandle();
- GLuint imageIndex = indices[bindingIndex];
-
- GLenum access;
- switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::WriteOnly:
- access = GL_WRITE_ONLY;
- break;
- case wgpu::StorageTextureAccess::Undefined:
- UNREACHABLE();
- }
-
- // OpenGL ES only supports either binding a layer or the entire
- // texture in glBindImageTexture().
- GLboolean isLayered;
- if (view->GetLayerCount() == 1) {
- isLayered = GL_FALSE;
- } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
- isLayered = GL_TRUE;
- } else {
- UNREACHABLE();
- }
-
- gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
- isLayered, view->GetBaseArrayLayer(), access,
- texture->GetGLFormat().internalFormat);
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
- textureViews = mBindGroups[index]
- ->GetBindingAsExternalTexture(bindingIndex)
- ->GetTextureViews();
-
- // Only single-plane formats are supported right now, so assert only one
- // view exists.
- ASSERT(textureViews[1].Get() == nullptr);
- ASSERT(textureViews[2].Get() == nullptr);
-
- TextureView* view = ToBackend(textureViews[0].Get());
- GLuint handle = view->GetHandle();
- GLenum target = view->GetGLTarget();
- GLuint viewIndex = indices[bindingIndex];
-
- for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
- gl.ActiveTexture(GL_TEXTURE0 + unit);
- gl.BindTexture(target, handle);
- }
- break;
- }
- }
- }
- }
-
- PipelineGL* mPipeline = nullptr;
- };
-
- void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
- const BeginRenderPassCmd* renderPass) {
- ASSERT(renderPass != nullptr);
-
- GLuint readFbo = 0;
- GLuint writeFbo = 0;
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
- if (readFbo == 0) {
- ASSERT(writeFbo == 0);
- gl.GenFramebuffers(1, &readFbo);
- gl.GenFramebuffers(1, &writeFbo);
- }
-
- const TextureBase* colorTexture =
- renderPass->colorAttachments[i].view->GetTexture();
- ASSERT(colorTexture->IsMultisampledTexture());
- ASSERT(colorTexture->GetArrayLayers() == 1);
- ASSERT(renderPass->colorAttachments[i].view->GetBaseMipLevel() == 0);
-
- GLuint colorHandle = ToBackend(colorTexture)->GetHandle();
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- ToBackend(colorTexture)->GetGLTarget(), colorHandle, 0);
-
- const TextureBase* resolveTexture =
- renderPass->colorAttachments[i].resolveTarget->GetTexture();
- GLuint resolveTextureHandle = ToBackend(resolveTexture)->GetHandle();
- GLuint resolveTargetMipmapLevel =
- renderPass->colorAttachments[i].resolveTarget->GetBaseMipLevel();
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
- if (resolveTexture->GetArrayLayers() == 1) {
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- GL_TEXTURE_2D, resolveTextureHandle,
- resolveTargetMipmapLevel);
- } else {
- GLuint resolveTargetArrayLayer =
- renderPass->colorAttachments[i].resolveTarget->GetBaseArrayLayer();
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
- resolveTextureHandle, resolveTargetMipmapLevel,
- resolveTargetArrayLayer);
- }
-
- gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0,
- renderPass->width, renderPass->height, GL_COLOR_BUFFER_BIT,
- GL_NEAREST);
- }
- }
-
- gl.DeleteFramebuffers(1, &readFbo);
- gl.DeleteFramebuffers(1, &writeFbo);
- }
-
- // OpenGL SPEC requires the source/destination region must be a region that is contained
- // within srcImage/dstImage. Here the size of the image refers to the virtual size, while
- // Dawn validates texture copy extent with the physical size, so we need to re-calculate the
- // texture copy extent to ensure it should fit in the virtual size of the subresource.
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- Extent3D validTextureCopyExtent = copySize;
- const TextureBase* texture = textureCopy.texture.Get();
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
- ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
- ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
- if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
- }
- if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
- }
-
- return validTextureCopyExtent;
- }
-
- void CopyTextureToTextureWithBlit(const OpenGLFunctions& gl,
- const TextureCopy& src,
- const TextureCopy& dst,
- const Extent3D& copySize) {
- Texture* srcTexture = ToBackend(src.texture.Get());
- Texture* dstTexture = ToBackend(dst.texture.Get());
-
- // Generate temporary framebuffers for the blits.
- GLuint readFBO = 0, drawFBO = 0;
- gl.GenFramebuffers(1, &readFBO);
- gl.GenFramebuffers(1, &drawFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
-
- // Reset state that may affect glBlitFramebuffer().
- gl.Disable(GL_SCISSOR_TEST);
- GLenum blitMask = 0;
- if (src.aspect & Aspect::Color) {
- blitMask |= GL_COLOR_BUFFER_BIT;
- }
- if (src.aspect & Aspect::Depth) {
- blitMask |= GL_DEPTH_BUFFER_BIT;
- }
- if (src.aspect & Aspect::Stencil) {
- blitMask |= GL_STENCIL_BUFFER_BIT;
- }
- // Iterate over all layers, doing a single blit for each.
- for (uint32_t layer = 0; layer < copySize.depthOrArrayLayers; ++layer) {
- // Bind all required aspects for this layer.
- for (Aspect aspect : IterateEnumMask(src.aspect)) {
- GLenum glAttachment;
- switch (aspect) {
- case Aspect::Color:
- glAttachment = GL_COLOR_ATTACHMENT0;
- break;
- case Aspect::Depth:
- glAttachment = GL_DEPTH_ATTACHMENT;
- break;
- case Aspect::Stencil:
- glAttachment = GL_STENCIL_ATTACHMENT;
- break;
- case Aspect::CombinedDepthStencil:
- case Aspect::None:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- }
- if (srcTexture->GetArrayLayers() == 1 &&
- srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment,
- srcTexture->GetGLTarget(), srcTexture->GetHandle(),
- src.mipLevel);
- } else {
- gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
- srcTexture->GetHandle(),
- static_cast<GLint>(src.mipLevel),
- static_cast<GLint>(src.origin.z + layer));
- }
- if (dstTexture->GetArrayLayers() == 1 &&
- dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment,
- dstTexture->GetGLTarget(), dstTexture->GetHandle(),
- dst.mipLevel);
- } else {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment,
- dstTexture->GetHandle(),
- static_cast<GLint>(dst.mipLevel),
- static_cast<GLint>(dst.origin.z + layer));
- }
- }
- gl.BlitFramebuffer(src.origin.x, src.origin.y, src.origin.x + copySize.width,
- src.origin.y + copySize.height, dst.origin.x, dst.origin.y,
- dst.origin.x + copySize.width, dst.origin.y + copySize.height,
- blitMask, GL_NEAREST);
- }
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &readFBO);
- gl.DeleteFramebuffers(1, &drawFBO);
- }
- bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
- return format == wgpu::TextureFormat::RGBA8Snorm ||
- format == wgpu::TextureFormat::RG8Snorm ||
- format == wgpu::TextureFormat::R8Snorm;
- }
- } // namespace
-
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
-
- MaybeError CommandBuffer::Execute() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
- for (size_t i = 0; i < scope.textures.size(); i++) {
- Texture* texture = ToBackend(scope.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(range);
- }
- });
- }
-
- for (BufferBase* bufferBase : scope.buffers) {
- ToBackend(bufferBase)->EnsureDataInitialized();
- }
- };
-
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
- for (const SyncScopeResourceUsage& scope :
- GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
- LazyClearSyncScope(scope);
- }
- DAWN_TRY(ExecuteComputePass());
-
- nextComputePassNumber++;
- break;
- }
-
- case Command::BeginRenderPass: {
- auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
- LazyClearRenderPassAttachments(cmd);
- DAWN_TRY(ExecuteRenderPass(cmd));
-
- nextRenderPassNumber++;
- break;
- }
-
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
-
- ToBackend(copy->source)->EnsureDataInitialized();
- ToBackend(copy->destination)
- ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
-
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
- ToBackend(copy->destination)->GetHandle());
- gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
- copy->sourceOffset, copy->destinationOffset, copy->size);
-
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- break;
- }
-
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- Buffer* buffer = ToBackend(src.buffer.Get());
-
- DAWN_INVALID_IF(
- dst.aspect == Aspect::Stencil,
- "Copies to stencil textures are unsupported on the OpenGL backend.");
-
- ASSERT(dst.aspect == Aspect::Color);
-
- buffer->EnsureDataInitialized();
- SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- dst.mipLevel)) {
- dst.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
- }
-
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
-
- TextureDataLayout dataLayout;
- dataLayout.offset = 0;
- dataLayout.bytesPerRow = src.bytesPerRow;
- dataLayout.rowsPerImage = src.rowsPerImage;
-
- DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
- copy->copySize);
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- break;
- }
-
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
- auto& copySize = copy->copySize;
- Texture* texture = ToBackend(src.texture.Get());
- Buffer* buffer = ToBackend(dst.buffer.Get());
- const Format& formatInfo = texture->GetFormat();
- const GLFormat& format = texture->GetGLFormat();
- GLenum target = texture->GetGLTarget();
-
- // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
- // avoid this codepath. OpenGL does not support readback from non-renderable
- // texture formats.
- if (formatInfo.isCompressed ||
- (TextureFormatIsSnorm(formatInfo.format) &&
- GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
- UNREACHABLE();
- }
-
- buffer->EnsureDataInitializedAsDestination(copy);
-
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange subresources =
- GetSubresourcesAffectedByCopy(src, copy->copySize);
- texture->EnsureSubresourceContentInitialized(subresources);
- // The only way to move data from a texture to a buffer in GL is via
- // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
- gl.BindTexture(target, texture->GetHandle());
-
- GLuint readFBO = 0;
- gl.GenFramebuffers(1, &readFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
-
- const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
-
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
- gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
-
- GLenum glAttachment;
- GLenum glFormat;
- GLenum glType;
- switch (src.aspect) {
- case Aspect::Color:
- glAttachment = GL_COLOR_ATTACHMENT0;
- glFormat = format.format;
- glType = format.type;
- break;
- case Aspect::Depth:
- glAttachment = GL_DEPTH_ATTACHMENT;
- glFormat = GL_DEPTH_COMPONENT;
- glType = GL_FLOAT;
- break;
- case Aspect::Stencil:
- glAttachment = GL_STENCIL_ATTACHMENT;
- glFormat = GL_STENCIL_INDEX;
- glType = GL_UNSIGNED_BYTE;
- break;
-
- case Aspect::CombinedDepthStencil:
- case Aspect::None:
- case Aspect::Plane0:
- case Aspect::Plane1:
- UNREACHABLE();
- }
-
- uint8_t* offset =
- reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
- switch (texture->GetDimension()) {
- case wgpu::TextureDimension::e2D: {
- if (texture->GetArrayLayers() == 1) {
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
- texture->GetHandle(), src.mipLevel);
- gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat, glType, offset);
- break;
- }
- // Implementation for 2D array is the same as 3D.
- DAWN_FALLTHROUGH;
- }
-
- case wgpu::TextureDimension::e3D: {
- const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
- for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
- gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
- texture->GetHandle(), src.mipLevel,
- src.origin.z + z);
- gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat, glType, offset);
-
- offset += bytesPerImage;
- }
- break;
- }
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
-
- gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
-
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
- gl.DeleteFramebuffers(1, &readFBO);
- break;
- }
-
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
-
- // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
- // is not equal to imageExtentDst. For example when copySize fits in the virtual
- // size of the source image but does not fit in the one of the destination
- // image.
- Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
- Texture* srcTexture = ToBackend(src.texture.Get());
- Texture* dstTexture = ToBackend(dst.texture.Get());
-
- SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
- SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-
- srcTexture->EnsureSubresourceContentInitialized(srcRange);
- if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
- dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- dstTexture->EnsureSubresourceContentInitialized(dstRange);
- }
- if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
- gl.CopyImageSubData(srcTexture->GetHandle(), srcTexture->GetGLTarget(),
- src.mipLevel, src.origin.x, src.origin.y, src.origin.z,
- dstTexture->GetHandle(), dstTexture->GetGLTarget(),
- dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
- copySize.width, copySize.height,
- copy->copySize.depthOrArrayLayers);
- } else {
- CopyTextureToTextureWithBlit(gl, src, dst, copySize);
- }
- break;
- }
-
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
- }
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
- bool clearedToZero =
- dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
-
- if (!clearedToZero) {
- const std::vector<uint8_t> clearValues(cmd->size, 0u);
- gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size,
- clearValues.data());
- }
-
- break;
- }
-
- case Command::ResolveQuerySet: {
- // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
- SkipCommand(&mCommands, type);
- break;
- }
-
- case Command::WriteTimestamp: {
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
- }
-
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(&mCommands, type);
- break;
- }
-
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- uint64_t offset = write->offset;
- uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- dstBuffer->EnsureDataInitializedAsDestination(offset, size);
-
- gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- return {};
- }
-
- MaybeError CommandBuffer::ExecuteComputePass() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- ComputePipeline* lastPipeline = nullptr;
- BindGroupTracker bindGroupTracker = {};
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
-
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- bindGroupTracker.Apply(gl);
-
- gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
- gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- break;
- }
-
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- bindGroupTracker.Apply(gl);
-
- uint64_t indirectBufferOffset = dispatch->indirectOffset;
- Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
-
- gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
- gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- break;
- }
-
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
- lastPipeline->ApplyNow();
-
- bindGroupTracker.OnSetPipeline(lastPipeline);
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
-
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(&mCommands, type);
- break;
- }
-
- case Command::WriteTimestamp: {
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- // EndComputePass should have been called
- UNREACHABLE();
- }
-
- MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- GLuint fbo = 0;
-
- // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
- {
- // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
- // Windows/Intel. It should break any feedback loop before the clears, even if there
- // shouldn't be any negative effects from this. Investigate whether it's actually
- // needed.
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
- // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
- // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
- // be created and destroyed at draw time.
- gl.GenFramebuffers(1, &fbo);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
-
- // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
- // (GL_NONE).
- ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
-
- // Construct GL framebuffer
-
- ColorAttachmentIndex attachmentCount(uint8_t(0));
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- TextureViewBase* textureView = renderPass->colorAttachments[i].view.Get();
- GLuint texture = ToBackend(textureView->GetTexture())->GetHandle();
-
- GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
-
- // Attach color buffers.
- if (textureView->GetTexture()->GetArrayLayers() == 1) {
- GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture,
- textureView->GetBaseMipLevel());
- } else {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, texture,
- textureView->GetBaseMipLevel(),
- textureView->GetBaseArrayLayer());
- }
- drawBuffers[i] = glAttachment;
- attachmentCount = i;
- attachmentCount++;
- }
- gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- TextureViewBase* textureView = renderPass->depthStencilAttachment.view.Get();
- GLuint texture = ToBackend(textureView->GetTexture())->GetHandle();
- const Format& format = textureView->GetTexture()->GetFormat();
-
- // Attach depth/stencil buffer.
- GLenum glAttachment = 0;
- if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
- glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
- } else if (format.aspects == Aspect::Depth) {
- glAttachment = GL_DEPTH_ATTACHMENT;
- } else if (format.aspects == Aspect::Stencil) {
- glAttachment = GL_STENCIL_ATTACHMENT;
- } else {
- UNREACHABLE();
- }
-
- if (textureView->GetTexture()->GetArrayLayers() == 1) {
- GLenum target = ToBackend(textureView->GetTexture())->GetGLTarget();
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, texture,
- textureView->GetBaseMipLevel());
- } else {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, texture,
- textureView->GetBaseMipLevel(),
- textureView->GetBaseArrayLayer());
- }
- }
- }
-
- ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
-
- // Set defaults for dynamic state before executing clears and commands.
- PersistentPipelineState persistentPipelineState;
- persistentPipelineState.SetDefaultState(gl);
- gl.BlendColor(0, 0, 0, 0);
- gl.Viewport(0, 0, renderPass->width, renderPass->height);
- gl.DepthRangef(0.0, 1.0);
- gl.Scissor(0, 0, renderPass->width, renderPass->height);
-
- // Clear framebuffer attachments as needed
- {
- for (ColorAttachmentIndex index :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- uint8_t i = static_cast<uint8_t>(index);
- auto* attachmentInfo = &renderPass->colorAttachments[index];
-
- // Load op - color
- if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
- gl.ColorMask(true, true, true, true);
-
- wgpu::TextureComponentType baseType =
- attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
- switch (baseType) {
- case wgpu::TextureComponentType::Float: {
- const std::array<float, 4> appliedClearColor =
- ConvertToFloatColor(attachmentInfo->clearColor);
- gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- const std::array<uint32_t, 4> appliedClearColor =
- ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
- gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- const std::array<int32_t, 4> appliedClearColor =
- ConvertToSignedIntegerColor(attachmentInfo->clearColor);
- gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
- break;
- }
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- }
-
- if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
- // TODO(natlee@microsoft.com): call glDiscard to do optimization
- }
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto* attachmentInfo = &renderPass->depthStencilAttachment;
- const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
-
- // Load op - depth/stencil
- bool doDepthClear = attachmentFormat.HasDepth() &&
- (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
- bool doStencilClear = attachmentFormat.HasStencil() &&
- (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
-
- if (doDepthClear) {
- gl.DepthMask(GL_TRUE);
- }
- if (doStencilClear) {
- gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
- }
-
- if (doDepthClear && doStencilClear) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
- attachmentInfo->clearStencil);
- } else if (doDepthClear) {
- gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
- } else if (doStencilClear) {
- const GLint clearStencil = attachmentInfo->clearStencil;
- gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
- }
- }
- }
-
- RenderPipeline* lastPipeline = nullptr;
- uint64_t indexBufferBaseOffset = 0;
- GLenum indexBufferFormat;
- uint32_t indexFormatSize;
-
- VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
- BindGroupTracker bindGroupTracker = {};
-
- auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- if (draw->firstInstance > 0) {
- gl.DrawArraysInstancedBaseInstance(
- lastPipeline->GetGLPrimitiveTopology(), draw->firstVertex,
- draw->vertexCount, draw->instanceCount, draw->firstInstance);
- } else {
- // This branch is only needed on OpenGL < 4.2
- gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
- draw->firstVertex, draw->vertexCount,
- draw->instanceCount);
- }
- break;
- }
-
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- if (draw->firstInstance > 0) {
- gl.DrawElementsInstancedBaseVertexBaseInstance(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
- indexBufferFormat,
- reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
- indexBufferBaseOffset),
- draw->instanceCount, draw->baseVertex, draw->firstInstance);
- } else {
- // This branch is only needed on OpenGL < 4.2; ES < 3.2
- if (draw->baseVertex != 0) {
- gl.DrawElementsInstancedBaseVertex(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
- indexBufferFormat,
- reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
- indexBufferBaseOffset),
- draw->instanceCount, draw->baseVertex);
- } else {
- // This branch is only needed on OpenGL < 3.2; ES < 3.2
- gl.DrawElementsInstanced(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
- indexBufferFormat,
- reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
- indexBufferBaseOffset),
- draw->instanceCount);
- }
- }
- break;
- }
-
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- uint64_t indirectBufferOffset = draw->indirectOffset;
- Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
-
- gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DrawArraysIndirect(
- lastPipeline->GetGLPrimitiveTopology(),
- reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
- break;
- }
-
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-
- vertexStateBufferBindingTracker.Apply(gl);
- bindGroupTracker.Apply(gl);
-
- Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(indirectBuffer != nullptr);
-
- gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
- gl.DrawElementsIndirect(
- lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
- reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
- break;
- }
-
- case Command::InsertDebugMarker:
- case Command::PopDebugGroup:
- case Command::PushDebugGroup: {
- // Due to lack of linux driver support for GL_EXT_debug_marker
- // extension these functions are skipped.
- SkipCommand(iter, type);
- break;
- }
-
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- lastPipeline = ToBackend(cmd->pipeline).Get();
- lastPipeline->ApplyNow(persistentPipelineState);
-
- vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
- bindGroupTracker.OnSetPipeline(lastPipeline);
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
- cmd->dynamicOffsetCount, dynamicOffsets);
- break;
- }
-
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-
- indexBufferBaseOffset = cmd->offset;
- indexBufferFormat = IndexFormatType(cmd->format);
- indexFormatSize = IndexFormatSize(cmd->format);
- vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
- break;
- }
-
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
- cmd->offset);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
-
- if (renderPass->attachmentState->GetSampleCount() > 1) {
- ResolveMultisampledRenderTargets(gl, renderPass);
- }
- gl.DeleteFramebuffers(1, &fbo);
- return {};
- }
-
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- persistentPipelineState.SetStencilReference(gl, cmd->reference);
- break;
- }
-
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- if (gl.IsAtLeastGL(4, 1)) {
- gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
- } else {
- // Floating-point viewport coords are unsupported on OpenGL ES, but
- // truncation is ok because other APIs do not guarantee subpixel precision
- // either.
- gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
- static_cast<int>(cmd->width), static_cast<int>(cmd->height));
- }
- gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
- break;
- }
-
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
- break;
- }
-
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
- gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
- break;
- }
-
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- DoRenderBundleCommand(iter, type);
- }
- }
- break;
- }
-
- case Command::BeginOcclusionQuery: {
- return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
- }
-
- case Command::EndOcclusionQuery: {
- return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
- }
-
- case Command::WriteTimestamp:
- return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
-
- default: {
- DoRenderBundleCommand(&mCommands, type);
- break;
- }
- }
- }
-
- // EndRenderPass should have been called
- UNREACHABLE();
- }
-
- void DoTexSubImage(const OpenGLFunctions& gl,
- const TextureCopy& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& copySize) {
- Texture* texture = ToBackend(destination.texture.Get());
- ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
- const GLFormat& format = texture->GetGLFormat();
- GLenum target = texture->GetGLTarget();
- data = static_cast<const uint8_t*>(data) + dataLayout.offset;
- gl.ActiveTexture(GL_TEXTURE0);
- gl.BindTexture(target, texture->GetHandle());
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(destination.aspect).block;
-
- uint32_t x = destination.origin.x;
- uint32_t y = destination.origin.y;
- uint32_t z = destination.origin.z;
- if (texture->GetFormat().isCompressed) {
- size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
- Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
- uint32_t width = std::min(copySize.width, virtSize.width - x);
-
- // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
- // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
- // this limitation by copying the compressed texture data once per row.
- // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
- // Buffer Objects" for more details. For Desktop GL, we use row-by-row
- // copies only for uploads where bytesPerRow is not a multiple of byteSize.
- if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
- size_t imageSize =
- rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
-
- uint32_t height = std::min(copySize.height, virtSize.height - y);
-
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
-
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
- format.internalFormat, imageSize, data);
- } else {
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
- dataLayout.rowsPerImage * blockInfo.height);
- gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
- copySize.depthOrArrayLayers, format.internalFormat,
- imageSize, data);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- }
-
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
- gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
- } else {
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
-
- for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
- uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
- height, format.internalFormat, rowSize, d);
- d += dataLayout.bytesPerRow;
- }
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
-
- for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
-
- for (y = destination.origin.y; y < destination.origin.y + copySize.height;
- y += blockInfo.height) {
- uint32_t height = std::min(blockInfo.height, virtSize.height - y);
- gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
- height, 1, format.internalFormat, rowSize,
- d);
- d += dataLayout.bytesPerRow;
- }
-
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
- }
- }
- } else {
- uint32_t width = copySize.width;
- uint32_t height = copySize.height;
- if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
- dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
- format.format, format.type, data);
- } else {
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
- dataLayout.rowsPerImage * blockInfo.height);
- gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
- copySize.depthOrArrayLayers, format.format, format.type, data);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
- }
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- } else {
- if (texture->GetArrayLayers() == 1 &&
- texture->GetDimension() == wgpu::TextureDimension::e2D) {
- const uint8_t* d = static_cast<const uint8_t*>(data);
- for (; y < destination.origin.y + height; ++y) {
- gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
- format.format, format.type, d);
- d += dataLayout.bytesPerRow;
- }
- } else {
- const uint8_t* slice = static_cast<const uint8_t*>(data);
- for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
- const uint8_t* d = slice;
- for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
- gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
- format.format, format.type, d);
- d += dataLayout.bytesPerRow;
- }
- slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
- }
- }
- }
- }
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
deleted file mode 100644
index fde8751ef5a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
-#define DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
-
-#include "dawn_native/CommandBuffer.h"
-
-namespace dawn_native {
- struct BeginRenderPassCmd;
-} // namespace dawn_native
-
-namespace dawn_native { namespace opengl {
-
- class Device;
- struct OpenGLFunctions;
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-
- MaybeError Execute();
-
- private:
- MaybeError ExecuteComputePass();
- MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
- };
-
- // Like glTexSubImage*, the "data" argument is either a pointer to image data or
- // an offset if a PBO is bound.
- void DoTexSubImage(const OpenGLFunctions& gl,
- const TextureCopy& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& copySize);
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp
deleted file mode 100644
index 2ee7bb88c10..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/ComputePipelineGL.h"
-
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
-
- ComputePipeline::~ComputePipeline() = default;
-
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
- DeleteProgram(ToBackend(GetDevice())->gl);
- }
-
- MaybeError ComputePipeline::Initialize() {
- DAWN_TRY(
- InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
- return {};
- }
-
- void ComputePipeline::ApplyNow() {
- PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h
deleted file mode 100644
index f5646bafefc..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
-#define DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
-
-#include "dawn_native/ComputePipeline.h"
-
-#include "dawn_native/opengl/PipelineGL.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
-
- void ApplyNow();
-
- MaybeError Initialize() override;
-
- private:
- using ComputePipelineBase::ComputePipelineBase;
- ~ComputePipeline() override;
- void DestroyImpl() override;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_COMPUTEPIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
deleted file mode 100644
index 89bd9f8a116..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/DeviceGL.h"
-
-#include "dawn_native/BackendConnection.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/StagingBuffer.h"
-#include "dawn_native/opengl/BindGroupGL.h"
-#include "dawn_native/opengl/BindGroupLayoutGL.h"
-#include "dawn_native/opengl/BufferGL.h"
-#include "dawn_native/opengl/CommandBufferGL.h"
-#include "dawn_native/opengl/ComputePipelineGL.h"
-#include "dawn_native/opengl/PipelineLayoutGL.h"
-#include "dawn_native/opengl/QuerySetGL.h"
-#include "dawn_native/opengl/QueueGL.h"
-#include "dawn_native/opengl/RenderPipelineGL.h"
-#include "dawn_native/opengl/SamplerGL.h"
-#include "dawn_native/opengl/ShaderModuleGL.h"
-#include "dawn_native/opengl/SwapChainGL.h"
-#include "dawn_native/opengl/TextureGL.h"
-
-namespace dawn_native { namespace opengl {
-
- // static
- ResultOrError<Device*> Device::Create(AdapterBase* adapter,
- const DawnDeviceDescriptor* descriptor,
- const OpenGLFunctions& functions) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
- DAWN_TRY(device->Initialize());
- return device.Detach();
- }
-
- Device::Device(AdapterBase* adapter,
- const DawnDeviceDescriptor* descriptor,
- const OpenGLFunctions& functions)
- : DeviceBase(adapter, descriptor), gl(functions) {
- }
-
- Device::~Device() {
- Destroy();
- }
-
- MaybeError Device::Initialize() {
- InitTogglesFromDriver();
- mFormatTable = BuildGLFormatTable();
-
- return DeviceBase::Initialize(new Queue(this));
- }
-
- void Device::InitTogglesFromDriver() {
- bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
-
- bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
-
- // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
- bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
-
- bool supportsSnormRead =
- gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
-
- bool supportsDepthStencilRead =
- gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
-
- bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
- gl.IsGLExtensionSupported("GL_OES_sample_variables");
-
- // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
- // procs without the extension suffix.
- // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
-
- // supportsBaseVertex |=
- // (gl.IsAtLeastGLES(2, 0) &&
- // (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
- // gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
- // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
-
- // supportsBaseInstance |=
- // (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
- // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
-
- // TODO(crbug.com/dawn/343): Investigate emulation.
- SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
- SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
- SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
- SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
- SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
- SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
- SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
- // For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline.
- SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
- }
-
- const GLFormat& Device::GetGLFormat(const Format& format) {
- ASSERT(format.isSupported);
- ASSERT(format.GetIndex() < mFormatTable.size());
-
- const GLFormat& result = mFormatTable[format.GetIndex()];
- ASSERT(result.isSupportedOnBackend);
- return result;
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return AcquireRef(new Buffer(this, descriptor));
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return AcquireRef(new PipelineLayout(this, descriptor));
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return AcquireRef(new QuerySet(this, descriptor));
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return AcquireRef(new Sampler(this, descriptor));
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return AcquireRef(new SwapChain(this, descriptor));
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor));
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return AcquireRef(new TextureView(texture, descriptor));
- }
-
- void Device::SubmitFenceSync() {
- GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
- IncrementLastSubmittedCommandSerial();
- mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
- }
-
- MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
- ::EGLImage image) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
- wgpu::TextureUsage::StorageBinding),
- "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
- wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
-
- return {};
- }
- TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
- ::EGLImage image) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
- }
- if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
- return nullptr;
- }
-
- GLuint tex;
- gl.GenTextures(1, &tex);
- gl.BindTexture(GL_TEXTURE_2D, tex);
- gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
-
- GLint width, height, internalFormat;
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
- gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
-
- if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
- textureDescriptor->size.height != static_cast<uint32_t>(height) ||
- textureDescriptor->size.depthOrArrayLayers != 1) {
- ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
- "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
- width, height, &textureDescriptor->size));
- gl.DeleteTextures(1, &tex);
- return nullptr;
- }
-
- // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
- // in the passed-in TextureDescriptor.
- return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
- }
-
- MaybeError Device::TickImpl() {
- return {};
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial fenceSerial{0};
- while (!mFencesInFlight.empty()) {
- GLsync sync = mFencesInFlight.front().first;
- ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
-
- // Fence are added in order, so we can stop searching as soon
- // as we see one that's not ready.
-
- // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
- if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
- gl.Flush();
- }
- GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
- if (result == GL_TIMEOUT_EXPIRED) {
- return fenceSerial;
- }
- // Update fenceSerial since fence is ready.
- fenceSerial = tentativeSerial;
-
- gl.DeleteSync(sync);
-
- mFencesInFlight.pop();
-
- ASSERT(fenceSerial > GetCompletedCommandSerial());
- }
- return fenceSerial;
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
- }
-
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
- }
-
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- gl.Finish();
- DAWN_TRY(CheckPassedSerials());
- ASSERT(mFencesInFlight.empty());
-
- return {};
- }
-
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return 1;
- }
-
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return 1;
- }
-
- float Device::GetTimestampPeriodInNS() const {
- return 1.0f;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
deleted file mode 100644
index f1317069b19..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_DEVICEGL_H_
-#define DAWNNATIVE_OPENGL_DEVICEGL_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "common/Platform.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/QuerySet.h"
-#include "dawn_native/opengl/Forward.h"
-#include "dawn_native/opengl/GLFormat.h"
-#include "dawn_native/opengl/OpenGLFunctions.h"
-
-#include <queue>
-
-// Remove windows.h macros after glad's include of windows.h
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "common/windows_with_undefs.h"
-#endif
-
-typedef void* EGLImage;
-
-namespace dawn_native { namespace opengl {
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Device*> Create(AdapterBase* adapter,
- const DawnDeviceDescriptor* descriptor,
- const OpenGLFunctions& functions);
- ~Device() override;
-
- MaybeError Initialize();
-
- // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
- const OpenGLFunctions gl;
-
- const GLFormat& GetGLFormat(const Format& format);
-
- void SubmitFenceSync();
-
- MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
- ::EGLImage image);
- TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
- ::EGLImage image);
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
-
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- Device(AdapterBase* adapter,
- const DawnDeviceDescriptor* descriptor,
- const OpenGLFunctions& functions);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
-
- void InitTogglesFromDriver();
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
-
- GLFormatTable mFormatTable;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_DEVICEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h b/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
deleted file mode 100644
index 82d07661ae8..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_FORWARD_H_
-#define DAWNNATIVE_OPENGL_FORWARD_H_
-
-#include "dawn_native/ToBackend.h"
-
-namespace dawn_native { namespace opengl {
-
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class PersistentPipelineState;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class Sampler;
- class ShaderModule;
- class SwapChain;
- class Texture;
- class TextureView;
-
- struct OpenGLBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
- return ToBackendBase<OpenGLBackendTraits>(common);
- }
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
deleted file mode 100644
index af28814e999..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/GLFormat.h"
-
-namespace dawn_native { namespace opengl {
-
- GLFormatTable BuildGLFormatTable() {
- GLFormatTable table;
-
- using Type = GLFormat::ComponentType;
-
- auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
- GLenum format, GLenum type, Type componentType) {
- size_t index = ComputeFormatIndex(dawnFormat);
- ASSERT(index < table.size());
-
- table[index].internalFormat = internalFormat;
- table[index].format = format;
- table[index].type = type;
- table[index].componentType = componentType;
- table[index].isSupportedOnBackend = true;
- };
-
- // It's dangerous to go alone, take this:
- //
- // [ANGLE's formatutils.cpp]
- // [ANGLE's formatutilsgl.cpp]
- //
- // The format tables in these files are extremely complete and the best reference on GL
- // format support, enums, etc.
-
- // clang-format off
-
- // 1 byte color formats
- AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::R8Snorm, GL_R8_SNORM, GL_RED, GL_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::R8Uint, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(wgpu::TextureFormat::R8Sint, GL_R8I, GL_RED_INTEGER, GL_BYTE, Type::Int);
-
- // 2 bytes color formats
- AddFormat(wgpu::TextureFormat::R16Uint, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(wgpu::TextureFormat::R16Sint, GL_R16I, GL_RED_INTEGER, GL_SHORT, Type::Int);
- AddFormat(wgpu::TextureFormat::R16Float, GL_R16F, GL_RED, GL_HALF_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::RG8Unorm, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RG8Snorm, GL_RG8_SNORM, GL_RG, GL_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RG8Uint, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(wgpu::TextureFormat::RG8Sint, GL_RG8I, GL_RG_INTEGER, GL_BYTE, Type::Int);
-
- // 4 bytes color formats
- AddFormat(wgpu::TextureFormat::R32Uint, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(wgpu::TextureFormat::R32Sint, GL_R32I, GL_RED_INTEGER, GL_INT, Type::Int);
- AddFormat(wgpu::TextureFormat::R32Float, GL_R32F, GL_RED, GL_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::RG16Uint, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(wgpu::TextureFormat::RG16Sint, GL_RG16I, GL_RG_INTEGER, GL_SHORT, Type::Int);
- AddFormat(wgpu::TextureFormat::RG16Float, GL_RG16F, GL_RG, GL_HALF_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::RGBA8Unorm, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RGBA8UnormSrgb, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RGBA8Snorm, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(wgpu::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
-
- // This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
- AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
- AddFormat(wgpu::TextureFormat::RG11B10Ufloat, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
- AddFormat(wgpu::TextureFormat::RGB9E5Ufloat, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, Type::Float);
-
- // 8 bytes color formats
- AddFormat(wgpu::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(wgpu::TextureFormat::RG32Sint, GL_RG32I, GL_RG_INTEGER, GL_INT, Type::Int);
- AddFormat(wgpu::TextureFormat::RG32Float, GL_RG32F, GL_RG, GL_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::RGBA16Uint, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(wgpu::TextureFormat::RGBA16Sint, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, Type::Int);
- AddFormat(wgpu::TextureFormat::RGBA16Float, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, Type::Float);
-
- // 16 bytes color formats
- AddFormat(wgpu::TextureFormat::RGBA32Uint, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(wgpu::TextureFormat::RGBA32Sint, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, Type::Int);
- AddFormat(wgpu::TextureFormat::RGBA32Float, GL_RGBA32F, GL_RGBA, GL_FLOAT, Type::Float);
-
- // Depth stencil formats
- AddFormat(wgpu::TextureFormat::Depth32Float, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
- AddFormat(wgpu::TextureFormat::Depth24Plus, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, Type::DepthStencil);
- AddFormat(wgpu::TextureFormat::Depth24PlusStencil8, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, Type::DepthStencil);
- AddFormat(wgpu::TextureFormat::Depth16Unorm, GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, Type::DepthStencil);
-
- // Block compressed formats
- AddFormat(wgpu::TextureFormat::BC1RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC2RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC3RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC4RSnorm, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC4RUnorm, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC5RGSnorm, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC5RGUnorm, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC6HRGBFloat, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::BC6HRGBUfloat, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
- AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
-
- // clang-format on
-
- return table;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.h b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.h
deleted file mode 100644
index 255b17cdd5e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_GLFORMAT_H_
-#define DAWNNATIVE_OPENGL_GLFORMAT_H_
-
-#include "dawn_native/Format.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- struct GLFormat {
- GLenum internalFormat = 0;
- GLenum format = 0;
- GLenum type = 0;
- bool isSupportedOnBackend = false;
-
- // OpenGL has different functions depending on the format component type, for example
- // glClearBufferfv is only valid on formats with the Float ComponentType
- enum ComponentType { Float, Int, Uint, DepthStencil };
- ComponentType componentType;
- };
-
- using GLFormatTable = std::array<GLFormat, kKnownFormatCount>;
- GLFormatTable BuildGLFormatTable();
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_GLFORMAT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp
deleted file mode 100644
index 330999db6d2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/NativeSwapChainImplGL.h"
-
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
- PresentCallback present,
- void* presentUserdata)
- : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {
- }
-
- NativeSwapChainImpl::~NativeSwapChainImpl() {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.DeleteTextures(1, &mBackTexture);
- gl.DeleteFramebuffers(1, &mBackFBO);
- }
-
- void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.GenTextures(1, &mBackTexture);
- gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
- gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
-
- gl.GenFramebuffers(1, &mBackFBO);
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
- gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
- mBackTexture, 0);
- }
-
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- if (format != WGPUTextureFormat_RGBA8Unorm) {
- return "unsupported format";
- }
- ASSERT(width > 0);
- ASSERT(height > 0);
- mWidth = width;
- mHeight = height;
-
- const OpenGLFunctions& gl = mDevice->gl;
- gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
- // Reallocate the texture
- gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE,
- nullptr);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- nextTexture->texture.u32 = mBackTexture;
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::Present() {
- const OpenGLFunctions& gl = mDevice->gl;
- gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
- gl.Scissor(0, 0, mWidth, mHeight);
- gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
- GL_NEAREST);
-
- mPresentCallback(mPresentUserdata);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return wgpu::TextureFormat::RGBA8Unorm;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h
deleted file mode 100644
index acda00576f8..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
-#define DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
-
-#include "dawn_native/OpenGLBackend.h"
-
-#include "dawn_native/dawn_platform.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextGL;
-
- NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
- ~NativeSwapChainImpl();
-
- void Init(DawnWSIContextGL* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
-
- wgpu::TextureFormat GetPreferredFormat() const;
-
- private:
- PresentCallback mPresentCallback;
- void* mPresentUserdata;
-
- uint32_t mWidth = 0;
- uint32_t mHeight = 0;
- GLuint mBackFBO = 0;
- GLuint mBackTexture = 0;
-
- Device* mDevice = nullptr;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_NATIVESWAPCHAINIMPLGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
deleted file mode 100644
index 560f3a0342c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// OpenGLBackend.cpp: contains the definition of symbols exported by OpenGLBackend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-#include "dawn_native/OpenGLBackend.h"
-
-#include "common/SwapChainUtils.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/NativeSwapChainImplGL.h"
-
-namespace dawn_native { namespace opengl {
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {
- }
-
- AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {
- }
-
- DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
- PresentCallback present,
- void* presentUserdata) {
- Device* backendDevice = ToBackend(FromAPI(device));
-
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(
- new NativeSwapChainImpl(backendDevice, present, presentUserdata));
- impl.textureUsage = WGPUTextureUsage_Present;
-
- return impl;
- }
-
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
-
- ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
- : ExternalImageDescriptor(ExternalImageType::EGLImage) {
- }
-
- WGPUTexture WrapExternalEGLImage(WGPUDevice device,
- const ExternalImageDescriptorEGLImage* descriptor) {
- Device* backendDevice = ToBackend(FromAPI(device));
- TextureBase* texture =
- backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
- return ToAPI(texture);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp
deleted file mode 100644
index 472fa5b3ab4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/OpenGLFunctions.h"
-
-#include <cctype>
-
-namespace dawn_native { namespace opengl {
-
- MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
- DAWN_TRY(mVersion.Initialize(getProc));
- if (mVersion.IsES()) {
- DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
- } else {
- DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
- }
-
- InitializeSupportedGLExtensions();
-
- return {};
- }
-
- void OpenGLFunctions::InitializeSupportedGLExtensions() {
- int32_t numExtensions;
- GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
-
- for (int32_t i = 0; i < numExtensions; ++i) {
- const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
- mSupportedGLExtensionsSet.insert(extensionName);
- }
- }
-
- bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
- ASSERT(extension != nullptr);
- return mSupportedGLExtensionsSet.count(extension) != 0;
- }
-
- const OpenGLVersion& OpenGLFunctions::GetVersion() const {
- return mVersion;
- }
-
- bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
- return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
- }
-
- bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
- return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h
deleted file mode 100644
index 69180b48cfe..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
-#define DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
-
-#include <unordered_set>
-
-#include "dawn_native/opengl/OpenGLFunctionsBase_autogen.h"
-#include "dawn_native/opengl/OpenGLVersion.h"
-
-namespace dawn_native { namespace opengl {
-
- struct OpenGLFunctions : OpenGLFunctionsBase {
- public:
- MaybeError Initialize(GetProcAddress getProc);
-
- const OpenGLVersion& GetVersion() const;
- bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
- bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
-
- bool IsGLExtensionSupported(const char* extension) const;
-
- private:
- void InitializeSupportedGLExtensions();
-
- OpenGLVersion mVersion;
-
- std::unordered_set<std::string> mSupportedGLExtensionsSet;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_OPENGLFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.cpp
deleted file mode 100644
index edb6adf3f4e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/OpenGLVersion.h"
-
-#include <cctype>
-#include <tuple>
-
-namespace dawn_native { namespace opengl {
-
- MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
- PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
- if (getString == nullptr) {
- return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
- }
-
- std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
-
- if (version.find("OpenGL ES") != std::string::npos) {
- // ES spec states that the GL_VERSION string will be in the following format:
- // "OpenGL ES N.M vendor-specific information"
- mStandard = Standard::ES;
- mMajorVersion = version[10] - '0';
- mMinorVersion = version[12] - '0';
-
- // The minor version shouldn't get to two digits.
- ASSERT(version.size() <= 13 || !isdigit(version[13]));
- } else {
- // OpenGL spec states the GL_VERSION string will be in the following format:
- // <version number><space><vendor-specific information>
- // The version number is either of the form major number.minor number or major
- // number.minor number.release number, where the numbers all have one or more
- // digits
- mStandard = Standard::Desktop;
- mMajorVersion = version[0] - '0';
- mMinorVersion = version[2] - '0';
-
- // The minor version shouldn't get to two digits.
- ASSERT(version.size() <= 3 || !isdigit(version[3]));
- }
-
- return {};
- }
-
- bool OpenGLVersion::IsDesktop() const {
- return mStandard == Standard::Desktop;
- }
-
- bool OpenGLVersion::IsES() const {
- return mStandard == Standard::ES;
- }
-
- uint32_t OpenGLVersion::GetMajor() const {
- return mMajorVersion;
- }
-
- uint32_t OpenGLVersion::GetMinor() const {
- return mMinorVersion;
- }
-
- bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
- return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.h b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.h
deleted file mode 100644
index 88ddd50ed97..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLVersion.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_OPENGLVERSION_H_
-#define DAWNNATIVE_OPENGL_OPENGLVERSION_H_
-
-#include "dawn_native/opengl/OpenGLFunctionsBase_autogen.h"
-
-namespace dawn_native { namespace opengl {
-
- struct OpenGLVersion {
- public:
- MaybeError Initialize(GetProcAddress getProc);
-
- bool IsDesktop() const;
- bool IsES() const;
- uint32_t GetMajor() const;
- uint32_t GetMinor() const;
- bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
-
- private:
- enum class Standard {
- Desktop,
- ES,
- };
- uint32_t mMajorVersion;
- uint32_t mMinorVersion;
- Standard mStandard;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_OPENGLVERSION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.cpp
deleted file mode 100644
index a37bb6700bb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/PersistentPipelineStateGL.h"
-
-#include "dawn_native/opengl/OpenGLFunctions.h"
-
-namespace dawn_native { namespace opengl {
-
- void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
- CallGLStencilFunc(gl);
- }
-
- void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
- GLenum stencilBackCompareFunction,
- GLenum stencilFrontCompareFunction,
- uint32_t stencilReadMask) {
- if (mStencilBackCompareFunction == stencilBackCompareFunction &&
- mStencilFrontCompareFunction == stencilFrontCompareFunction &&
- mStencilReadMask == stencilReadMask) {
- return;
- }
-
- mStencilBackCompareFunction = stencilBackCompareFunction;
- mStencilFrontCompareFunction = stencilFrontCompareFunction;
- mStencilReadMask = stencilReadMask;
- CallGLStencilFunc(gl);
- }
-
- void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
- uint32_t stencilReference) {
- if (mStencilReference == stencilReference) {
- return;
- }
-
- mStencilReference = stencilReference;
- CallGLStencilFunc(gl);
- }
-
- void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
- gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
- mStencilReadMask);
- gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
- mStencilReadMask);
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.h
deleted file mode 100644
index c98ec152a4a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PersistentPipelineStateGL.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
-#define DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
-
-#include "dawn_native/dawn_platform.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- struct OpenGLFunctions;
-
- class PersistentPipelineState {
- public:
- void SetDefaultState(const OpenGLFunctions& gl);
- void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
- GLenum stencilBackCompareFunction,
- GLenum stencilFrontCompareFunction,
- uint32_t stencilReadMask);
- void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
-
- private:
- void CallGLStencilFunc(const OpenGLFunctions& gl);
-
- GLenum mStencilBackCompareFunction = GL_ALWAYS;
- GLenum mStencilFrontCompareFunction = GL_ALWAYS;
- GLuint mStencilReadMask = 0xffffffff;
- GLuint mStencilReference = 0;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_PERSISTENTPIPELINESTATEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
deleted file mode 100644
index 614a125c173..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/PipelineGL.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/opengl/Forward.h"
-#include "dawn_native/opengl/OpenGLFunctions.h"
-#include "dawn_native/opengl/PipelineLayoutGL.h"
-#include "dawn_native/opengl/SamplerGL.h"
-#include "dawn_native/opengl/ShaderModuleGL.h"
-
-#include <set>
-#include <sstream>
-
-namespace dawn_native { namespace opengl {
-
- namespace {
-
- GLenum GLShaderType(SingleShaderStage stage) {
- switch (stage) {
- case SingleShaderStage::Vertex:
- return GL_VERTEX_SHADER;
- case SingleShaderStage::Fragment:
- return GL_FRAGMENT_SHADER;
- case SingleShaderStage::Compute:
- return GL_COMPUTE_SHADER;
- }
- UNREACHABLE();
- }
-
- } // namespace
-
- PipelineGL::PipelineGL() : mProgram(0) {
- }
-
- PipelineGL::~PipelineGL() = default;
-
- MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
- const PipelineLayout* layout,
- const PerStage<ProgrammableStage>& stages) {
- auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
- const char* source) -> ResultOrError<GLuint> {
- GLuint shader = gl.CreateShader(type);
- gl.ShaderSource(shader, 1, &source, nullptr);
- gl.CompileShader(shader);
-
- GLint compileStatus = GL_FALSE;
- gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
- if (compileStatus == GL_FALSE) {
- GLint infoLogLength = 0;
- gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
-
- if (infoLogLength > 1) {
- std::vector<char> buffer(infoLogLength);
- gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
- return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s",
- source, buffer.data());
- }
- }
- return shader;
- };
-
- mProgram = gl.CreateProgram();
-
- // Compute the set of active stages.
- wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
- for (SingleShaderStage stage : IterateStages(kAllStages)) {
- if (stages[stage].module != nullptr) {
- activeStages |= StageBit(stage);
- }
- }
-
- // Create an OpenGL shader for each stage and gather the list of combined samplers.
- PerStage<CombinedSamplerInfo> combinedSamplers;
- bool needsDummySampler = false;
- std::vector<GLuint> glShaders;
- for (SingleShaderStage stage : IterateStages(activeStages)) {
- const ShaderModule* module = ToBackend(stages[stage].module.Get());
- std::string glsl;
- DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
- &combinedSamplers[stage], layout,
- &needsDummySampler));
- GLuint shader;
- DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
- gl.AttachShader(mProgram, shader);
- glShaders.push_back(shader);
- }
-
- if (needsDummySampler) {
- SamplerDescriptor desc = {};
- ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
- ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
- ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
- mDummySampler =
- ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
- }
-
- // Link all the shaders together.
- gl.LinkProgram(mProgram);
-
- GLint linkStatus = GL_FALSE;
- gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
- if (linkStatus == GL_FALSE) {
- GLint infoLogLength = 0;
- gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
-
- if (infoLogLength > 1) {
- std::vector<char> buffer(infoLogLength);
- gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
- return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
- }
- }
-
- // Compute links between stages for combined samplers, then bind them to texture units
- gl.UseProgram(mProgram);
- const auto& indices = layout->GetBindingIndexInfo();
-
- std::set<CombinedSampler> combinedSamplersSet;
- for (SingleShaderStage stage : IterateStages(activeStages)) {
- for (const CombinedSampler& combined : combinedSamplers[stage]) {
- combinedSamplersSet.insert(combined);
- }
- }
-
- mUnitsForSamplers.resize(layout->GetNumSamplers());
- mUnitsForTextures.resize(layout->GetNumSampledTextures());
-
- GLuint textureUnit = layout->GetTextureUnitsUsed();
- for (const auto& combined : combinedSamplersSet) {
- const std::string& name = combined.GetName();
- GLint location = gl.GetUniformLocation(mProgram, name.c_str());
-
- if (location == -1) {
- continue;
- }
-
- gl.Uniform1i(location, textureUnit);
-
- bool shouldUseFiltering;
- {
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.textureLocation.group);
- BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
-
- GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
- mUnitsForTextures[textureIndex].push_back(textureUnit);
-
- shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
- wgpu::TextureSampleType::Float;
- }
- {
- if (combined.useDummySampler) {
- mDummySamplerUnits.push_back(textureUnit);
- } else {
- const BindGroupLayoutBase* bgl =
- layout->GetBindGroupLayout(combined.samplerLocation.group);
- BindingIndex bindingIndex =
- bgl->GetBindingIndex(combined.samplerLocation.binding);
-
- GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
- mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
- }
- }
-
- textureUnit++;
- }
-
- for (GLuint glShader : glShaders) {
- gl.DetachShader(mProgram, glShader);
- gl.DeleteShader(glShader);
- }
-
- return {};
- }
-
- void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
- gl.DeleteProgram(mProgram);
- }
-
- const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
- GLuint index) const {
- ASSERT(index < mUnitsForSamplers.size());
- return mUnitsForSamplers[index];
- }
-
- const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
- ASSERT(index < mUnitsForTextures.size());
- return mUnitsForTextures[index];
- }
-
- GLuint PipelineGL::GetProgramHandle() const {
- return mProgram;
- }
-
- void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
- gl.UseProgram(mProgram);
- for (GLuint unit : mDummySamplerUnits) {
- ASSERT(mDummySampler.Get() != nullptr);
- gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle());
- }
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
deleted file mode 100644
index be6c1dd5538..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_PIPELINEGL_H_
-#define DAWNNATIVE_OPENGL_PIPELINEGL_H_
-
-#include "dawn_native/Pipeline.h"
-
-#include "dawn_native/PerStage.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-#include <vector>
-
-namespace dawn_native {
- struct ProgrammableStage;
-} // namespace dawn_native
-
-namespace dawn_native { namespace opengl {
-
- struct OpenGLFunctions;
- class PipelineLayout;
- class Sampler;
-
- class PipelineGL {
- public:
- PipelineGL();
- ~PipelineGL();
-
- // For each unit a sampler is bound to we need to know if we should use filtering or not
- // because int and uint texture are only complete without filtering.
- struct SamplerUnit {
- GLuint unit;
- bool shouldUseFiltering;
- };
- const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
- const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
- GLuint GetProgramHandle() const;
-
- protected:
- void ApplyNow(const OpenGLFunctions& gl);
- MaybeError InitializeBase(const OpenGLFunctions& gl,
- const PipelineLayout* layout,
- const PerStage<ProgrammableStage>& stages);
- void DeleteProgram(const OpenGLFunctions& gl);
-
- private:
- GLuint mProgram;
- std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
- std::vector<std::vector<GLuint>> mUnitsForTextures;
- std::vector<GLuint> mDummySamplerUnits;
- // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
- // destruction complex as it requires the sampler to be destroyed before the sampler cache.
- Ref<Sampler> mDummySampler;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_PIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
deleted file mode 100644
index a1742a54b54..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/PipelineLayoutGL.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
- : PipelineLayoutBase(device, descriptor) {
- GLuint uboIndex = 0;
- GLuint samplerIndex = 0;
- GLuint sampledTextureIndex = 0;
- GLuint ssboIndex = 0;
- GLuint storageTextureIndex = 0;
-
- for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
- mIndexInfo[group].resize(bgl->GetBindingCount());
-
- for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
- ++bindingIndex) {
- const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- mIndexInfo[group][bindingIndex] = uboIndex;
- uboIndex++;
- break;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- mIndexInfo[group][bindingIndex] = ssboIndex;
- ssboIndex++;
- break;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- break;
-
- case BindingInfoType::Sampler:
- mIndexInfo[group][bindingIndex] = samplerIndex;
- samplerIndex++;
- break;
-
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
- mIndexInfo[group][bindingIndex] = sampledTextureIndex;
- sampledTextureIndex++;
- break;
-
- case BindingInfoType::StorageTexture:
- mIndexInfo[group][bindingIndex] = storageTextureIndex;
- storageTextureIndex++;
- break;
- }
- }
- }
-
- mNumSamplers = samplerIndex;
- mNumSampledTextures = sampledTextureIndex;
- }
-
- const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
- return mIndexInfo;
- }
-
- GLuint PipelineLayout::GetTextureUnitsUsed() const {
- return 0;
- }
-
- size_t PipelineLayout::GetNumSamplers() const {
- return mNumSamplers;
- }
-
- size_t PipelineLayout::GetNumSampledTextures() const {
- return mNumSampledTextures;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
deleted file mode 100644
index eeff7182dda..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
-#define DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
-
-#include "dawn_native/PipelineLayout.h"
-
-#include "common/ityp_array.h"
-#include "common/ityp_vector.h"
-#include "dawn_native/BindingInfo.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
-
- using BindingIndexInfo =
- ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
- const BindingIndexInfo& GetBindingIndexInfo() const;
-
- GLuint GetTextureUnitsUsed() const;
- size_t GetNumSamplers() const;
- size_t GetNumSampledTextures() const;
-
- private:
- ~PipelineLayout() override = default;
- BindingIndexInfo mIndexInfo;
- size_t mNumSamplers;
- size_t mNumSampledTextures;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_PIPELINELAYOUTGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp
deleted file mode 100644
index 2f712919dbb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/QuerySetGL.h"
-
-#include "dawn_native/opengl/DeviceGL.h"
-
-namespace dawn_native { namespace opengl {
-
- QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
- : QuerySetBase(device, descriptor) {
- }
-
- QuerySet::~QuerySet() = default;
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h
deleted file mode 100644
index b0a31daff6e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QuerySetGL.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_QUERYSETGL_H_
-#define DAWNNATIVE_OPENGL_QUERYSETGL_H_
-
-#include "dawn_native/QuerySet.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class QuerySet final : public QuerySetBase {
- public:
- QuerySet(Device* device, const QuerySetDescriptor* descriptor);
-
- private:
- ~QuerySet() override;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_QUERYSETGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
deleted file mode 100644
index ec98b64bb4b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/QueueGL.h"
-
-#include "dawn_native/opengl/BufferGL.h"
-#include "dawn_native/opengl/CommandBufferGL.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/TextureGL.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-namespace dawn_native { namespace opengl {
-
- Queue::Queue(Device* device) : QueueBase(device) {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
-
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->Execute());
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
-
- device->SubmitFenceSync();
- return {};
- }
-
- MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
-
- gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
- gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
- return {};
- }
-
- MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) {
- DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
- "Writes to stencil textures unsupported on the OpenGL backend.");
-
- TextureCopy textureCopy;
- textureCopy.texture = destination.texture;
- textureCopy.mipLevel = destination.mipLevel;
- textureCopy.origin = destination.origin;
- textureCopy.aspect =
- SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
-
- SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
- if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
- destination.mipLevel)) {
- destination.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
- }
- DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
- return {};
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
deleted file mode 100644
index b5a5243dc56..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_QUEUEGL_H_
-#define DAWNNATIVE_OPENGL_QUEUEGL_H_
-
-#include "dawn_native/Queue.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class Queue final : public QueueBase {
- public:
- Queue(Device* device);
-
- private:
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- MaybeError WriteBufferImpl(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) override;
- MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
- const void* data,
- const TextureDataLayout& dataLayout,
- const Extent3D& writeSizePixel) override;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_QUEUEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
deleted file mode 100644
index bc8c2c6155a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/RenderPipelineGL.h"
-
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/Forward.h"
-#include "dawn_native/opengl/PersistentPipelineStateGL.h"
-#include "dawn_native/opengl/UtilsGL.h"
-
-namespace dawn_native { namespace opengl {
-
- namespace {
-
- GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
- switch (primitiveTopology) {
- case wgpu::PrimitiveTopology::PointList:
- return GL_POINTS;
- case wgpu::PrimitiveTopology::LineList:
- return GL_LINES;
- case wgpu::PrimitiveTopology::LineStrip:
- return GL_LINE_STRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return GL_TRIANGLES;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return GL_TRIANGLE_STRIP;
- }
- UNREACHABLE();
- }
-
- void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
- wgpu::FrontFace face,
- wgpu::CullMode mode) {
- // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
- // which is different from WebGPU and other backends (Y axis is down).
- GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
- gl.FrontFace(direction);
-
- if (mode == wgpu::CullMode::None) {
- gl.Disable(GL_CULL_FACE);
- } else {
- gl.Enable(GL_CULL_FACE);
-
- GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
- gl.CullFace(cullMode);
- }
- }
-
- GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return GL_ZERO;
- case wgpu::BlendFactor::One:
- return GL_ONE;
- case wgpu::BlendFactor::Src:
- return GL_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return GL_ONE_MINUS_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return GL_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return GL_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return GL_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return GL_ONE_MINUS_DST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return GL_DST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return GL_ONE_MINUS_DST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return GL_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::Constant:
- return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
- }
- UNREACHABLE();
- }
-
- GLenum GLBlendMode(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return GL_FUNC_ADD;
- case wgpu::BlendOperation::Subtract:
- return GL_FUNC_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return GL_FUNC_REVERSE_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return GL_MIN;
- case wgpu::BlendOperation::Max:
- return GL_MAX;
- }
- UNREACHABLE();
- }
-
- void ApplyColorState(const OpenGLFunctions& gl,
- ColorAttachmentIndex attachment,
- const ColorTargetState* state) {
- GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
- if (state->blend != nullptr) {
- gl.Enablei(GL_BLEND, colorBuffer);
- gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
- GLBlendMode(state->blend->alpha.operation));
- gl.BlendFuncSeparatei(colorBuffer,
- GLBlendFactor(state->blend->color.srcFactor, false),
- GLBlendFactor(state->blend->color.dstFactor, false),
- GLBlendFactor(state->blend->alpha.srcFactor, true),
- GLBlendFactor(state->blend->alpha.dstFactor, true));
- } else {
- gl.Disablei(GL_BLEND, colorBuffer);
- }
- gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
- state->writeMask & wgpu::ColorWriteMask::Green,
- state->writeMask & wgpu::ColorWriteMask::Blue,
- state->writeMask & wgpu::ColorWriteMask::Alpha);
- }
-
- void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
- if (state->blend != nullptr) {
- gl.Enable(GL_BLEND);
- gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
- GLBlendMode(state->blend->alpha.operation));
- gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
- GLBlendFactor(state->blend->color.dstFactor, false),
- GLBlendFactor(state->blend->alpha.srcFactor, true),
- GLBlendFactor(state->blend->alpha.dstFactor, true));
- } else {
- gl.Disable(GL_BLEND);
- }
- gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
- state->writeMask & wgpu::ColorWriteMask::Green,
- state->writeMask & wgpu::ColorWriteMask::Blue,
- state->writeMask & wgpu::ColorWriteMask::Alpha);
- }
-
- bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
- return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
- lhs.dstFactor == rhs.dstFactor;
- }
-
- GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
- switch (stencilOperation) {
- case wgpu::StencilOperation::Keep:
- return GL_KEEP;
- case wgpu::StencilOperation::Zero:
- return GL_ZERO;
- case wgpu::StencilOperation::Replace:
- return GL_REPLACE;
- case wgpu::StencilOperation::Invert:
- return GL_INVERT;
- case wgpu::StencilOperation::IncrementClamp:
- return GL_INCR;
- case wgpu::StencilOperation::DecrementClamp:
- return GL_DECR;
- case wgpu::StencilOperation::IncrementWrap:
- return GL_INCR_WRAP;
- case wgpu::StencilOperation::DecrementWrap:
- return GL_DECR_WRAP;
- }
- UNREACHABLE();
- }
-
- void ApplyDepthStencilState(const OpenGLFunctions& gl,
- const DepthStencilState* descriptor,
- PersistentPipelineState* persistentPipelineState) {
- // Depth writes only occur if depth is enabled
- if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
- !descriptor->depthWriteEnabled) {
- gl.Disable(GL_DEPTH_TEST);
- } else {
- gl.Enable(GL_DEPTH_TEST);
- }
-
- if (descriptor->depthWriteEnabled) {
- gl.DepthMask(GL_TRUE);
- } else {
- gl.DepthMask(GL_FALSE);
- }
-
- gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
-
- if (StencilTestEnabled(descriptor)) {
- gl.Enable(GL_STENCIL_TEST);
- } else {
- gl.Disable(GL_STENCIL_TEST);
- }
-
- GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
- GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
- persistentPipelineState->SetStencilFuncsAndMask(
- gl, backCompareFunction, frontCompareFunction, descriptor->stencilReadMask);
-
- gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
- OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
- OpenGLStencilOperation(descriptor->stencilBack.passOp));
- gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
- OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
- OpenGLStencilOperation(descriptor->stencilFront.passOp));
-
- gl.StencilMask(descriptor->stencilWriteMask);
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
- }
-
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
- : RenderPipelineBase(device, descriptor),
- mVertexArrayObject(0),
- mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
- }
-
- MaybeError RenderPipeline::Initialize() {
- DAWN_TRY(
- InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
- CreateVAOForVertexState();
- return {};
- }
-
- RenderPipeline::~RenderPipeline() = default;
-
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.DeleteVertexArrays(1, &mVertexArrayObject);
- gl.BindVertexArray(0);
- DeleteProgram(gl);
- }
-
- GLenum RenderPipeline::GetGLPrimitiveTopology() const {
- return mGlPrimitiveTopology;
- }
-
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
- RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
- ASSERT(!IsError());
- return mAttributesUsingVertexBuffer[slot];
- }
-
- void RenderPipeline::CreateVAOForVertexState() {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.GenVertexArrays(1, &mVertexArrayObject);
- gl.BindVertexArray(mVertexArrayObject);
-
- for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
- const auto& attribute = GetAttribute(location);
- GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
- gl.EnableVertexAttribArray(glAttrib);
-
- mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
- const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
-
- if (vertexBuffer.arrayStride == 0) {
- // Emulate a stride of zero (constant vertex attribute) by
- // setting the attribute instance divisor to a huge number.
- gl.VertexAttribDivisor(glAttrib, 0xffffffff);
- } else {
- switch (vertexBuffer.stepMode) {
- case wgpu::VertexStepMode::Vertex:
- break;
- case wgpu::VertexStepMode::Instance:
- gl.VertexAttribDivisor(glAttrib, 1);
- break;
- }
- }
- }
- }
-
- void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- PipelineGL::ApplyNow(gl);
-
- ASSERT(mVertexArrayObject);
- gl.BindVertexArray(mVertexArrayObject);
-
- ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
-
- ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
-
- gl.SampleMaski(0, GetSampleMask());
- if (IsAlphaToCoverageEnabled()) {
- gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
- } else {
- gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
- }
-
- if (IsDepthBiasEnabled()) {
- gl.Enable(GL_POLYGON_OFFSET_FILL);
- float depthBias = GetDepthBias();
- float slopeScale = GetDepthBiasSlopeScale();
- if (gl.PolygonOffsetClamp != nullptr) {
- gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
- } else {
- gl.PolygonOffset(slopeScale, depthBias);
- }
- } else {
- gl.Disable(GL_POLYGON_OFFSET_FILL);
- }
-
- if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
- for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
- }
- } else {
- const ColorTargetState* prevDescriptor = nullptr;
- for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
- if (!prevDescriptor) {
- ApplyColorState(gl, descriptor);
- prevDescriptor = descriptor;
- } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
- // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
- // per color target. Add validation to prevent this as it is not.
- ASSERT(false);
- } else if (descriptor->blend != nullptr) {
- if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
- !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
- descriptor->writeMask != prevDescriptor->writeMask) {
- // TODO(crbug.com/dawn/582)
- ASSERT(false);
- }
- }
- }
- }
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
deleted file mode 100644
index 75aab208538..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
-#define DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
-
-#include "dawn_native/RenderPipeline.h"
-
-#include "dawn_native/opengl/PipelineGL.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-#include <vector>
-
-namespace dawn_native { namespace opengl {
-
- class Device;
- class PersistentPipelineState;
-
- class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
-
- GLenum GetGLPrimitiveTopology() const;
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
- VertexBufferSlot slot) const;
-
- void ApplyNow(PersistentPipelineState& persistentPipelineState);
-
- MaybeError Initialize() override;
-
- private:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
- ~RenderPipeline() override;
- void DestroyImpl() override;
-
- void CreateVAOForVertexState();
-
- // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
- GLuint mVertexArrayObject;
- GLenum mGlPrimitiveTopology;
-
- ityp::array<VertexBufferSlot,
- ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
- kMaxVertexBuffers>
- mAttributesUsingVertexBuffer;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_RENDERPIPELINEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
deleted file mode 100644
index b331df301a2..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/SamplerGL.h"
-
-#include "common/Assert.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/UtilsGL.h"
-
-namespace dawn_native { namespace opengl {
-
- namespace {
- GLenum MagFilterMode(wgpu::FilterMode filter) {
- switch (filter) {
- case wgpu::FilterMode::Nearest:
- return GL_NEAREST;
- case wgpu::FilterMode::Linear:
- return GL_LINEAR;
- }
- UNREACHABLE();
- }
-
- GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
- switch (minFilter) {
- case wgpu::FilterMode::Nearest:
- switch (mipMapFilter) {
- case wgpu::FilterMode::Nearest:
- return GL_NEAREST_MIPMAP_NEAREST;
- case wgpu::FilterMode::Linear:
- return GL_NEAREST_MIPMAP_LINEAR;
- }
- case wgpu::FilterMode::Linear:
- switch (mipMapFilter) {
- case wgpu::FilterMode::Nearest:
- return GL_LINEAR_MIPMAP_NEAREST;
- case wgpu::FilterMode::Linear:
- return GL_LINEAR_MIPMAP_LINEAR;
- }
- }
- UNREACHABLE();
- }
-
- GLenum WrapMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return GL_REPEAT;
- case wgpu::AddressMode::MirrorRepeat:
- return GL_MIRRORED_REPEAT;
- case wgpu::AddressMode::ClampToEdge:
- return GL_CLAMP_TO_EDGE;
- }
- UNREACHABLE();
- }
-
- } // namespace
-
- Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
- : SamplerBase(device, descriptor) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- gl.GenSamplers(1, &mFilteringHandle);
- SetupGLSampler(mFilteringHandle, descriptor, false);
-
- gl.GenSamplers(1, &mNonFilteringHandle);
- SetupGLSampler(mNonFilteringHandle, descriptor, true);
- }
-
- Sampler::~Sampler() = default;
-
- void Sampler::DestroyImpl() {
- SamplerBase::DestroyImpl();
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- gl.DeleteSamplers(1, &mFilteringHandle);
- gl.DeleteSamplers(1, &mNonFilteringHandle);
- }
-
- void Sampler::SetupGLSampler(GLuint sampler,
- const SamplerDescriptor* descriptor,
- bool forceNearest) {
- Device* device = ToBackend(GetDevice());
- const OpenGLFunctions& gl = device->gl;
-
- if (forceNearest) {
- gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
- } else {
- gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER,
- MagFilterMode(descriptor->magFilter));
- gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
- MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
- }
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
- gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
-
- gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
- gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
-
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
- gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
- ToOpenGLCompareFunction(descriptor->compare));
- }
-
- if (gl.IsAtLeastGL(4, 6) ||
- gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
- gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
- }
- }
-
- GLuint Sampler::GetFilteringHandle() const {
- return mFilteringHandle;
- }
-
- GLuint Sampler::GetNonFilteringHandle() const {
- return mNonFilteringHandle;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.h
deleted file mode 100644
index f08930e3641..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_SAMPLERGL_H_
-#define DAWNNATIVE_OPENGL_SAMPLERGL_H_
-
-#include "dawn_native/Sampler.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class Sampler final : public SamplerBase {
- public:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
-
- GLuint GetFilteringHandle() const;
- GLuint GetNonFilteringHandle() const;
-
- private:
- ~Sampler() override;
- void DestroyImpl() override;
-
- void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
-
- GLuint mFilteringHandle;
-
- // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
- // for everything, which is important to preserve texture completeness for u/int textures.
- GLuint mNonFilteringHandle;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_SAMPLERGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
deleted file mode 100644
index dba7b33000d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/ShaderModuleGL.h"
-
-#include "common/Assert.h"
-#include "common/Platform.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/SpirvValidation.h"
-#include "dawn_native/TintUtils.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/PipelineLayoutGL.h"
-#include "dawn_native/opengl/SpirvUtils.h"
-
-#include <spirv_glsl.hpp>
-
-// Tint include must be after spirv_glsl.hpp, because spirv-cross has its own
-// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
-// is at 3 while spirv-headers is at 4.
-#undef SPV_REVISION
-#include <tint/tint.h>
-#include <spirv-tools/libspirv.hpp>
-
-#include <sstream>
-
-namespace dawn_native { namespace opengl {
-
- std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
- std::ostringstream o;
- o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
- << static_cast<uint32_t>(bindingNumber);
- return o.str();
- }
-
- bool operator<(const BindingLocation& a, const BindingLocation& b) {
- return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
- }
-
- bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
- return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) <
- std::tie(b.useDummySampler, a.samplerLocation, b.textureLocation);
- }
-
- std::string CombinedSampler::GetName() const {
- std::ostringstream o;
- o << "dawn_combined";
- if (useDummySampler) {
- o << "_dummy_sampler";
- } else {
- o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
- << static_cast<uint32_t>(samplerLocation.binding);
- }
- o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
- << static_cast<uint32_t>(textureLocation.binding);
- return o.str();
- }
-
- ResultOrError<std::unique_ptr<BindingInfoArray>> ExtractSpirvInfo(
- const DeviceBase* device,
- const spirv_cross::Compiler& compiler,
- const std::string& entryPointName,
- SingleShaderStage stage) {
- const auto& resources = compiler.get_shader_resources();
-
- // Fill in bindingInfo with the SPIRV bindings
- auto ExtractResourcesBinding =
- [](const DeviceBase* device,
- const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
- const spirv_cross::Compiler& compiler, BindingInfoType bindingType,
- BindingInfoArray* bindings, bool isStorageBuffer = false) -> MaybeError {
- for (const auto& resource : resources) {
- DAWN_INVALID_IF(
- !compiler.get_decoration_bitset(resource.id).get(spv::DecorationBinding),
- "No Binding decoration set for resource");
-
- DAWN_INVALID_IF(
- !compiler.get_decoration_bitset(resource.id).get(spv::DecorationDescriptorSet),
- "No Descriptor Decoration set for resource");
-
- BindingNumber bindingNumber(
- compiler.get_decoration(resource.id, spv::DecorationBinding));
- BindGroupIndex bindGroupIndex(
- compiler.get_decoration(resource.id, spv::DecorationDescriptorSet));
-
- DAWN_INVALID_IF(bindGroupIndex >= kMaxBindGroupsTyped,
- "Bind group index over limits in the SPIRV");
-
- const auto& it =
- (*bindings)[bindGroupIndex].emplace(bindingNumber, ShaderBindingInfo{});
- DAWN_INVALID_IF(!it.second, "Shader has duplicate bindings");
-
- ShaderBindingInfo* info = &it.first->second;
- info->id = resource.id;
- info->base_type_id = resource.base_type_id;
- info->bindingType = bindingType;
-
- switch (bindingType) {
- case BindingInfoType::Texture: {
- spirv_cross::SPIRType::ImageType imageType =
- compiler.get_type(info->base_type_id).image;
- spirv_cross::SPIRType::BaseType textureComponentType =
- compiler.get_type(imageType.type).basetype;
-
- info->texture.viewDimension =
- SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed);
- info->texture.multisampled = imageType.ms;
- info->texture.compatibleSampleTypes =
- SpirvBaseTypeToSampleTypeBit(textureComponentType);
-
- if (imageType.depth) {
- DAWN_INVALID_IF(
- (info->texture.compatibleSampleTypes & SampleTypeBit::Float) == 0,
- "Depth textures must have a float type");
- info->texture.compatibleSampleTypes = SampleTypeBit::Depth;
- }
-
- DAWN_INVALID_IF(imageType.ms && imageType.arrayed,
- "Multisampled array textures aren't supported");
- break;
- }
- case BindingInfoType::Buffer: {
- // Determine buffer size, with a minimum of 1 element in the runtime
- // array
- spirv_cross::SPIRType type = compiler.get_type(info->base_type_id);
- info->buffer.minBindingSize =
- compiler.get_declared_struct_size_runtime_array(type, 1);
-
- // Differentiate between readonly storage bindings and writable ones
- // based on the NonWritable decoration.
- // TODO(dawn:527): Could isStorageBuffer be determined by calling
- // compiler.get_storage_class(resource.id)?
- if (isStorageBuffer) {
- spirv_cross::Bitset flags =
- compiler.get_buffer_block_flags(resource.id);
- if (flags.get(spv::DecorationNonWritable)) {
- info->buffer.type = wgpu::BufferBindingType::ReadOnlyStorage;
- } else {
- info->buffer.type = wgpu::BufferBindingType::Storage;
- }
- } else {
- info->buffer.type = wgpu::BufferBindingType::Uniform;
- }
- break;
- }
- case BindingInfoType::StorageTexture: {
- spirv_cross::Bitset flags = compiler.get_decoration_bitset(resource.id);
- DAWN_INVALID_IF(!flags.get(spv::DecorationNonReadable),
- "Read-write storage textures are not supported.");
- info->storageTexture.access = wgpu::StorageTextureAccess::WriteOnly;
-
- spirv_cross::SPIRType::ImageType imageType =
- compiler.get_type(info->base_type_id).image;
- wgpu::TextureFormat storageTextureFormat =
- SpirvImageFormatToTextureFormat(imageType.format);
- DAWN_INVALID_IF(storageTextureFormat == wgpu::TextureFormat::Undefined,
- "Invalid image format declaration on storage image.");
-
- const Format& format = device->GetValidInternalFormat(storageTextureFormat);
- DAWN_INVALID_IF(!format.supportsStorageUsage,
- "The storage texture format (%s) is not supported.",
- storageTextureFormat);
-
- DAWN_INVALID_IF(imageType.ms,
- "Multisampled storage textures aren't supported.");
-
- DAWN_INVALID_IF(imageType.depth,
- "Depth storage textures aren't supported.");
-
- info->storageTexture.format = storageTextureFormat;
- info->storageTexture.viewDimension =
- SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed);
- break;
- }
- case BindingInfoType::Sampler: {
- info->sampler.isComparison = false;
- break;
- }
- case BindingInfoType::ExternalTexture: {
- return DAWN_FORMAT_VALIDATION_ERROR("External textures are not supported.");
- }
- }
- }
- return {};
- };
-
- std::unique_ptr<BindingInfoArray> resultBindings = std::make_unique<BindingInfoArray>();
- BindingInfoArray* bindings = resultBindings.get();
- DAWN_TRY(ExtractResourcesBinding(device, resources.uniform_buffers, compiler,
- BindingInfoType::Buffer, bindings));
- DAWN_TRY(ExtractResourcesBinding(device, resources.separate_images, compiler,
- BindingInfoType::Texture, bindings));
- DAWN_TRY(ExtractResourcesBinding(device, resources.separate_samplers, compiler,
- BindingInfoType::Sampler, bindings));
- DAWN_TRY(ExtractResourcesBinding(device, resources.storage_buffers, compiler,
- BindingInfoType::Buffer, bindings, true));
- // ReadonlyStorageTexture is used as a tag to do general storage texture handling.
- DAWN_TRY(ExtractResourcesBinding(device, resources.storage_images, compiler,
- BindingInfoType::StorageTexture, resultBindings.get()));
-
- return {std::move(resultBindings)};
- }
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor) {
- }
-
- // static
- ResultOrError<BindingInfoArrayTable> ShaderModule::ReflectShaderUsingSPIRVCross(
- DeviceBase* device,
- const std::vector<uint32_t>& spirv) {
- BindingInfoArrayTable result;
- spirv_cross::Compiler compiler(spirv);
- for (const spirv_cross::EntryPoint& entryPoint : compiler.get_entry_points_and_stages()) {
- ASSERT(result.count(entryPoint.name) == 0);
-
- SingleShaderStage stage = ExecutionModelToShaderStage(entryPoint.execution_model);
- compiler.set_entry_point(entryPoint.name, entryPoint.execution_model);
-
- std::unique_ptr<BindingInfoArray> bindings;
- DAWN_TRY_ASSIGN(bindings, ExtractSpirvInfo(device, compiler, entryPoint.name, stage));
- result[entryPoint.name] = std::move(bindings);
- }
- return std::move(result);
- }
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- DAWN_TRY(InitializeBase(parseResult));
- // Tint currently does not support emitting GLSL, so when provided a Tint program need to
- // generate SPIRV and SPIRV-Cross reflection data to be used in this backend.
- tint::writer::spirv::Options options;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- auto result = tint::writer::spirv::Generate(GetTintProgram(), options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
- result.error);
-
- DAWN_TRY_ASSIGN(mGLBindings, ReflectShaderUsingSPIRVCross(GetDevice(), result.spirv));
-
- return {};
- }
-
- ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
- SingleShaderStage stage,
- CombinedSamplerInfo* combinedSamplers,
- const PipelineLayout* layout,
- bool* needsDummySampler) const {
- tint::transform::SingleEntryPoint singleEntryPointTransform;
-
- tint::transform::DataMap transformInputs;
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&singleEntryPointTransform, GetTintProgram(),
- transformInputs, nullptr, nullptr));
-
- tint::writer::spirv::Options tintOptions;
- tintOptions.disable_workgroup_init =
- GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- auto result = tint::writer::spirv::Generate(&program, tintOptions);
- DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
- result.error);
-
- std::vector<uint32_t> spirv = std::move(result.spirv);
- DAWN_TRY(
- ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
-
- // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
- // be updated.
- spirv_cross::CompilerGLSL::Options options;
-
- // The range of Z-coordinate in the clipping volume of OpenGL is [-w, w], while it is
- // [0, w] in D3D12, Metal and Vulkan, so we should normalize it in shaders in all
- // backends. See the documentation of
- // spirv_cross::CompilerGLSL::Options::vertex::fixup_clipspace for more details.
- options.vertex.flip_vert_y = true;
- options.vertex.fixup_clipspace = true;
-
- const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
- if (version.IsDesktop()) {
- // The computation of GLSL version below only works for 3.3 and above.
- ASSERT(version.IsAtLeast(3, 3));
- }
- options.es = version.IsES();
- options.version = version.GetMajor() * 100 + version.GetMinor() * 10;
-
- spirv_cross::CompilerGLSL compiler(std::move(spirv));
- compiler.set_common_options(options);
- compiler.set_entry_point(entryPointName, ShaderStageToExecutionModel(stage));
-
- // Analyzes all OpImageFetch opcodes and checks if there are instances where
- // said instruction is used without a combined image sampler.
- // GLSL does not support texelFetch without a sampler.
- // To workaround this, we must inject a dummy sampler which can be used to form a sampler2D
- // at the call-site of texelFetch as necessary.
- spirv_cross::VariableID dummySamplerId = compiler.build_dummy_sampler_for_combined_images();
-
- // Extract bindings names so that it can be used to get its location in program.
- // Now translate the separate sampler / textures into combined ones and store their info. We
- // need to do this before removing the set and binding decorations.
- compiler.build_combined_image_samplers();
-
- for (const auto& combined : compiler.get_combined_image_samplers()) {
- combinedSamplers->emplace_back();
-
- CombinedSampler* info = &combinedSamplers->back();
- if (combined.sampler_id == dummySamplerId) {
- *needsDummySampler = true;
- info->useDummySampler = true;
- info->samplerLocation = {};
- } else {
- info->useDummySampler = false;
- info->samplerLocation.group = BindGroupIndex(
- compiler.get_decoration(combined.sampler_id, spv::DecorationDescriptorSet));
- info->samplerLocation.binding = BindingNumber(
- compiler.get_decoration(combined.sampler_id, spv::DecorationBinding));
- }
- info->textureLocation.group = BindGroupIndex(
- compiler.get_decoration(combined.image_id, spv::DecorationDescriptorSet));
- info->textureLocation.binding =
- BindingNumber(compiler.get_decoration(combined.image_id, spv::DecorationBinding));
- compiler.set_name(combined.combined_id, info->GetName());
- }
-
- const BindingInfoArray& bindingInfo = *(mGLBindings.at(entryPointName));
-
- // Change binding names to be "dawn_binding_<group>_<binding>".
- // Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
- // isn't supported on OSX's OpenGL.
- const PipelineLayout::BindingIndexInfo& indices = layout->GetBindingIndexInfo();
-
- // Modify the decoration of variables so that SPIRV-Cross outputs only
- // layout(binding=<index>) for interface variables.
- //
- // Tint is used for the reflection of bindings for the implicit pipeline layout and
- // pipeline/layout validation, but bindingInfo is set to mGLEntryPoints which is the
- // SPIRV-Cross reflection. Tint reflects bindings used more precisely than SPIRV-Cross so
- // some bindings in bindingInfo might not exist in the layout and querying the layout for
- // them would cause an ASSERT. That's why we defensively check that bindings are in the
- // layout before modifying them. This slight hack is ok because in the long term we will use
- // Tint to produce GLSL.
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- for (const auto& it : bindingInfo[group]) {
- const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(group);
- BindingNumber bindingNumber = it.first;
- const auto& info = it.second;
-
- if (!bgl->HasBinding(bindingNumber)) {
- continue;
- }
-
- // Remove the name of the base type. This works around an issue where if the SPIRV
- // has two uniform/storage interface variables that point to the same base type,
- // then SPIRV-Cross would emit two bindings with type names that conflict:
- //
- // layout(binding=0) uniform Buf {...} binding0;
- // layout(binding=1) uniform Buf {...} binding1;
- compiler.set_name(info.base_type_id, "");
-
- BindingIndex bindingIndex = bgl->GetBindingIndex(bindingNumber);
-
- compiler.unset_decoration(info.id, spv::DecorationDescriptorSet);
- compiler.set_decoration(info.id, spv::DecorationBinding,
- indices[group][bindingIndex]);
- }
- }
-
- std::string glsl = compiler.compile();
-
- if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
-
- GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
-
- return glsl;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
deleted file mode 100644
index d9552253574..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
-#define DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
-
-#include "dawn_native/ShaderModule.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
- class PipelineLayout;
-
- std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
-
- struct BindingLocation {
- BindGroupIndex group;
- BindingNumber binding;
- };
- bool operator<(const BindingLocation& a, const BindingLocation& b);
-
- struct CombinedSampler {
- BindingLocation samplerLocation;
- BindingLocation textureLocation;
- // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
- // one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused.
- bool useDummySampler;
- std::string GetName() const;
- };
- bool operator<(const CombinedSampler& a, const CombinedSampler& b);
-
- using CombinedSamplerInfo = std::vector<CombinedSampler>;
-
- using BindingInfoArrayTable =
- std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
- SingleShaderStage stage,
- CombinedSamplerInfo* combinedSamplers,
- const PipelineLayout* layout,
- bool* needsDummySampler) const;
-
- private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override = default;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- static ResultOrError<BindingInfoArrayTable> ReflectShaderUsingSPIRVCross(
- DeviceBase* device,
- const std::vector<uint32_t>& spirv);
-
- BindingInfoArrayTable mGLBindings;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_SHADERMODULEGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp
deleted file mode 100644
index 6ce41ca7be4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/SpirvUtils.h"
-
-namespace dawn_native {
-
- spv::ExecutionModel ShaderStageToExecutionModel(SingleShaderStage stage) {
- switch (stage) {
- case SingleShaderStage::Vertex:
- return spv::ExecutionModelVertex;
- case SingleShaderStage::Fragment:
- return spv::ExecutionModelFragment;
- case SingleShaderStage::Compute:
- return spv::ExecutionModelGLCompute;
- }
- UNREACHABLE();
- }
-
- SingleShaderStage ExecutionModelToShaderStage(spv::ExecutionModel model) {
- switch (model) {
- case spv::ExecutionModelVertex:
- return SingleShaderStage::Vertex;
- case spv::ExecutionModelFragment:
- return SingleShaderStage::Fragment;
- case spv::ExecutionModelGLCompute:
- return SingleShaderStage::Compute;
- default:
- UNREACHABLE();
- }
- }
-
- wgpu::TextureViewDimension SpirvDimToTextureViewDimension(spv::Dim dim, bool arrayed) {
- switch (dim) {
- case spv::Dim::Dim1D:
- return wgpu::TextureViewDimension::e1D;
- case spv::Dim::Dim2D:
- if (arrayed) {
- return wgpu::TextureViewDimension::e2DArray;
- } else {
- return wgpu::TextureViewDimension::e2D;
- }
- case spv::Dim::Dim3D:
- return wgpu::TextureViewDimension::e3D;
- case spv::Dim::DimCube:
- if (arrayed) {
- return wgpu::TextureViewDimension::CubeArray;
- } else {
- return wgpu::TextureViewDimension::Cube;
- }
- default:
- UNREACHABLE();
- }
- }
-
- wgpu::TextureFormat SpirvImageFormatToTextureFormat(spv::ImageFormat format) {
- switch (format) {
- case spv::ImageFormatR8:
- return wgpu::TextureFormat::R8Unorm;
- case spv::ImageFormatR8Snorm:
- return wgpu::TextureFormat::R8Snorm;
- case spv::ImageFormatR8ui:
- return wgpu::TextureFormat::R8Uint;
- case spv::ImageFormatR8i:
- return wgpu::TextureFormat::R8Sint;
- case spv::ImageFormatR16ui:
- return wgpu::TextureFormat::R16Uint;
- case spv::ImageFormatR16i:
- return wgpu::TextureFormat::R16Sint;
- case spv::ImageFormatR16f:
- return wgpu::TextureFormat::R16Float;
- case spv::ImageFormatRg8:
- return wgpu::TextureFormat::RG8Unorm;
- case spv::ImageFormatRg8Snorm:
- return wgpu::TextureFormat::RG8Snorm;
- case spv::ImageFormatRg8ui:
- return wgpu::TextureFormat::RG8Uint;
- case spv::ImageFormatRg8i:
- return wgpu::TextureFormat::RG8Sint;
- case spv::ImageFormatR32f:
- return wgpu::TextureFormat::R32Float;
- case spv::ImageFormatR32ui:
- return wgpu::TextureFormat::R32Uint;
- case spv::ImageFormatR32i:
- return wgpu::TextureFormat::R32Sint;
- case spv::ImageFormatRg16ui:
- return wgpu::TextureFormat::RG16Uint;
- case spv::ImageFormatRg16i:
- return wgpu::TextureFormat::RG16Sint;
- case spv::ImageFormatRg16f:
- return wgpu::TextureFormat::RG16Float;
- case spv::ImageFormatRgba8:
- return wgpu::TextureFormat::RGBA8Unorm;
- case spv::ImageFormatRgba8Snorm:
- return wgpu::TextureFormat::RGBA8Snorm;
- case spv::ImageFormatRgba8ui:
- return wgpu::TextureFormat::RGBA8Uint;
- case spv::ImageFormatRgba8i:
- return wgpu::TextureFormat::RGBA8Sint;
- case spv::ImageFormatRgb10A2:
- return wgpu::TextureFormat::RGB10A2Unorm;
- case spv::ImageFormatR11fG11fB10f:
- return wgpu::TextureFormat::RG11B10Ufloat;
- case spv::ImageFormatRg32f:
- return wgpu::TextureFormat::RG32Float;
- case spv::ImageFormatRg32ui:
- return wgpu::TextureFormat::RG32Uint;
- case spv::ImageFormatRg32i:
- return wgpu::TextureFormat::RG32Sint;
- case spv::ImageFormatRgba16ui:
- return wgpu::TextureFormat::RGBA16Uint;
- case spv::ImageFormatRgba16i:
- return wgpu::TextureFormat::RGBA16Sint;
- case spv::ImageFormatRgba16f:
- return wgpu::TextureFormat::RGBA16Float;
- case spv::ImageFormatRgba32f:
- return wgpu::TextureFormat::RGBA32Float;
- case spv::ImageFormatRgba32ui:
- return wgpu::TextureFormat::RGBA32Uint;
- case spv::ImageFormatRgba32i:
- return wgpu::TextureFormat::RGBA32Sint;
- default:
- return wgpu::TextureFormat::Undefined;
- }
- }
-
- wgpu::TextureComponentType SpirvBaseTypeToTextureComponentType(
- spirv_cross::SPIRType::BaseType spirvBaseType) {
- switch (spirvBaseType) {
- case spirv_cross::SPIRType::Float:
- return wgpu::TextureComponentType::Float;
- case spirv_cross::SPIRType::Int:
- return wgpu::TextureComponentType::Sint;
- case spirv_cross::SPIRType::UInt:
- return wgpu::TextureComponentType::Uint;
- default:
- UNREACHABLE();
- }
- }
-
- SampleTypeBit SpirvBaseTypeToSampleTypeBit(spirv_cross::SPIRType::BaseType spirvBaseType) {
- switch (spirvBaseType) {
- case spirv_cross::SPIRType::Float:
- return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
- case spirv_cross::SPIRType::Int:
- return SampleTypeBit::Sint;
- case spirv_cross::SPIRType::UInt:
- return SampleTypeBit::Uint;
- default:
- UNREACHABLE();
- }
- }
-
- VertexFormatBaseType SpirvBaseTypeToVertexFormatBaseType(
- spirv_cross::SPIRType::BaseType spirvBaseType) {
- switch (spirvBaseType) {
- case spirv_cross::SPIRType::Float:
- return VertexFormatBaseType::Float;
- case spirv_cross::SPIRType::Int:
- return VertexFormatBaseType::Sint;
- case spirv_cross::SPIRType::UInt:
- return VertexFormatBaseType::Uint;
- default:
- UNREACHABLE();
- }
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.h b/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.h
deleted file mode 100644
index 844b0b7009b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains utilities to convert from-to spirv.hpp datatypes without polluting other
-// headers with spirv.hpp
-
-#ifndef DAWNNATIVE_OPENGL_SPIRV_UTILS_H_
-#define DAWNNATIVE_OPENGL_SPIRV_UTILS_H_
-
-#include "dawn_native/Format.h"
-#include "dawn_native/PerStage.h"
-#include "dawn_native/VertexFormat.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <spirv_cross.hpp>
-
-namespace dawn_native {
-
- // Returns the spirv_cross equivalent for this shader stage and vice-versa.
- spv::ExecutionModel ShaderStageToExecutionModel(SingleShaderStage stage);
- SingleShaderStage ExecutionModelToShaderStage(spv::ExecutionModel model);
-
- // Returns the texture view dimension for corresponding to (dim, arrayed).
- wgpu::TextureViewDimension SpirvDimToTextureViewDimension(spv::Dim dim, bool arrayed);
-
- // Returns the texture format corresponding to format.
- wgpu::TextureFormat SpirvImageFormatToTextureFormat(spv::ImageFormat format);
-
- // Returns the format "component type" corresponding to the SPIRV base type.
- wgpu::TextureComponentType SpirvBaseTypeToTextureComponentType(
- spirv_cross::SPIRType::BaseType spirvBaseType);
- SampleTypeBit SpirvBaseTypeToSampleTypeBit(spirv_cross::SPIRType::BaseType spirvBaseType);
-
- // Returns the VertexFormatBaseType corresponding to the SPIRV base type.
- VertexFormatBaseType SpirvBaseTypeToVertexFormatBaseType(
- spirv_cross::SPIRType::BaseType spirvBaseType);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_OPENGL_SPIRV_UTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
deleted file mode 100644
index 8223a2ceb12..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/SwapChainGL.h"
-
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/Forward.h"
-#include "dawn_native/opengl/TextureGL.h"
-
-#include <dawn/dawn_wsi.h>
-
-namespace dawn_native { namespace opengl {
-
- SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- im.Init(im.userData, nullptr);
- }
-
- SwapChain::~SwapChain() {
- }
-
- TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
- GLuint nativeTexture = next.texture.u32;
- return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
- TextureBase::TextureState::OwnedExternal);
- }
-
- MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
- return {};
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
deleted file mode 100644
index 0cce92594ac..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
-#define DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
-
-#include "dawn_native/SwapChain.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
-
- class SwapChain final : public OldSwapChainBase {
- public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
-
- protected:
- ~SwapChain() override;
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* view) override;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_SWAPCHAINGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
deleted file mode 100644
index ceb57bd426d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/TextureGL.h"
-
-#include "common/Assert.h"
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/opengl/BufferGL.h"
-#include "dawn_native/opengl/CommandBufferGL.h"
-#include "dawn_native/opengl/DeviceGL.h"
-#include "dawn_native/opengl/UtilsGL.h"
-
-namespace dawn_native { namespace opengl {
-
- namespace {
-
- GLenum TargetForTexture(const TextureDescriptor* descriptor) {
- switch (descriptor->dimension) {
- case wgpu::TextureDimension::e2D:
- if (descriptor->size.depthOrArrayLayers > 1) {
- ASSERT(descriptor->sampleCount == 1);
- return GL_TEXTURE_2D_ARRAY;
- } else {
- if (descriptor->sampleCount > 1) {
- return GL_TEXTURE_2D_MULTISAMPLE;
- } else {
- return GL_TEXTURE_2D;
- }
- }
- case wgpu::TextureDimension::e3D:
- return GL_TEXTURE_3D;
-
- case wgpu::TextureDimension::e1D:
- break;
- }
- UNREACHABLE();
- }
-
- GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
- uint32_t arrayLayerCount,
- uint32_t sampleCount) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
- case wgpu::TextureViewDimension::e2DArray:
- if (arrayLayerCount == 1) {
- return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
- }
- ASSERT(sampleCount == 1);
- return GL_TEXTURE_2D_ARRAY;
- case wgpu::TextureViewDimension::Cube:
- return GL_TEXTURE_CUBE_MAP;
- case wgpu::TextureViewDimension::CubeArray:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- case wgpu::TextureViewDimension::e3D:
- return GL_TEXTURE_3D;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- GLuint GenTexture(const OpenGLFunctions& gl) {
- GLuint handle = 0;
- gl.GenTextures(1, &handle);
- return handle;
- }
-
- bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
- constexpr wgpu::TextureUsage kUsageNeedingTextureView =
- wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
- return usage & kUsageNeedingTextureView;
- }
-
- bool RequiresCreatingNewTextureView(const TextureBase* texture,
- const TextureViewDescriptor* textureViewDescriptor) {
- if (texture->GetFormat().format != textureViewDescriptor->format) {
- return true;
- }
-
- if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount) {
- return true;
- }
-
- if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
- return true;
- }
-
- if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
- (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
- textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
- // We need a separate view for one of the depth or stencil planes
- // because each glTextureView needs it's own handle to set
- // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
- // extra handle since it is likely sampled less often.
- return true;
- }
-
- switch (textureViewDescriptor->dimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return true;
- default:
- break;
- }
-
- return false;
- }
-
- } // namespace
-
- // Texture
-
- Texture::Texture(Device* device, const TextureDescriptor* descriptor)
- : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
- uint32_t width = GetWidth();
- uint32_t height = GetHeight();
- uint32_t levels = GetNumMipLevels();
- uint32_t arrayLayers = GetArrayLayers();
- uint32_t sampleCount = GetSampleCount();
-
- const GLFormat& glFormat = GetGLFormat();
-
- gl.BindTexture(mTarget, mHandle);
-
- // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to be
- // GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
- // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (arrayLayers > 1) {
- ASSERT(!IsMultisampledTexture());
- gl.TexStorage3D(mTarget, levels, glFormat.internalFormat, width, height,
- arrayLayers);
- } else {
- if (IsMultisampledTexture()) {
- gl.TexStorage2DMultisample(mTarget, sampleCount, glFormat.internalFormat,
- width, height, true);
- } else {
- gl.TexStorage2D(mTarget, levels, glFormat.internalFormat, width, height);
- }
- }
- break;
- case wgpu::TextureDimension::e3D:
- ASSERT(!IsMultisampledTexture());
- ASSERT(arrayLayers == 1);
- gl.TexStorage3D(mTarget, levels, glFormat.internalFormat, width, height,
- GetDepth());
- break;
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
-
- // The texture is not complete if it uses mipmapping and not all levels up to
- // MAX_LEVEL have been defined.
- gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
-
- if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- GetDevice()->ConsumedError(
- ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
- }
- }
-
- Texture::Texture(Device* device,
- const TextureDescriptor* descriptor,
- GLuint handle,
- TextureState state)
- : TextureBase(device, descriptor, state), mHandle(handle) {
- mTarget = TargetForTexture(descriptor);
- }
-
- Texture::~Texture() {
- }
-
- void Texture::DestroyImpl() {
- TextureBase::DestroyImpl();
- if (GetTextureState() == TextureState::OwnedInternal) {
- ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
- mHandle = 0;
- }
- }
-
- GLuint Texture::GetHandle() const {
- return mHandle;
- }
-
- GLenum Texture::GetGLTarget() const {
- return mTarget;
- }
-
- const GLFormat& Texture::GetGLFormat() const {
- return ToBackend(GetDevice())->GetGLFormat(GetFormat());
- }
-
- MaybeError Texture::ClearTexture(const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
- if (GetFormat().isCompressed) {
- return {};
- }
-
- Device* device = ToBackend(GetDevice());
- const OpenGLFunctions& gl = device->gl;
-
- uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
-
- if (GetFormat().isRenderable) {
- if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
- GLfloat depth = fClearColor;
- GLint stencil = clearColor;
- if (range.aspects & Aspect::Depth) {
- gl.DepthMask(GL_TRUE);
- }
- if (range.aspects & Aspect::Stencil) {
- gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
- }
-
- auto DoClear = [&](Aspect aspects) {
- if (aspects == (Aspect::Depth | Aspect::Stencil)) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
- } else if (aspects == Aspect::Depth) {
- gl.ClearBufferfv(GL_DEPTH, 0, &depth);
- } else if (aspects == Aspect::Stencil) {
- gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
- } else {
- UNREACHABLE();
- }
- };
-
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
-
- GLenum attachment;
- if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
- attachment = GL_DEPTH_STENCIL_ATTACHMENT;
- } else if (range.aspects == Aspect::Depth) {
- attachment = GL_DEPTH_ATTACHMENT;
- } else if (range.aspects == Aspect::Stencil) {
- attachment = GL_STENCIL_ATTACHMENT;
- } else {
- UNREACHABLE();
- }
-
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- switch (GetDimension()) {
- case wgpu::TextureDimension::e2D:
- if (GetArrayLayers() == 1) {
- Aspect aspectsToClear = Aspect::None;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, 0,
- aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- aspectsToClear |= aspect;
- }
-
- if (aspectsToClear == Aspect::None) {
- continue;
- }
-
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
- GetGLTarget(), GetHandle(),
- static_cast<GLint>(level));
- DoClear(aspectsToClear);
- } else {
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- Aspect aspectsToClear = Aspect::None;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer,
- aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- aspectsToClear |= aspect;
- }
-
- if (aspectsToClear == Aspect::None) {
- continue;
- }
-
- gl.FramebufferTextureLayer(
- GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
- static_cast<GLint>(level), static_cast<GLint>(layer));
- DoClear(aspectsToClear);
- }
- }
- break;
-
- case wgpu::TextureDimension::e1D:
- case wgpu::TextureDimension::e3D:
- UNREACHABLE();
- }
- }
-
- gl.DeleteFramebuffers(1, &framebuffer);
- } else {
- ASSERT(range.aspects == Aspect::Color);
-
- // For gl.ClearBufferiv/uiv calls
- constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
- constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
- std::array<GLuint, 4> clearColorData;
- clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
-
- // For gl.ClearBufferfv calls
- constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
- constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
- std::array<GLfloat, 4> fClearColorData;
- fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
-
- static constexpr uint32_t MAX_TEXEL_SIZE = 16;
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
- ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
-
- // For gl.ClearTexSubImage calls
- constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
-
- wgpu::TextureComponentType baseType =
- GetFormat().GetAspectInfo(Aspect::Color).baseType;
-
- const GLFormat& glFormat = GetGLFormat();
- for (uint32_t level = range.baseMipLevel;
- level < range.baseMipLevel + range.levelCount; ++level) {
- Extent3D mipSize = GetMipLevelPhysicalSize(level);
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- if (gl.IsAtLeastGL(4, 4)) {
- gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
- static_cast<GLint>(layer), mipSize.width,
- mipSize.height, mipSize.depthOrArrayLayers,
- glFormat.format, glFormat.type,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataBytes0.data()
- : kClearColorDataBytes255.data());
- continue;
- }
-
- GLuint framebuffer = 0;
- gl.GenFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
-
- GLenum attachment = GL_COLOR_ATTACHMENT0;
- gl.DrawBuffers(1, &attachment);
-
- gl.Disable(GL_SCISSOR_TEST);
- gl.ColorMask(true, true, true, true);
-
- auto DoClear = [&]() {
- switch (baseType) {
- case wgpu::TextureComponentType::Float: {
- gl.ClearBufferfv(GL_COLOR, 0,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataFloat0.data()
- : kClearColorDataFloat1.data());
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- gl.ClearBufferuiv(GL_COLOR, 0,
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataUint0.data()
- : kClearColorDataUint1.data());
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- gl.ClearBufferiv(GL_COLOR, 0,
- reinterpret_cast<const GLint*>(
- clearValue == TextureBase::ClearValue::Zero
- ? kClearColorDataUint0.data()
- : kClearColorDataUint1.data()));
- break;
- }
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- };
-
- if (GetArrayLayers() == 1) {
- switch (GetDimension()) {
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- case wgpu::TextureDimension::e2D:
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
- GetGLTarget(), GetHandle(), level);
- DoClear();
- break;
- case wgpu::TextureDimension::e3D:
- uint32_t depth =
- GetMipLevelVirtualSize(level).depthOrArrayLayers;
- for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
- GetHandle(), level, z);
- DoClear();
- }
- break;
- }
-
- } else {
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
- gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
- level, layer);
- DoClear();
- }
-
- gl.Enable(GL_SCISSOR_TEST);
- gl.DeleteFramebuffers(1, &framebuffer);
- gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
- }
- }
- }
- } else {
- ASSERT(range.aspects == Aspect::Color);
-
- // create temp buffer with clear color to copy to the texture image
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
- ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
-
- // Make sure that we are not rounding
- ASSERT(bytesPerRow % blockInfo.byteSize == 0);
- ASSERT(largestMipSize.height % blockInfo.height == 0);
-
- uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
- (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- if (bufferSize64 > std::numeric_limits<size_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
- size_t bufferSize = static_cast<size_t>(bufferSize64);
-
- dawn_native::BufferDescriptor descriptor = {};
- descriptor.mappedAtCreation = true;
- descriptor.usage = wgpu::BufferUsage::CopySrc;
- descriptor.size = bufferSize;
-
- // We don't count the lazy clear of srcBuffer because it is an internal buffer.
- // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
- Ref<Buffer> srcBuffer;
- DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
-
- // Fill the buffer with clear color
- memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
- srcBuffer->Unmap();
-
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- TextureCopy textureCopy;
- textureCopy.texture = this;
- textureCopy.mipLevel = level;
- textureCopy.origin = {};
- textureCopy.aspect = Aspect::Color;
-
- TextureDataLayout dataLayout;
- dataLayout.offset = 0;
- dataLayout.bytesPerRow = bytesPerRow;
- dataLayout.rowsPerImage = largestMipSize.height;
-
- Extent3D mipSize = GetMipLevelPhysicalSize(level);
-
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- textureCopy.origin.z = layer;
- DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
- }
- }
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- }
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
- }
- return {};
- }
-
- void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
- }
- }
-
- // TextureView
-
- TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
- mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
- texture->GetSampleCount());
-
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return;
- }
-
- if (!UsageNeedsTextureView(texture->GetUsage())) {
- mHandle = 0;
- } else if (!RequiresCreatingNewTextureView(texture, descriptor)) {
- mHandle = ToBackend(texture)->GetHandle();
- } else {
- // glTextureView() is supported on OpenGL version >= 4.3
- // TODO(crbug.com/dawn/593): support texture view on OpenGL version <= 4.2 and ES
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
- mHandle = GenTexture(gl);
- const Texture* textureGL = ToBackend(texture);
- const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(GetFormat());
- gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), glFormat.internalFormat,
- descriptor->baseMipLevel, descriptor->mipLevelCount,
- descriptor->baseArrayLayer, descriptor->arrayLayerCount);
- mOwnsHandle = true;
- }
- }
-
- TextureView::~TextureView() {
- }
-
- void TextureView::DestroyImpl() {
- TextureViewBase::DestroyImpl();
- if (mOwnsHandle) {
- ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
- }
- }
-
- GLuint TextureView::GetHandle() const {
- ASSERT(mHandle != 0);
- return mHandle;
- }
-
- GLenum TextureView::GetGLTarget() const {
- return mTarget;
- }
-
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
deleted file mode 100644
index 28b35d31a83..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_TEXTUREGL_H_
-#define DAWNNATIVE_OPENGL_TEXTUREGL_H_
-
-#include "dawn_native/Texture.h"
-
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- class Device;
- struct GLFormat;
-
- class Texture final : public TextureBase {
- public:
- Texture(Device* device, const TextureDescriptor* descriptor);
- Texture(Device* device,
- const TextureDescriptor* descriptor,
- GLuint handle,
- TextureState state);
-
- GLuint GetHandle() const;
- GLenum GetGLTarget() const;
- const GLFormat& GetGLFormat() const;
-
- void EnsureSubresourceContentInitialized(const SubresourceRange& range);
-
- private:
- ~Texture() override;
-
- void DestroyImpl() override;
- MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
-
- GLuint mHandle;
- GLenum mTarget;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
-
- GLuint GetHandle() const;
- GLenum GetGLTarget() const;
-
- private:
- ~TextureView() override;
- void DestroyImpl() override;
-
- GLuint mHandle;
- GLenum mTarget;
- bool mOwnsHandle;
- };
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_TEXTUREGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
deleted file mode 100644
index 413336b5e61..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/UtilsGL.h"
-
-#include "common/Assert.h"
-
-namespace dawn_native { namespace opengl {
-
- GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
- switch (compareFunction) {
- case wgpu::CompareFunction::Never:
- return GL_NEVER;
- case wgpu::CompareFunction::Less:
- return GL_LESS;
- case wgpu::CompareFunction::LessEqual:
- return GL_LEQUAL;
- case wgpu::CompareFunction::Greater:
- return GL_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return GL_GEQUAL;
- case wgpu::CompareFunction::NotEqual:
- return GL_NOTEQUAL;
- case wgpu::CompareFunction::Equal:
- return GL_EQUAL;
- case wgpu::CompareFunction::Always:
- return GL_ALWAYS;
-
- case wgpu::CompareFunction::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
- switch (depthStencilFormat) {
- case wgpu::TextureFormat::Depth24PlusStencil8:
- return 0xFF;
-
- default:
- UNREACHABLE();
- }
- }
-}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h
deleted file mode 100644
index 2f87b378132..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGL_UTILSGL_H_
-#define DAWNNATIVE_OPENGL_UTILSGL_H_
-
-#include "dawn_native/dawn_platform.h"
-#include "dawn_native/opengl/opengl_platform.h"
-
-namespace dawn_native { namespace opengl {
-
- GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
- GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGL_UTILSGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/opengl_platform.h b/chromium/third_party/dawn/src/dawn_native/opengl/opengl_platform.h
deleted file mode 100644
index 783f6e48ca4..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/opengl/opengl_platform.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/opengl/opengl_platform_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.cpp
deleted file mode 100644
index 60c2addc820..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/utils/WGPUHelpers.h"
-
-#include "common/Assert.h"
-#include "common/Constants.h"
-#include "dawn_native/BindGroup.h"
-#include "dawn_native/BindGroupLayout.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/PipelineLayout.h"
-#include "dawn_native/Queue.h"
-#include "dawn_native/Sampler.h"
-#include "dawn_native/ShaderModule.h"
-
-#include <cstring>
-#include <iomanip>
-#include <limits>
-#include <mutex>
-#include <sstream>
-
-namespace dawn_native { namespace utils {
-
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device,
- const char* source) {
- ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = source;
- ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &wgslDesc;
- return device->CreateShaderModule(&descriptor);
- }
-
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- const void* data,
- uint64_t size) {
- BufferDescriptor descriptor;
- descriptor.size = size;
- descriptor.usage = usage;
- descriptor.mappedAtCreation = true;
- Ref<BufferBase> buffer;
- DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
- memcpy(buffer->GetMappedRange(0, size), data, size);
- buffer->Unmap();
- return buffer;
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& bindGroupLayout) {
- PipelineLayoutDescriptor descriptor;
- descriptor.bindGroupLayoutCount = 1;
- BindGroupLayoutBase* bgl = bindGroupLayout.Get();
- descriptor.bindGroupLayouts = &bgl;
- return device->CreatePipelineLayout(&descriptor);
- }
-
- ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
- DeviceBase* device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
- bool allowInternalBinding) {
- std::vector<BindGroupLayoutEntry> entries;
- for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
- entries.push_back(entry);
- }
-
- BindGroupLayoutDescriptor descriptor;
- descriptor.entryCount = static_cast<uint32_t>(entries.size());
- descriptor.entries = entries.data();
- return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset,
- uint64_t bufferMinBindingSize) {
- binding = entryBinding;
- visibility = entryVisibility;
- buffer.type = bufferType;
- buffer.hasDynamicOffset = bufferHasDynamicOffset;
- buffer.minBindingSize = bufferMinBindingSize;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType) {
- binding = entryBinding;
- visibility = entryVisibility;
- sampler.type = samplerType;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension textureViewDimension,
- bool textureMultisampled) {
- binding = entryBinding;
- visibility = entryVisibility;
- texture.sampleType = textureSampleType;
- texture.viewDimension = textureViewDimension;
- texture.multisampled = textureMultisampled;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension textureViewDimension) {
- binding = entryBinding;
- visibility = entryVisibility;
- storageTexture.access = storageTextureAccess;
- storageTexture.format = format;
- storageTexture.viewDimension = textureViewDimension;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- const BindGroupLayoutEntry& entry)
- : BindGroupLayoutEntry(entry) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const Ref<SamplerBase>& sampler)
- : binding(binding), sampler(sampler) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(
- uint32_t binding,
- const Ref<TextureViewBase>& textureView)
- : binding(binding), textureView(textureView) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size)
- : binding(binding), buffer(buffer), offset(offset), size(size) {
- }
-
- BindingInitializationHelper::~BindingInitializationHelper() = default;
-
- BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
- BindGroupEntry result;
-
- result.binding = binding;
- result.sampler = sampler.Get();
- result.textureView = textureView.Get();
- result.buffer = buffer.Get();
- result.offset = offset;
- result.size = size;
-
- return result;
- }
-
- ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer) {
- std::vector<BindGroupEntry> entries;
- for (const BindingInitializationHelper& helper : entriesInitializer) {
- entries.push_back(helper.GetAsBinding());
- }
-
- BindGroupDescriptor descriptor;
- descriptor.layout = layout.Get();
- descriptor.entryCount = entries.size();
- descriptor.entries = entries.data();
-
- return device->CreateBindGroup(&descriptor);
- }
-
- const char* GetLabelForTrace(const char* label) {
- return (label == nullptr || strlen(label) == 0) ? "None" : label;
- }
-
-}} // namespace dawn_native::utils
diff --git a/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.h
deleted file mode 100644
index 108f107d916..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/utils/WGPUHelpers.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_UTILS_WGPUHELPERS_H_
-#define DAWNNATIVE_UTILS_WGPUHELPERS_H_
-
-#include <dawn_native/dawn_platform.h>
-
-#include <array>
-#include <initializer_list>
-#include <vector>
-
-#include "common/RefCounted.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace utils {
-
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
-
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- const void* data,
- uint64_t size);
-
- template <typename T>
- ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
- wgpu::BufferUsage usage,
- std::initializer_list<T> data) {
- return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
- }
-
- ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& bindGroupLayout);
-
- // Helpers to make creating bind group layouts look nicer:
- //
- // utils::MakeBindGroupLayout(device, {
- // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
- // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
- // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
- // });
-
- struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset = false,
- uint64_t bufferMinBindingSize = 0);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
- bool textureMultisampled = false);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
-
- BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
- };
-
- ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
- DeviceBase* device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
- bool allowInternalBinding = false);
-
- // Helpers to make creating bind groups look nicer:
- //
- // utils::MakeBindGroup(device, layout, {
- // {0, mySampler},
- // {1, myBuffer, offset, size},
- // {3, myTextureView}
- // });
-
- // Structure with one constructor per-type of bindings, so that the initializer_list accepts
- // bindings with the right type and no extra information.
- struct BindingInitializationHelper {
- BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
- BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
- BindingInitializationHelper(uint32_t binding,
- const Ref<BufferBase>& buffer,
- uint64_t offset = 0,
- uint64_t size = wgpu::kWholeSize);
- ~BindingInitializationHelper();
-
- BindGroupEntry GetAsBinding() const;
-
- uint32_t binding;
- Ref<SamplerBase> sampler;
- Ref<TextureViewBase> textureView;
- Ref<BufferBase> buffer;
- uint64_t offset = 0;
- uint64_t size = 0;
- };
-
- ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
- DeviceBase* device,
- const Ref<BindGroupLayoutBase>& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer);
-
- const char* GetLabelForTrace(const char* label);
-
-}} // namespace dawn_native::utils
-
-#endif // DAWNNATIVE_UTILS_WGPUHELPERS_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
deleted file mode 100644
index 1e6595f8227..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/AdapterVk.h"
-
-#include "dawn_native/Limits.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-
-#include "common/GPUInfo.h"
-
-namespace dawn_native { namespace vulkan {
-
- Adapter::Adapter(InstanceBase* instance,
- VulkanInstance* vulkanInstance,
- VkPhysicalDevice physicalDevice)
- : AdapterBase(instance, wgpu::BackendType::Vulkan),
- mPhysicalDevice(physicalDevice),
- mVulkanInstance(vulkanInstance) {
- }
-
- const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
- return mDeviceInfo;
- }
-
- VkPhysicalDevice Adapter::GetPhysicalDevice() const {
- return mPhysicalDevice;
- }
-
- VulkanInstance* Adapter::GetVulkanInstance() const {
- return mVulkanInstance.Get();
- }
-
- bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
- ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
- format == VK_FORMAT_D32_SFLOAT_S8_UINT);
-
- VkFormatProperties properties;
- mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
- &properties);
- return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
- }
-
- MaybeError Adapter::InitializeImpl() {
- DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
-
- if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
- mDriverDescription = mDeviceInfo.driverProperties.driverName;
- if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
- mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
- }
- } else {
- mDriverDescription =
- "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
- }
-
- mPCIInfo.deviceId = mDeviceInfo.properties.deviceID;
- mPCIInfo.vendorId = mDeviceInfo.properties.vendorID;
- mPCIInfo.name = mDeviceInfo.properties.deviceName;
-
- switch (mDeviceInfo.properties.deviceType) {
- case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
- mAdapterType = wgpu::AdapterType::IntegratedGPU;
- break;
- case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
- mAdapterType = wgpu::AdapterType::DiscreteGPU;
- break;
- case VK_PHYSICAL_DEVICE_TYPE_CPU:
- mAdapterType = wgpu::AdapterType::CPU;
- break;
- default:
- mAdapterType = wgpu::AdapterType::Unknown;
- break;
- }
-
- return {};
- }
-
- MaybeError Adapter::InitializeSupportedFeaturesImpl() {
- // Needed for viewport Y-flip.
- if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
- return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
- }
-
- // Needed for security
- if (!mDeviceInfo.features.robustBufferAccess) {
- return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
- }
-
- if (!mDeviceInfo.features.textureCompressionBC &&
- !(mDeviceInfo.features.textureCompressionETC2 &&
- mDeviceInfo.features.textureCompressionASTC_LDR)) {
- return DAWN_INTERNAL_ERROR(
- "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
- "textureCompressionASTC required.");
- }
-
- // Needed for the respective WebGPU features.
- if (!mDeviceInfo.features.depthBiasClamp) {
- return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
- }
- if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
- return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
- }
- if (!mDeviceInfo.features.fullDrawIndexUint32) {
- return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
- }
- if (!mDeviceInfo.features.imageCubeArray) {
- return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
- }
- if (!mDeviceInfo.features.independentBlend) {
- return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
- }
- if (!mDeviceInfo.features.sampleRateShading) {
- return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
- }
-
- // Initialize supported extensions
- if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
- }
-
- if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
- }
-
- if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
- }
-
- if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
- }
-
- if (mDeviceInfo.features.depthClamp == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::DepthClamping);
- }
-
- if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
- mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
- }
-
- return {};
- }
-
- MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
- GetDefaultLimits(&limits->v1);
- CombinedLimits baseLimits = *limits;
-
- const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
-
-#define CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, compareOp, msgSegment) \
- do { \
- if (vkLimits.vulkanName compareOp baseLimits.v1.webgpuName) { \
- return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for " #webgpuName \
- "." \
- " VkPhysicalDeviceLimits::" #vulkanName \
- " must be at " msgSegment " " + \
- std::to_string(baseLimits.v1.webgpuName)); \
- } \
- limits->v1.webgpuName = vkLimits.vulkanName; \
- } while (false)
-
-#define CHECK_AND_SET_V1_MAX_LIMIT(vulkanName, webgpuName) \
- CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, <, "least")
-#define CHECK_AND_SET_V1_MIN_LIMIT(vulkanName, webgpuName) \
- CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, >, "most")
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
- CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
- limits->v1.maxTextureDimension2D = std::min({
- static_cast<uint32_t>(vkLimits.maxImageDimension2D),
- static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
- static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
- static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
- static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
- static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
- static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
- });
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
- CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
- CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
- CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
- maxDynamicUniformBuffersPerPipelineLayout);
- CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
- maxDynamicStorageBuffersPerPipelineLayout);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
- maxSampledTexturesPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
- maxStorageBuffersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
- maxStorageTexturesPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
- maxUniformBuffersPerShaderStage);
- CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
- CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
-
- CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment,
- minUniformBufferOffsetAlignment);
- CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment,
- minStorageBufferOffsetAlignment);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
- CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
-
- if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
- vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
- return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
- }
- limits->v1.maxVertexBufferArrayStride = std::min(
- vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
-
- if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
- vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for maxInterStageShaderComponents");
- }
- limits->v1.maxInterStageShaderComponents =
- std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations,
- maxComputeInvocationsPerWorkgroup);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
-
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
- CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
- limits->v1.maxComputeWorkgroupsPerDimension = std::min({
- vkLimits.maxComputeWorkGroupCount[0],
- vkLimits.maxComputeWorkGroupCount[1],
- vkLimits.maxComputeWorkGroupCount[2],
- });
-
- if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
- return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
- }
- if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
- vkLimits.framebufferColorSampleCounts)) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for framebufferColorSampleCounts");
- }
- if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
- vkLimits.framebufferDepthSampleCounts)) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for framebufferDepthSampleCounts");
- }
-
- // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPUs drivers seem
- // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
- // storage buffers.
- uint32_t vendorId = mDeviceInfo.properties.vendorID;
- if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
- !gpu_info::IsNvidia(vendorId)) {
- if (vkLimits.maxFragmentCombinedOutputResources <
- kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
- baseLimits.v1.maxStorageBuffersPerShaderStage) {
- return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
- }
-
- uint32_t maxFragmentCombinedOutputResources =
- kMaxColorAttachments + limits->v1.maxStorageTexturesPerShaderStage +
- limits->v1.maxStorageBuffersPerShaderStage;
-
- if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
- // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
- // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
- // to fit within the Vulkan limit.
- uint32_t countOverLimit = maxFragmentCombinedOutputResources -
- vkLimits.maxFragmentCombinedOutputResources;
-
- uint32_t maxStorageTexturesOverBase =
- limits->v1.maxStorageTexturesPerShaderStage -
- baseLimits.v1.maxStorageTexturesPerShaderStage;
- uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
- baseLimits.v1.maxStorageBuffersPerShaderStage;
-
- // Reduce the number of resources by half the overage count, but clamp to
- // to ensure we don't go below the base limits.
- uint32_t numFewerStorageTextures =
- std::min(countOverLimit / 2, maxStorageTexturesOverBase);
- uint32_t numFewerStorageBuffers =
- std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
-
- if (numFewerStorageTextures == maxStorageTexturesOverBase) {
- // If |numFewerStorageTextures| was clamped, subtract the remaining
- // from the storage buffers.
- numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
- ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
- } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
- // If |numFewerStorageBuffers| was clamped, subtract the remaining
- // from the storage textures.
- numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
- ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
- }
- limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
- limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
- }
- }
-
- return {};
- }
-
- bool Adapter::SupportsExternalImages() const {
- // Via dawn_native::vulkan::WrapVulkanImage
- return external_memory::Service::CheckSupport(mDeviceInfo) &&
- external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
- mVulkanInstance->GetFunctions());
- }
-
- ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DawnDeviceDescriptor* descriptor) {
- return Device::Create(this, descriptor);
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h
deleted file mode 100644
index 7e9257cbf52..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_ADAPTERVK_H_
-#define DAWNNATIVE_VULKAN_ADAPTERVK_H_
-
-#include "dawn_native/Adapter.h"
-
-#include "common/RefCounted.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-namespace dawn_native { namespace vulkan {
-
- class VulkanInstance;
-
- class Adapter : public AdapterBase {
- public:
- Adapter(InstanceBase* instance,
- VulkanInstance* vulkanInstance,
- VkPhysicalDevice physicalDevice);
- ~Adapter() override = default;
-
- // AdapterBase Implementation
- bool SupportsExternalImages() const override;
-
- const VulkanDeviceInfo& GetDeviceInfo() const;
- VkPhysicalDevice GetPhysicalDevice() const;
- VulkanInstance* GetVulkanInstance() const;
-
- bool IsDepthStencilFormatSupported(VkFormat format);
-
- private:
- MaybeError InitializeImpl() override;
- MaybeError InitializeSupportedFeaturesImpl() override;
- MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
-
- ResultOrError<DeviceBase*> CreateDeviceImpl(
- const DawnDeviceDescriptor* descriptor) override;
-
- VkPhysicalDevice mPhysicalDevice;
- Ref<VulkanInstance> mVulkanInstance;
- VulkanDeviceInfo mDeviceInfo = {};
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_ADAPTERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
deleted file mode 100644
index 1c4c1b90707..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/BackendVk.h"
-
-#include "common/BitSetIterator.h"
-#include "common/Log.h"
-#include "common/SystemUtils.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/VulkanBackend.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-// TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
-#if defined(DAWN_ENABLE_SWIFTSHADER)
-# if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
-constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
-# elif defined(DAWN_PLATFORM_WINDOWS)
-constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
-# elif defined(DAWN_PLATFORM_MACOS)
-constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
-# else
-# error "Unimplemented Swiftshader Vulkan backend platform"
-# endif
-#endif
-
-#if defined(DAWN_PLATFORM_LINUX)
-# if defined(DAWN_PLATFORM_ANDROID)
-constexpr char kVulkanLibName[] = "libvulkan.so";
-# else
-constexpr char kVulkanLibName[] = "libvulkan.so.1";
-# endif
-#elif defined(DAWN_PLATFORM_WINDOWS)
-constexpr char kVulkanLibName[] = "vulkan-1.dll";
-#elif defined(DAWN_PLATFORM_MACOS)
-constexpr char kVulkanLibName[] = "libvulkan.dylib";
-#elif defined(DAWN_PLATFORM_FUCHSIA)
-constexpr char kVulkanLibName[] = "libvulkan.so";
-#else
-# error "Unimplemented Vulkan backend platform"
-#endif
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- static constexpr ICD kICDs[] = {
- ICD::None,
-#if defined(DAWN_ENABLE_SWIFTSHADER)
- ICD::SwiftShader,
-#endif // defined(DAWN_ENABLE_SWIFTSHADER)
- };
-
- VKAPI_ATTR VkBool32 VKAPI_CALL
- OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
- VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
- const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
- void* /* pUserData */) {
- dawn::WarningLog() << pCallbackData->pMessage;
- ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
-
- return VK_FALSE;
- }
-
- // A debug callback specifically for instance creation so that we don't fire an ASSERT when
- // the instance fails creation in an expected manner (for example the system not having
- // Vulkan drivers).
- VKAPI_ATTR VkBool32 VKAPI_CALL OnInstanceCreationDebugUtilsCallback(
- VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
- VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
- const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
- void* /* pUserData */) {
- dawn::WarningLog() << pCallbackData->pMessage;
- return VK_FALSE;
- }
-
- } // anonymous namespace
-
- VulkanInstance::VulkanInstance() = default;
-
- VulkanInstance::~VulkanInstance() {
- if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
- mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
- mDebugUtilsMessenger = VK_NULL_HANDLE;
- }
-
- // VkPhysicalDevices are destroyed when the VkInstance is destroyed
- if (mInstance != VK_NULL_HANDLE) {
- mFunctions.DestroyInstance(mInstance, nullptr);
- mInstance = VK_NULL_HANDLE;
- }
- }
-
- const VulkanFunctions& VulkanInstance::GetFunctions() const {
- return mFunctions;
- }
-
- VkInstance VulkanInstance::GetVkInstance() const {
- return mInstance;
- }
-
- const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
- return mGlobalInfo;
- }
-
- const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
- return mPhysicalDevices;
- }
-
- // static
- ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance,
- ICD icd) {
- Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
- DAWN_TRY(vulkanInstance->Initialize(instance, icd));
- return std::move(vulkanInstance);
- }
-
- MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
- // These environment variables need only be set while loading procs and gathering device
- // info.
- ScopedEnvironmentVar vkICDFilenames;
- ScopedEnvironmentVar vkLayerPath;
-
-#if defined(DAWN_ENABLE_VULKAN_LOADER)
- // If enabled, we use our own built Vulkan loader by specifying an absolute path to the
- // shared library. Note that when we are currently getting the absolute path for the custom
- // loader by getting the path to the dawn native library and traversing relative from there.
- // This has implications for dawn tests because some of them are linking statically to
- // dawn_native which means the "module" is actually the test as well. If the directory
- // location of the tests change w.r.t the shared lib then this may break. Essentially we are
- // assuming that our custom built Vulkan loader will always be in the same directory as the
- // shared dawn native library and all test binaries that link statically.
- const std::string resolvedVulkanLibPath = GetModuleDirectory() + kVulkanLibName;
-#else
- const std::string resolvedVulkanLibPath = kVulkanLibName;
-#endif // defined(DAWN_ENABLE_VULKAN_LOADER)
-
- switch (icd) {
- case ICD::None: {
- if (!mVulkanLib.Open(resolvedVulkanLibPath)) {
- return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load %s.", resolvedVulkanLibPath);
- }
- break;
- }
- case ICD::SwiftShader: {
-#if defined(DAWN_ENABLE_SWIFTSHADER)
- // First try to load the system Vulkan driver, if that fails, try to load with
- // Swiftshader. Note: The system driver could potentially be Swiftshader if it was
- // installed.
-# if defined(DAWN_SWIFTSHADER_VK_ICD_JSON)
- if (mVulkanLib.Open(resolvedVulkanLibPath)) {
- std::string fullSwiftshaderICDPath =
- GetExecutableDirectory() + DAWN_SWIFTSHADER_VK_ICD_JSON;
- if (!vkICDFilenames.Set("VK_ICD_FILENAMES", fullSwiftshaderICDPath.c_str())) {
- return DAWN_FORMAT_INTERNAL_ERROR("Couldn't set VK_ICD_FILENAMES to %s.",
- fullSwiftshaderICDPath);
- }
- // Succesfully loaded driver and set VK_ICD_FILENAMES.
- break;
- } else
-# endif // defined(DAWN_SWIFTSHADER_VK_ICD_JSON)
- // Fallback to loading SwiftShader directly.
- if (mVulkanLib.Open(kSwiftshaderLibName)) {
- // Succesfully loaded SwiftShader.
- break;
- }
- return DAWN_FORMAT_INTERNAL_ERROR(
- "Failed to load SwiftShader. DAWN_SWIFTSHADER_VK_ICD_JSON was not defined and "
- "could not load %s.",
- kSwiftshaderLibName);
-#endif // defined(DAWN_ENABLE_SWIFTSHADER)
-
- // ICD::SwiftShader should not be passed if SwiftShader is not enabled.
- UNREACHABLE();
- }
- }
-
- if (instance->IsBackendValidationEnabled()) {
-#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
- std::string vkDataDir = GetExecutableDirectory() + DAWN_VK_DATA_DIR;
- if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
- return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
- }
-#else
- dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
- "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
-#endif
- }
-
- DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
-
- DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
-
- VulkanGlobalKnobs usedGlobalKnobs = {};
- DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
- *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
-
- DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
-
- if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
- DAWN_TRY(RegisterDebugUtils());
- }
-
- DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
-
- return {};
- }
-
- ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(
- const InstanceBase* instance) {
- VulkanGlobalKnobs usedKnobs = {};
- std::vector<const char*> layerNames;
- InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
-
- auto UseLayerIfAvailable = [&](VulkanLayer layer) {
- if (mGlobalInfo.layers[layer]) {
- layerNames.push_back(GetVulkanLayerInfo(layer).name);
- usedKnobs.layers.set(layer, true);
- extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
- }
- };
-
- // vktrace works by instering a layer, but we hide it behind a macro because the vktrace
- // layer crashes when used without vktrace server started. See this vktrace issue:
- // https://github.com/LunarG/VulkanTools/issues/254
- // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
- // by other layers.
-#if defined(DAWN_USE_VKTRACE)
- UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
-#endif
- // RenderDoc installs a layer at the system level for its capture but we don't want to use
- // it unless we are debugging in RenderDoc so we hide it behind a macro.
-#if defined(DAWN_USE_RENDERDOC)
- UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
-#endif
-
- if (instance->IsBackendValidationEnabled()) {
- UseLayerIfAvailable(VulkanLayer::Validation);
- }
-
- // Always use the Fuchsia swapchain layer if available.
- UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
-
- // Available and known instance extensions default to being requested, but some special
- // cases are removed.
- usedKnobs.extensions = extensionsToRequest;
-
- std::vector<const char*> extensionNames;
- for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
- const InstanceExtInfo& info = GetInstanceExtInfo(ext);
-
- if (info.versionPromoted > mGlobalInfo.apiVersion) {
- extensionNames.push_back(info.name);
- }
- }
-
- VkApplicationInfo appInfo;
- appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
- appInfo.pNext = nullptr;
- appInfo.pApplicationName = nullptr;
- appInfo.applicationVersion = 0;
- appInfo.pEngineName = nullptr;
- appInfo.engineVersion = 0;
- // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
- // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
- // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
- // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
- // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
- // treat 1.2 as the highest API version dawn targets.
- if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
- appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
- } else {
- appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
- }
-
- VkInstanceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.pApplicationInfo = &appInfo;
- createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
- createInfo.ppEnabledLayerNames = layerNames.data();
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
- createInfo.ppEnabledExtensionNames = extensionNames.data();
-
- PNextChainBuilder createInfoChain(&createInfo);
-
- // Register the debug callback for instance creation so we receive message for any errors
- // (validation or other).
- VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
- if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
- utilsMessengerCreateInfo.flags = 0;
- utilsMessengerCreateInfo.messageSeverity =
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
- utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
- utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
- utilsMessengerCreateInfo.pUserData = nullptr;
-
- createInfoChain.Add(&utilsMessengerCreateInfo,
- VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
- }
-
- // Try to turn on synchronization validation if the instance was created with backend
- // validation enabled.
- VkValidationFeaturesEXT validationFeatures;
- VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
- VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
- if (instance->IsBackendValidationEnabled() &&
- usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
- validationFeatures.enabledValidationFeatureCount = 1;
- validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
- validationFeatures.disabledValidationFeatureCount = 0;
- validationFeatures.pDisabledValidationFeatures = nullptr;
-
- createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
- }
-
- DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
- "vkCreateInstance"));
-
- return usedKnobs;
- }
-
- MaybeError VulkanInstance::RegisterDebugUtils() {
- VkDebugUtilsMessengerCreateInfoEXT createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
- createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
- VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
- createInfo.pfnUserCallback = OnDebugUtilsCallback;
- createInfo.pUserData = nullptr;
-
- return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(
- mInstance, &createInfo, nullptr, &*mDebugUtilsMessenger),
- "vkCreateDebugUtilsMessengerEXT");
- }
-
- Backend::Backend(InstanceBase* instance)
- : BackendConnection(instance, wgpu::BackendType::Vulkan) {
- }
-
- Backend::~Backend() = default;
-
- std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
- AdapterDiscoveryOptions options;
- auto result = DiscoverAdapters(&options);
- if (result.IsError()) {
- GetInstance()->ConsumedError(result.AcquireError());
- return {};
- }
- return result.AcquireSuccess();
- }
-
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> Backend::DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) {
- ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
-
- const AdapterDiscoveryOptions* options =
- static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
- std::vector<std::unique_ptr<AdapterBase>> adapters;
-
- InstanceBase* instance = GetInstance();
- for (ICD icd : kICDs) {
- if (options->forceSwiftShader && icd != ICD::SwiftShader) {
- continue;
- }
- if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
- DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
- return {};
- }())) {
- // Instance failed to initialize.
- continue;
- }
- const std::vector<VkPhysicalDevice>& physicalDevices =
- mVulkanInstances[icd]->GetPhysicalDevices();
- for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
- std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(
- instance, mVulkanInstances[icd].Get(), physicalDevices[i]);
- if (instance->ConsumedError(adapter->Initialize())) {
- continue;
- }
- adapters.push_back(std::move(adapter));
- }
- }
- return adapters;
- }
-
- BackendConnection* Connect(InstanceBase* instance) {
- return new Backend(instance);
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h
deleted file mode 100644
index 96541f1b0b7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_BACKENDVK_H_
-#define DAWNNATIVE_VULKAN_BACKENDVK_H_
-
-#include "dawn_native/BackendConnection.h"
-
-#include "common/DynamicLib.h"
-#include "common/RefCounted.h"
-#include "common/ityp_array.h"
-#include "dawn_native/vulkan/VulkanFunctions.h"
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-namespace dawn_native { namespace vulkan {
-
- enum class ICD {
- None,
- SwiftShader,
- };
-
- // VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
- // on that instance, Vulkan functions loaded from the library, and global information
- // gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
- // and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
- // on an instance are no longer in use, the instance is deleted. This can be particuarly useful
- // when we create multiple instances to selectively discover ICDs (like only
- // SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
- // can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
- class VulkanInstance : public RefCounted {
- public:
- static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
- ~VulkanInstance();
-
- const VulkanFunctions& GetFunctions() const;
- VkInstance GetVkInstance() const;
- const VulkanGlobalInfo& GetGlobalInfo() const;
- const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
-
- private:
- VulkanInstance();
-
- MaybeError Initialize(const InstanceBase* instance, ICD icd);
- ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
-
- MaybeError RegisterDebugUtils();
-
- DynamicLib mVulkanLib;
- VulkanGlobalInfo mGlobalInfo = {};
- VkInstance mInstance = VK_NULL_HANDLE;
- VulkanFunctions mFunctions;
-
- VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
-
- std::vector<VkPhysicalDevice> mPhysicalDevices;
- };
-
- class Backend : public BackendConnection {
- public:
- Backend(InstanceBase* instance);
- ~Backend() override;
-
- MaybeError Initialize();
-
- std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
- ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
- const AdapterDiscoveryOptionsBase* optionsBase) override;
-
- private:
- ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_BACKENDVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
deleted file mode 100644
index dc61c03bcbe..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-
-#include "common/BitSetIterator.h"
-#include "common/ityp_vector.h"
-#include "dawn_native/vulkan/BindGroupVk.h"
-#include "dawn_native/vulkan/DescriptorSetAllocator.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <map>
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
- VkShaderStageFlags flags = 0;
-
- if (stages & wgpu::ShaderStage::Vertex) {
- flags |= VK_SHADER_STAGE_VERTEX_BIT;
- }
- if (stages & wgpu::ShaderStage::Fragment) {
- flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
- }
- if (stages & wgpu::ShaderStage::Compute) {
- flags |= VK_SHADER_STAGE_COMPUTE_BIT;
- }
-
- return flags;
- }
-
- } // anonymous namespace
-
- VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer:
- switch (bindingInfo.buffer.type) {
- case wgpu::BufferBindingType::Uniform:
- if (bindingInfo.buffer.hasDynamicOffset) {
- return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
- }
- return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
- case wgpu::BufferBindingType::Storage:
- case kInternalStorageBufferBinding:
- case wgpu::BufferBindingType::ReadOnlyStorage:
- if (bindingInfo.buffer.hasDynamicOffset) {
- return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
- }
- return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
- case wgpu::BufferBindingType::Undefined:
- UNREACHABLE();
- }
- case BindingInfoType::Sampler:
- return VK_DESCRIPTOR_TYPE_SAMPLER;
- case BindingInfoType::Texture:
- case BindingInfoType::ExternalTexture:
- return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
- case BindingInfoType::StorageTexture:
- return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
- }
- UNREACHABLE();
- }
-
- // static
- ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- Ref<BindGroupLayout> bgl =
- AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
- DAWN_TRY(bgl->Initialize());
- return bgl;
- }
-
- MaybeError BindGroupLayout::Initialize() {
- // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
- // one entry per binding set. This might be optimized by computing continuous ranges of
- // bindings of the same type.
- ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
- bindings.reserve(GetBindingCount());
-
- for (const auto& it : GetBindingMap()) {
- BindingIndex bindingIndex = it.second;
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-
- VkDescriptorSetLayoutBinding vkBinding;
- vkBinding.binding = static_cast<uint32_t>(bindingIndex);
- // TODO(dawn:728) In the future, special handling will be needed for external textures
- // here because they encompass multiple views.
- vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
- vkBinding.descriptorCount = 1;
- vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
- vkBinding.pImmutableSamplers = nullptr;
-
- bindings.emplace_back(vkBinding);
- }
-
- VkDescriptorSetLayoutCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
- createInfo.pBindings = bindings.data();
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
- device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateDescriptorSetLayout"));
-
- // Compute the size of descriptor pools used for this layout.
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
-
- for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
- // TODO(dawn:728) In the future, special handling will be needed for external textures
- // here because they encompass multiple views.
- VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
-
- // map::operator[] will return 0 if the key doesn't exist.
- descriptorCountPerType[vulkanType]++;
- }
-
- // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
- // counts.
- mDescriptorSetAllocator =
- DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
-
- SetLabelImpl();
-
- return {};
- }
-
- BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken)
- : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
- mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- }
-
- BindGroupLayout::~BindGroupLayout() = default;
-
- void BindGroupLayout::DestroyImpl() {
- BindGroupLayoutBase::DestroyImpl();
-
- Device* device = ToBackend(GetDevice());
-
- // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
- // so we can destroy mHandle immediately instead of using the FencedDeleter.
- // (Swiftshader implements this wrong b/154522740).
- // In practice, the GPU is done with all descriptor sets because bind group deallocation
- // refs the bind group layout so that once the bind group is finished being used, we can
- // recycle its descriptor set.
- if (mHandle != VK_NULL_HANDLE) {
- device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
- mHandle = VK_NULL_HANDLE;
- }
- mDescriptorSetAllocator = nullptr;
- }
-
- VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
- return mHandle;
- }
-
- ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
- Device* device,
- const BindGroupDescriptor* descriptor) {
- DescriptorSetAllocation descriptorSetAllocation;
- DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
-
- return AcquireRef(
- mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
- }
-
- void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
- DescriptorSetAllocation* descriptorSetAllocation) {
- mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
- mBindGroupAllocator.Deallocate(bindGroup);
- }
-
- void BindGroupLayout::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_BindGroupLayout", GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
deleted file mode 100644
index 4b0c98d6551..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
-#define DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
-
-#include "dawn_native/BindGroupLayout.h"
-
-#include "common/SlabAllocator.h"
-#include "common/vulkan_platform.h"
-
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- class BindGroup;
- struct DescriptorSetAllocation;
- class DescriptorSetAllocator;
- class Device;
-
- VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
-
- // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
- // it's hard to have something where we can mix different types of descriptor sets because
- // we don't know if their vector of number of descriptors will be similar.
- //
- // That's why that in addition to containing the VkDescriptorSetLayout to create
- // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
- // sets.
- //
- // The allocations is done with one pool per descriptor set, which is inefficient, but at least
- // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
- // is important because creating them can incur GPU memory allocation which is usually an
- // expensive syscall.
- class BindGroupLayout final : public BindGroupLayoutBase {
- public:
- static ResultOrError<Ref<BindGroupLayout>> Create(
- Device* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken);
-
- VkDescriptorSetLayout GetHandle() const;
-
- ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
- void DeallocateBindGroup(BindGroup* bindGroup,
- DescriptorSetAllocation* descriptorSetAllocation);
-
- private:
- ~BindGroupLayout() override;
- MaybeError Initialize();
- void DestroyImpl() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
-
- SlabAllocator<BindGroup> mBindGroupAllocator;
- Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_BINDGROUPLAYOUTVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
deleted file mode 100644
index dcfae8ff513..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/BindGroupVk.h"
-
-#include "common/BitSetIterator.h"
-#include "common/ityp_stack_vec.h"
-#include "dawn_native/ExternalTexture.h"
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/BufferVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/SamplerVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- // static
- ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
- return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
- }
-
- BindGroup::BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- DescriptorSetAllocation descriptorSetAllocation)
- : BindGroupBase(this, device, descriptor),
- mDescriptorSetAllocation(descriptorSetAllocation) {
- // Now do a write of a single descriptor set with all possible chained data allocated on the
- // stack.
- const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
- ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
- bindingCount);
- ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
- writeBufferInfo(bindingCount);
- ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
- writeImageInfo(bindingCount);
-
- uint32_t numWrites = 0;
- for (const auto& it : GetLayout()->GetBindingMap()) {
- BindingIndex bindingIndex = it.second;
- const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
-
- auto& write = writes[numWrites];
- write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- write.pNext = nullptr;
- write.dstSet = GetHandle();
- write.dstBinding = static_cast<uint32_t>(bindingIndex);
- write.dstArrayElement = 0;
- write.descriptorCount = 1;
- write.descriptorType = VulkanDescriptorType(bindingInfo);
-
- switch (bindingInfo.bindingType) {
- case BindingInfoType::Buffer: {
- BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
-
- VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Buffer was destroyed. Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeBufferInfo[numWrites].buffer = handle;
- writeBufferInfo[numWrites].offset = binding.offset;
- writeBufferInfo[numWrites].range = binding.size;
- write.pBufferInfo = &writeBufferInfo[numWrites];
- break;
- }
-
- case BindingInfoType::Sampler: {
- Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
- writeImageInfo[numWrites].sampler = sampler->GetHandle();
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
- }
-
- case BindingInfoType::Texture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
- VkImageView handle = view->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Texture was destroyed before the TextureView was created.
- // Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeImageInfo[numWrites].imageView = handle;
-
- // The layout may be GENERAL here because of interactions between the Sampled
- // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
- writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
- ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
-
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
- }
-
- case BindingInfoType::StorageTexture: {
- TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
- VkImageView handle = view->GetHandle();
- if (handle == VK_NULL_HANDLE) {
- // The Texture was destroyed before the TextureView was created.
- // Skip this descriptor write since it would be
- // a Vulkan Validation Layers error. This bind group won't be used as it
- // is an error to submit a command buffer that references destroyed
- // resources.
- continue;
- }
- writeImageInfo[numWrites].imageView = handle;
- writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
-
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
- }
-
- case BindingInfoType::ExternalTexture: {
- const std::array<Ref<dawn_native::TextureViewBase>, kMaxPlanesPerFormat>&
- textureViews = GetBindingAsExternalTexture(bindingIndex)->GetTextureViews();
-
- // Only single-plane formats are supported right now, so ensure only one view
- // exists.
- ASSERT(textureViews[1].Get() == nullptr);
- ASSERT(textureViews[2].Get() == nullptr);
-
- TextureView* view = ToBackend(textureViews[0].Get());
-
- writeImageInfo[numWrites].imageView = view->GetHandle();
- writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
- ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
-
- write.pImageInfo = &writeImageInfo[numWrites];
- break;
- }
- }
-
- numWrites++;
- }
-
- // TODO(crbug.com/dawn/855): Batch these updates
- device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
- nullptr);
-
- SetLabelImpl();
- }
-
- BindGroup::~BindGroup() = default;
-
- void BindGroup::DestroyImpl() {
- BindGroupBase::DestroyImpl();
- ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
- }
-
- VkDescriptorSet BindGroup::GetHandle() const {
- return mDescriptorSetAllocation.set;
- }
-
- void BindGroup::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_DESCRIPTOR_SET,
- reinterpret_cast<uint64_t&>(mDescriptorSetAllocation.set), "Dawn_BindGroup",
- GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
deleted file mode 100644
index abd8b1da3df..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_BINDGROUPVK_H_
-#define DAWNNATIVE_VULKAN_BINDGROUPVK_H_
-
-#include "dawn_native/BindGroup.h"
-
-#include "common/PlacementAllocated.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/DescriptorSetAllocation.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class BindGroup final : public BindGroupBase, public PlacementAllocated {
- public:
- static ResultOrError<Ref<BindGroup>> Create(Device* device,
- const BindGroupDescriptor* descriptor);
-
- BindGroup(Device* device,
- const BindGroupDescriptor* descriptor,
- DescriptorSetAllocation descriptorSetAllocation);
-
- VkDescriptorSet GetHandle() const;
-
- private:
- ~BindGroup() override;
-
- void DestroyImpl() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- // The descriptor set in this allocation outlives the BindGroup because it is owned by
- // the BindGroupLayout which is referenced by the BindGroup.
- DescriptorSetAllocation mDescriptorSetAllocation;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_BINDGROUPVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
deleted file mode 100644
index 0f038739fe5..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/BufferVk.h"
-
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceHeapVk.h"
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <cstring>
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
- VkBufferUsageFlags flags = 0;
-
- if (usage & wgpu::BufferUsage::CopySrc) {
- flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
- }
- if (usage & wgpu::BufferUsage::Index) {
- flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Vertex) {
- flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Uniform) {
- flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
- }
- if (usage &
- (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
- flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
- }
-
- return flags;
- }
-
- VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
- VkPipelineStageFlags flags = 0;
-
- if (usage & kMappableBufferUsages) {
- flags |= VK_PIPELINE_STAGE_HOST_BIT;
- }
- if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
- if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
- flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
- }
- if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
- kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
- flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
-
- return flags;
- }
-
- VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
- VkAccessFlags flags = 0;
-
- if (usage & wgpu::BufferUsage::MapRead) {
- flags |= VK_ACCESS_HOST_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::MapWrite) {
- flags |= VK_ACCESS_HOST_WRITE_BIT;
- }
- if (usage & wgpu::BufferUsage::CopySrc) {
- flags |= VK_ACCESS_TRANSFER_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::CopyDst) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
- if (usage & wgpu::BufferUsage::Index) {
- flags |= VK_ACCESS_INDEX_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Vertex) {
- flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Uniform) {
- flags |= VK_ACCESS_UNIFORM_READ_BIT;
- }
- if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
- flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (usage & kReadOnlyStorageBuffer) {
- flags |= VK_ACCESS_SHADER_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::Indirect) {
- flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
- }
- if (usage & wgpu::BufferUsage::QueryResolve) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
-
- return flags;
- }
-
- } // namespace
-
- // static
- ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return std::move(buffer);
- }
-
- MaybeError Buffer::Initialize(bool mappedAtCreation) {
- // vkCmdFillBuffer requires the size to be a multiple of 4.
- constexpr size_t kAlignment = 4u;
-
- uint32_t extraBytes = 0u;
- if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
- // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
- // is equal to the whole buffer size. Allocate at least one more byte so it
- // is valid to setVertex/IndexBuffer with a zero-sized range at the end
- // of the buffer with (offset=buffer.size, size=0).
- extraBytes = 1u;
- }
-
- uint64_t size = GetSize();
- if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
-
- size += extraBytes;
-
- // Allocate at least 4 bytes so clamped accesses are always in bounds.
- // Also, Vulkan requires the size to be non-zero.
- size = std::max(size, uint64_t(4u));
-
- if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
- // Alignment would overlow.
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
- }
- mAllocatedSize = Align(size, kAlignment);
-
- // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
- // some constants to the size passed and align it, but for values close to the maximum
- // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
- // VkmemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
- // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
- // safely return an OOM error.
- if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
- return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
- }
-
- VkBufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.size = mAllocatedSize;
- // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
- // and robust resource initialization.
- createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = 0;
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkOOMThenSuccess(
- device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "vkCreateBuffer"));
-
- // Gather requirements for the buffer's memory and allocate it.
- VkMemoryRequirements requirements;
- device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
-
- MemoryKind requestKind = MemoryKind::Linear;
- if (GetUsage() & kMappableBufferUsages) {
- requestKind = MemoryKind::LinearMappable;
- }
- DAWN_TRY_ASSIGN(mMemoryAllocation,
- device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
-
- // Finally associate it with the buffer.
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
- ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
- mMemoryAllocation.GetOffset()),
- "vkBindBufferMemory"));
-
- // The buffers with mappedAtCreation == true will be initialized in
- // BufferBase::MapAtCreation().
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
- !mappedAtCreation) {
- ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
- }
-
- // Initialize the padding bytes to zero.
- if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
- uint32_t paddingBytes = GetAllocatedSize() - GetSize();
- if (paddingBytes > 0) {
- uint32_t clearSize = Align(paddingBytes, 4);
- uint64_t clearOffset = GetAllocatedSize() - clearSize;
-
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- ClearBuffer(recordingContext, 0, clearOffset, clearSize);
- }
- }
-
- SetLabelImpl();
-
- return {};
- }
-
- Buffer::~Buffer() = default;
-
- VkBuffer Buffer::GetHandle() const {
- return mHandle;
- }
-
- void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::BufferUsage usage) {
- VkBufferMemoryBarrier barrier;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
- ASSERT(srcStages != 0 && dstStages != 0);
- ToBackend(GetDevice())
- ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 1u, &barrier, 0, nullptr);
- }
- }
-
- bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
- VkBufferMemoryBarrier* barrier,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- bool lastIncludesTarget = IsSubset(usage, mLastUsage);
- bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
-
- // We can skip transitions to already current read-only usages.
- if (lastIncludesTarget && lastReadOnly) {
- return false;
- }
-
- // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
- if (mLastUsage == wgpu::BufferUsage::None) {
- mLastUsage = usage;
- return false;
- }
-
- *srcStages |= VulkanPipelineStage(mLastUsage);
- *dstStages |= VulkanPipelineStage(usage);
-
- barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
- barrier->pNext = nullptr;
- barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
- barrier->dstAccessMask = VulkanAccessFlags(usage);
- barrier->srcQueueFamilyIndex = 0;
- barrier->dstQueueFamilyIndex = 0;
- barrier->buffer = mHandle;
- barrier->offset = 0;
- // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
- barrier->size = GetAllocatedSize();
-
- mLastUsage = usage;
-
- return true;
- }
-
- bool Buffer::IsCPUWritableAtCreation() const {
- // TODO(enga): Handle CPU-visible memory on UMA
- return mMemoryAllocation.GetMappedPointer() != nullptr;
- }
-
- MaybeError Buffer::MapAtCreationImpl() {
- return {};
- }
-
- MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
- Device* device = ToBackend(GetDevice());
-
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-
- // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
- EnsureDataInitialized(recordingContext);
-
- if (mode & wgpu::MapMode::Read) {
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
- } else {
- ASSERT(mode & wgpu::MapMode::Write);
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
- }
- return {};
- }
-
- void Buffer::UnmapImpl() {
- // No need to do anything, we keep CPU-visible memory mapped at all time.
- }
-
- void* Buffer::GetMappedPointerImpl() {
- uint8_t* memory = mMemoryAllocation.GetMappedPointer();
- ASSERT(memory != nullptr);
- return memory;
- }
-
- void Buffer::DestroyImpl() {
- BufferBase::DestroyImpl();
-
- ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
-
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- InitializeToZero(recordingContext);
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- uint64_t offset,
- uint64_t size) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferRange(offset, size)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero(recordingContext);
- return true;
- }
-
- bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- const CopyTextureToBufferCmd* copy) {
- if (!NeedsInitialization()) {
- return false;
- }
-
- if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
- SetIsDataInitialized();
- return false;
- }
-
- InitializeToZero(recordingContext);
- return true;
- }
-
- void Buffer::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_BUFFER,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_Buffer", GetLabel());
- }
-
- void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
- ASSERT(NeedsInitialization());
-
- ClearBuffer(recordingContext, 0u);
- GetDevice()->IncrementLazyClearCountForTesting();
- SetIsDataInitialized();
- }
-
- void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
- uint32_t clearValue,
- uint64_t offset,
- uint64_t size) {
- ASSERT(recordingContext != nullptr);
- size = size > 0 ? size : GetAllocatedSize();
- ASSERT(size > 0);
-
- TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
- Device* device = ToBackend(GetDevice());
- // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
- // Note: Allocated size must be a multiple of 4.
- ASSERT(size % 4 == 0);
- device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size,
- clearValue);
- }
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
deleted file mode 100644
index 721d4f06990..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_BUFFERVK_H_
-#define DAWNNATIVE_VULKAN_BUFFERVK_H_
-
-#include "dawn_native/Buffer.h"
-
-#include "common/SerialQueue.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-
-namespace dawn_native { namespace vulkan {
-
- struct CommandRecordingContext;
- class Device;
-
- class Buffer final : public BufferBase {
- public:
- static ResultOrError<Ref<Buffer>> Create(Device* device,
- const BufferDescriptor* descriptor);
-
- VkBuffer GetHandle() const;
-
- // Transitions the buffer to be used as `usage`, recording any necessary barrier in
- // `commands`.
- // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
- void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
- bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
- VkBufferMemoryBarrier* barrier,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
-
- // All the Ensure methods return true if the buffer was initialized to zero.
- bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- uint64_t offset,
- uint64_t size);
- bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
- const CopyTextureToBufferCmd* copy);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~Buffer() override;
- using BufferBase::BufferBase;
-
- MaybeError Initialize(bool mappedAtCreation);
- void InitializeToZero(CommandRecordingContext* recordingContext);
- void ClearBuffer(CommandRecordingContext* recordingContext,
- uint32_t clearValue,
- uint64_t offset = 0,
- uint64_t size = 0);
-
- MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
- void UnmapImpl() override;
- void DestroyImpl() override;
- bool IsCPUWritableAtCreation() const override;
- MaybeError MapAtCreationImpl() override;
- void* GetMappedPointerImpl() override;
-
- VkBuffer mHandle = VK_NULL_HANDLE;
- ResourceMemoryAllocation mMemoryAllocation;
-
- wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_BUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
deleted file mode 100644
index 62a5cd2ce76..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ /dev/null
@@ -1,1323 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/CommandBufferVk.h"
-
-#include "dawn_native/BindGroupTracker.h"
-#include "dawn_native/CommandEncoder.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/RenderBundle.h"
-#include "dawn_native/vulkan/BindGroupVk.h"
-#include "dawn_native/vulkan/BufferVk.h"
-#include "dawn_native/vulkan/CommandRecordingContext.h"
-#include "dawn_native/vulkan/ComputePipelineVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-#include "dawn_native/vulkan/QuerySetVk.h"
-#include "dawn_native/vulkan/RenderPassCache.h"
-#include "dawn_native/vulkan/RenderPipelineVk.h"
-#include "dawn_native/vulkan/StagingBufferVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <algorithm>
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
- switch (format) {
- case wgpu::IndexFormat::Uint16:
- return VK_INDEX_TYPE_UINT16;
- case wgpu::IndexFormat::Uint32:
- return VK_INDEX_TYPE_UINT32;
- case wgpu::IndexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
- Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
- return imageExtentSrc.width == imageExtentDst.width &&
- imageExtentSrc.height == imageExtentDst.height &&
- imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
- }
-
- VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize,
- Aspect aspect) {
- const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
- const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
-
- VkImageCopy region;
- region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
- region.srcSubresource.mipLevel = srcCopy.mipLevel;
- region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
- region.dstSubresource.mipLevel = dstCopy.mipLevel;
-
- bool has3DTextureInCopy = false;
-
- region.srcOffset.x = srcCopy.origin.x;
- region.srcOffset.y = srcCopy.origin.y;
- switch (srcTexture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
- region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
- region.srcOffset.z = 0;
- break;
- case wgpu::TextureDimension::e3D:
- has3DTextureInCopy = true;
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- region.srcOffset.z = srcCopy.origin.z;
- break;
- case wgpu::TextureDimension::e1D:
- // TODO(crbug.com/dawn/814): support 1D textures
- UNREACHABLE();
- }
-
- region.dstOffset.x = dstCopy.origin.x;
- region.dstOffset.y = dstCopy.origin.y;
- switch (dstTexture->GetDimension()) {
- case wgpu::TextureDimension::e2D:
- region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
- region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
- region.dstOffset.z = 0;
- break;
- case wgpu::TextureDimension::e3D:
- has3DTextureInCopy = true;
- region.dstSubresource.baseArrayLayer = 0;
- region.dstSubresource.layerCount = 1;
- region.dstOffset.z = dstCopy.origin.z;
- break;
- case wgpu::TextureDimension::e1D:
- // TODO(crbug.com/dawn/814): support 1D textures
- UNREACHABLE();
- }
-
- ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
- Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
- region.extent.width = imageExtent.width;
- region.extent.height = imageExtent.height;
- region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
-
- return region;
- }
-
- class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
- public:
- DescriptorSetTracker() = default;
-
- void Apply(Device* device,
- CommandRecordingContext* recordingContext,
- VkPipelineBindPoint bindPoint) {
- BeforeApply();
- for (BindGroupIndex dirtyIndex :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
- const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
- ? mDynamicOffsets[dirtyIndex].data()
- : nullptr;
- device->fn.CmdBindDescriptorSets(
- recordingContext->commandBuffer, bindPoint,
- ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
- 1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
- }
- AfterApply();
- }
- };
-
- // Records the necessary barriers for a synchronization scope using the resource usage
- // data pre-computed in the frontend. Also performs lazy initialization if required.
- void TransitionAndClearForSyncScope(Device* device,
- CommandRecordingContext* recordingContext,
- const SyncScopeResourceUsage& scope) {
- std::vector<VkBufferMemoryBarrier> bufferBarriers;
- std::vector<VkImageMemoryBarrier> imageBarriers;
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- for (size_t i = 0; i < scope.buffers.size(); ++i) {
- Buffer* buffer = ToBackend(scope.buffers[i]);
- buffer->EnsureDataInitialized(recordingContext);
-
- VkBufferMemoryBarrier bufferBarrier;
- if (buffer->TransitionUsageAndGetResourceBarrier(
- scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
- bufferBarriers.push_back(bufferBarrier);
- }
- }
-
- for (size_t i = 0; i < scope.textures.size(); ++i) {
- Texture* texture = ToBackend(scope.textures[i]);
-
- // Clear subresources that are not render attachments. Render attachments will be
- // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
- // subresource has not been initialized before the render pass.
- scope.textureUsages[i].Iterate(
- [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- if (usage & ~wgpu::TextureUsage::RenderAttachment) {
- texture->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- });
- texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
- &imageBarriers, &srcStages, &dstStages);
- }
-
- if (bufferBarriers.size() || imageBarriers.size()) {
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
- 0, 0, nullptr, bufferBarriers.size(),
- bufferBarriers.data(), imageBarriers.size(),
- imageBarriers.data());
- }
- }
-
- MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
- Device* device,
- BeginRenderPassCmd* renderPass) {
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- // Query a VkRenderPass from the cache
- VkRenderPass renderPassVK = VK_NULL_HANDLE;
- {
- RenderPassCacheQuery query;
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- const auto& attachmentInfo = renderPass->colorAttachments[i];
-
- bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
-
- query.SetColor(i, attachmentInfo.view->GetFormat().format,
- attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- const auto& attachmentInfo = renderPass->depthStencilAttachment;
-
- query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
- attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
- attachmentInfo.stencilLoadOp,
- attachmentInfo.stencilStoreOp);
- }
-
- query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
-
- DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
- }
-
- // Create a framebuffer that will be used once for the render pass and gather the clear
- // values for the attachments at the same time.
- std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
- VkFramebuffer framebuffer = VK_NULL_HANDLE;
- uint32_t attachmentCount = 0;
- {
- // Fill in the attachment info that will be chained in the framebuffer create info.
- std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
-
- attachments[attachmentCount] = view->GetHandle();
-
- switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Float: {
- const std::array<float, 4> appliedClearColor =
- ConvertToFloatColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.float32[i] =
- appliedClearColor[i];
- }
- break;
- }
- case wgpu::TextureComponentType::Uint: {
- const std::array<uint32_t, 4> appliedClearColor =
- ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
- }
- break;
- }
- case wgpu::TextureComponentType::Sint: {
- const std::array<int32_t, 4> appliedClearColor =
- ConvertToSignedIntegerColor(attachmentInfo.clearColor);
- for (uint32_t i = 0; i < 4; ++i) {
- clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
- }
- break;
- }
-
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- attachmentCount++;
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- TextureView* view = ToBackend(attachmentInfo.view.Get());
-
- attachments[attachmentCount] = view->GetHandle();
-
- clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
- clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
-
- attachmentCount++;
- }
-
- for (ColorAttachmentIndex i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
- TextureView* view =
- ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
-
- attachments[attachmentCount] = view->GetHandle();
-
- attachmentCount++;
- }
- }
-
- // Chain attachments and create the framebuffer
- VkFramebufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.renderPass = renderPassVK;
- createInfo.attachmentCount = attachmentCount;
- createInfo.pAttachments = AsVkArray(attachments.data());
- createInfo.width = renderPass->width;
- createInfo.height = renderPass->height;
- createInfo.layers = 1;
-
- DAWN_TRY(
- CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
- nullptr, &*framebuffer),
- "CreateFramebuffer"));
-
- // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
- // commands currently being recorded are finished.
- device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
- }
-
- VkRenderPassBeginInfo beginInfo;
- beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
- beginInfo.pNext = nullptr;
- beginInfo.renderPass = renderPassVK;
- beginInfo.framebuffer = framebuffer;
- beginInfo.renderArea.offset.x = 0;
- beginInfo.renderArea.offset.y = 0;
- beginInfo.renderArea.extent.width = renderPass->width;
- beginInfo.renderArea.extent.height = renderPass->height;
- beginInfo.clearValueCount = attachmentCount;
- beginInfo.pClearValues = clearValues.data();
-
- device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
-
- return {};
- }
-
- // Reset the query sets used on render pass because the reset command must be called outside
- // render pass.
- void ResetUsedQuerySetsOnRenderPass(Device* device,
- VkCommandBuffer commands,
- QuerySetBase* querySet,
- const std::vector<bool>& availability) {
- ASSERT(availability.size() == querySet->GetQueryAvailability().size());
-
- auto currentIt = availability.begin();
- auto lastIt = availability.end();
- // Traverse the used queries which availability are true.
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No used queries need to be reset
- if (firstTrueIt == lastIt) {
- break;
- }
-
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
-
- uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
- uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
-
- // Reset the queries between firstTrueIt and nextFalseIt (which is at most
- // lastIt)
- device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
- queryCount);
-
- // Set current iterator to next false
- currentIt = nextFalseIt;
- }
- }
-
- void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
- Device* device,
- WriteTimestampCmd* cmd) {
- VkCommandBuffer commands = recordingContext->commandBuffer;
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
- device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
- querySet->GetHandle(), cmd->queryIndex);
- }
-
- void RecordResolveQuerySetCmd(VkCommandBuffer commands,
- Device* device,
- QuerySet* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- Buffer* destination,
- uint64_t destinationOffset) {
- const std::vector<bool>& availability = querySet->GetQueryAvailability();
-
- auto currentIt = availability.begin() + firstQuery;
- auto lastIt = availability.begin() + firstQuery + queryCount;
-
- // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
- while (currentIt != lastIt) {
- auto firstTrueIt = std::find(currentIt, lastIt, true);
- // No available query found for resolving
- if (firstTrueIt == lastIt) {
- break;
- }
- auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
-
- // The query index of firstTrueIt where the resolving starts
- uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
- // The queries count between firstTrueIt and nextFalseIt need to be resolved
- uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
-
- // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
- uint32_t resolveDestinationOffset =
- destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
-
- // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
- device->fn.CmdCopyQueryPoolResults(
- commands, querySet->GetHandle(), resolveQueryIndex, resolveQueryCount,
- destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
- VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
-
- // Set current iterator to next false
- currentIt = nextFalseIt;
- }
- }
-
- } // anonymous namespace
-
- // static
- Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return AcquireRef(new CommandBuffer(encoder, descriptor));
- }
-
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
- }
-
- void CommandBuffer::RecordCopyImageWithTemporaryBuffer(
- CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize) {
- ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
- ASSERT(srcCopy.aspect == dstCopy.aspect);
- dawn_native::Format format = srcCopy.texture->GetFormat();
- const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
- ASSERT(copySize.width % blockInfo.width == 0);
- uint32_t widthInBlocks = copySize.width / blockInfo.width;
- ASSERT(copySize.height % blockInfo.height == 0);
- uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
- // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
- // because it isn't a hard constraint in Vulkan.
- uint64_t tempBufferSize =
- widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
- BufferDescriptor tempBufferDescriptor;
- tempBufferDescriptor.size = tempBufferSize;
- tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-
- Device* device = ToBackend(GetDevice());
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- Ref<Buffer> tempBuffer =
- AcquireRef(ToBackend(device->APICreateBuffer(&tempBufferDescriptor)));
-
- BufferCopy tempBufferCopy;
- tempBufferCopy.buffer = tempBuffer.Get();
- tempBufferCopy.rowsPerImage = heightInBlocks;
- tempBufferCopy.offset = 0;
- tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
-
- VkCommandBuffer commands = recordingContext->commandBuffer;
- VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
- VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
-
- tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- VkBufferImageCopy srcToTempBufferRegion =
- ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
-
- // The Dawn CopySrc usage is always mapped to GENERAL
- device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
-
- tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
- VkBufferImageCopy tempBufferToDstRegion =
- ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
- &tempBufferToDstRegion);
-
- recordingContext->tempBuffers.emplace_back(tempBuffer);
- }
-
- MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- // Records the necessary barriers for the resource usage pre-computed by the frontend.
- // And resets the used query sets which are rewritten on the render pass.
- auto PrepareResourcesForRenderPass = [](Device* device,
- CommandRecordingContext* recordingContext,
- const RenderPassResourceUsage& usages) {
- TransitionAndClearForSyncScope(device, recordingContext, usages);
-
- // Reset all query set used on current render pass together before beginning render pass
- // because the reset command must be called outside render pass
- for (size_t i = 0; i < usages.querySets.size(); ++i) {
- ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
- usages.querySets[i], usages.queryAvailabilities[i]);
- }
- };
-
- size_t nextComputePassNumber = 0;
- size_t nextRenderPassNumber = 0;
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- if (copy->size == 0) {
- // Skip no-op copies.
- break;
- }
-
- Buffer* srcBuffer = ToBackend(copy->source.Get());
- Buffer* dstBuffer = ToBackend(copy->destination.Get());
-
- srcBuffer->EnsureDataInitialized(recordingContext);
- dstBuffer->EnsureDataInitializedAsDestination(
- recordingContext, copy->destinationOffset, copy->size);
-
- srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
- VkBufferCopy region;
- region.srcOffset = copy->sourceOffset;
- region.dstOffset = copy->destinationOffset;
- region.size = copy->size;
-
- VkBuffer srcHandle = srcBuffer->GetHandle();
- VkBuffer dstHandle = dstBuffer->GetHandle();
- device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
- break;
- }
-
- case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
-
- ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
-
- VkBufferImageCopy region =
- ComputeBufferImageCopyRegion(src, dst, copy->copySize);
- VkImageSubresourceLayers subresource = region.imageSubresource;
-
- ASSERT(dst.texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange range =
- GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- subresource.mipLevel)) {
- // Since texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- ToBackend(src.buffer)
- ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
- ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
- VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
- VkImage dstImage = ToBackend(dst.texture)->GetHandle();
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
- &region);
- break;
- }
-
- case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- auto& src = copy->source;
- auto& dst = copy->destination;
-
- ToBackend(dst.buffer)
- ->EnsureDataInitializedAsDestination(recordingContext, copy);
-
- VkBufferImageCopy region =
- ComputeBufferImageCopyRegion(dst, src, copy->copySize);
-
- ASSERT(src.texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange range =
- GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
-
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, range);
-
- ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
- ToBackend(dst.buffer)
- ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
- VkImage srcImage = ToBackend(src.texture)->GetHandle();
- VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
- // The Dawn CopySrc usage is always mapped to GENERAL
- device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- dstBuffer, 1, &region);
- break;
- }
-
- case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
- mCommands.NextCommand<CopyTextureToTextureCmd>();
- if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
- copy->copySize.depthOrArrayLayers == 0) {
- // Skip no-op copies.
- continue;
- }
- TextureCopy& src = copy->source;
- TextureCopy& dst = copy->destination;
- SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
- SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-
- ToBackend(src.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
- if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
- dst.mipLevel)) {
- // Since destination texture has been overwritten, it has been "initialized"
- dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
- } else {
- ToBackend(dst.texture)
- ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
- }
-
- if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
- // When there are overlapped subresources, the layout of the overlapped
- // subresources should all be GENERAL instead of what we set now. Currently
- // it is not allowed to copy with overlapped subresources, but we still
- // add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
- copy->copySize.depthOrArrayLayers));
- }
-
- // TODO after Yunchao's CL
- ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
- srcRange);
- ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
- dstRange);
-
- // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
- // because as Vulkan SPEC always validates image copies with the virtual size of
- // the image subresource, when the extent that fits in the copy region of one
- // subresource but does not fit in the one of another subresource, we will fail
- // to find a valid extent to satisfy the requirements on both source and
- // destination image subresource. For example, when the source is the first
- // level of a 16x16 texture in BC format, and the destination is the third level
- // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
- // the extent of vkCmdCopyImage.
- // Our workaround for this issue is replacing the texture-to-texture copy with
- // one texture-to-buffer copy and one buffer-to-texture copy.
- bool copyUsingTemporaryBuffer =
- device->IsToggleEnabled(
- Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
- src.texture->GetFormat().isCompressed &&
- !HasSameTextureCopyExtent(src, dst, copy->copySize);
-
- if (!copyUsingTemporaryBuffer) {
- VkImage srcImage = ToBackend(src.texture)->GetHandle();
- VkImage dstImage = ToBackend(dst.texture)->GetHandle();
-
- for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
- ASSERT(dst.texture->GetFormat().aspects & aspect);
- VkImageCopy region =
- ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
- // the copy command.
- device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
- dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- 1, &region);
- }
- } else {
- RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
- copy->copySize);
- }
-
- break;
- }
-
- case Command::ClearBuffer: {
- ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
- if (cmd->size == 0) {
- // Skip no-op fills.
- break;
- }
-
- Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
- bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
- recordingContext, cmd->offset, cmd->size);
-
- if (!clearedToZero) {
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
- device->fn.CmdFillBuffer(recordingContext->commandBuffer,
- dstBuffer->GetHandle(), cmd->offset, cmd->size,
- 0u);
- }
-
- break;
- }
-
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-
- PrepareResourcesForRenderPass(
- device, recordingContext,
- GetResourceUsages().renderPasses[nextRenderPassNumber]);
-
- LazyClearRenderPassAttachments(cmd);
- DAWN_TRY(RecordRenderPass(recordingContext, cmd));
-
- nextRenderPassNumber++;
- break;
- }
-
- case Command::BeginComputePass: {
- mCommands.NextCommand<BeginComputePassCmd>();
-
- DAWN_TRY(RecordComputePass(
- recordingContext,
- GetResourceUsages().computePasses[nextComputePassNumber]));
-
- nextComputePassNumber++;
- break;
- }
-
- case Command::ResolveQuerySet: {
- ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
- QuerySet* querySet = ToBackend(cmd->querySet.Get());
- Buffer* destination = ToBackend(cmd->destination.Get());
-
- destination->EnsureDataInitializedAsDestination(
- recordingContext, cmd->destinationOffset,
- cmd->queryCount * sizeof(uint64_t));
-
- // vkCmdCopyQueryPoolResults only can retrieve available queries because
- // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
- // as 0s, we need to clear the resolving region of the destination buffer to 0s.
- auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
- auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
- cmd->queryCount;
- bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
- if (hasUnavailableQueries) {
- destination->TransitionUsageNow(recordingContext,
- wgpu::BufferUsage::CopyDst);
- device->fn.CmdFillBuffer(commands, destination->GetHandle(),
- cmd->destinationOffset,
- cmd->queryCount * sizeof(uint64_t), 0u);
- }
-
- destination->TransitionUsageNow(recordingContext,
- wgpu::BufferUsage::QueryResolve);
-
- RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
- cmd->queryCount, destination, cmd->destinationOffset);
-
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- // The query must be reset between uses.
- device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
- cmd->queryIndex, 1);
-
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
- }
-
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::InsertDebugMarker);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- mCommands.NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(&mCommands, Command::PopDebugGroup);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::PushDebugGroup);
- }
- break;
- }
-
- case Command::WriteBuffer: {
- WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
- const uint64_t offset = write->offset;
- const uint64_t size = write->size;
- if (size == 0) {
- continue;
- }
-
- Buffer* dstBuffer = ToBackend(write->buffer.Get());
- uint8_t* data = mCommands.NextData<uint8_t>(size);
- Device* device = ToBackend(GetDevice());
-
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial(),
- kCopyBufferToBufferOffsetAlignment));
- ASSERT(uploadHandle.mappedBuffer != nullptr);
- memcpy(uploadHandle.mappedBuffer, data, size);
-
- dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
-
- dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
- VkBufferCopy copy;
- copy.srcOffset = uploadHandle.startOffset;
- copy.dstOffset = offset;
- copy.size = size;
-
- device->fn.CmdCopyBuffer(
- commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
- dstBuffer->GetHandle(), 1, &copy);
- break;
- }
-
- default:
- break;
- }
- }
-
- return {};
- }
-
- MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
- const ComputePassResourceUsage& resourceUsages) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- uint64_t currentDispatch = 0;
- DescriptorSetTracker descriptorSets = {};
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndComputePass: {
- mCommands.NextCommand<EndComputePassCmd>();
- return {};
- }
-
- case Command::Dispatch: {
- DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
-
- device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
- currentDispatch++;
- break;
- }
-
- case Command::DispatchIndirect: {
- DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
-
- TransitionAndClearForSyncScope(device, recordingContext,
- resourceUsages.dispatchUsages[currentDispatch]);
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
-
- device->fn.CmdDispatchIndirect(
- commands, indirectBuffer,
- static_cast<VkDeviceSize>(dispatch->indirectOffset));
- currentDispatch++;
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
-
- BindGroup* bindGroup = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
-
- case Command::SetComputePipeline: {
- SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
- ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
- device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
- pipeline->GetHandle());
- descriptorSets.OnSetPipeline(pipeline);
- break;
- }
-
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::InsertDebugMarker);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- mCommands.NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(&mCommands, Command::PopDebugGroup);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
- const char* label = mCommands.NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(&mCommands, Command::PushDebugGroup);
- }
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- // The query must be reset between uses.
- device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
- cmd->queryIndex, 1);
-
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
- }
-
- default:
- UNREACHABLE();
- }
- }
-
- // EndComputePass should have been called
- UNREACHABLE();
- }
-
- MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPassCmd) {
- Device* device = ToBackend(GetDevice());
- VkCommandBuffer commands = recordingContext->commandBuffer;
-
- DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
-
- // Set the default value for the dynamic state
- {
- device->fn.CmdSetLineWidth(commands, 1.0f);
- device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
-
- device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
-
- float blendConstants[4] = {
- 0.0f,
- 0.0f,
- 0.0f,
- 0.0f,
- };
- device->fn.CmdSetBlendConstants(commands, blendConstants);
-
- // The viewport and scissor default to cover all of the attachments
- VkViewport viewport;
- viewport.x = 0.0f;
- viewport.y = static_cast<float>(renderPassCmd->height);
- viewport.width = static_cast<float>(renderPassCmd->width);
- viewport.height = -static_cast<float>(renderPassCmd->height);
- viewport.minDepth = 0.0f;
- viewport.maxDepth = 1.0f;
- device->fn.CmdSetViewport(commands, 0, 1, &viewport);
-
- VkRect2D scissorRect;
- scissorRect.offset.x = 0;
- scissorRect.offset.y = 0;
- scissorRect.extent.width = renderPassCmd->width;
- scissorRect.extent.height = renderPassCmd->height;
- device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
- }
-
- DescriptorSetTracker descriptorSets = {};
- RenderPipeline* lastPipeline = nullptr;
-
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
- switch (type) {
- case Command::Draw: {
- DrawCmd* draw = iter->NextCommand<DrawCmd>();
-
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
- draw->firstVertex, draw->firstInstance);
- break;
- }
-
- case Command::DrawIndexed: {
- DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
- draw->firstIndex, draw->baseVertex,
- draw->firstInstance);
- break;
- }
-
- case Command::DrawIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
- static_cast<VkDeviceSize>(draw->indirectOffset), 1,
- 0);
- break;
- }
-
- case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
- ASSERT(buffer != nullptr);
-
- descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
- device->fn.CmdDrawIndexedIndirect(
- commands, buffer->GetHandle(),
- static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
- break;
- }
-
- case Command::InsertDebugMarker: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(iter, Command::InsertDebugMarker);
- }
- break;
- }
-
- case Command::PopDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- iter->NextCommand<PopDebugGroupCmd>();
- device->fn.CmdEndDebugUtilsLabelEXT(commands);
- } else {
- SkipCommand(iter, Command::PopDebugGroup);
- }
- break;
- }
-
- case Command::PushDebugGroup: {
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
- const char* label = iter->NextData<char>(cmd->length + 1);
- VkDebugUtilsLabelEXT utilsLabel;
- utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
- utilsLabel.pNext = nullptr;
- utilsLabel.pLabelName = label;
- // Default color to black
- utilsLabel.color[0] = 0.0;
- utilsLabel.color[1] = 0.0;
- utilsLabel.color[2] = 0.0;
- utilsLabel.color[3] = 1.0;
- device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
- } else {
- SkipCommand(iter, Command::PushDebugGroup);
- }
- break;
- }
-
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- BindGroup* bindGroup = ToBackend(cmd->group.Get());
- uint32_t* dynamicOffsets = nullptr;
- if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
-
- descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
- dynamicOffsets);
- break;
- }
-
- case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
- VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
-
- device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
- VulkanIndexType(cmd->format));
- break;
- }
-
- case Command::SetRenderPipeline: {
- SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
- RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
- device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
- pipeline->GetHandle());
- lastPipeline = pipeline;
-
- descriptorSets.OnSetPipeline(pipeline);
- break;
- }
-
- case Command::SetVertexBuffer: {
- SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
- VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
-
- device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
- &*buffer, &offset);
- break;
- }
-
- default:
- UNREACHABLE();
- break;
- }
- };
-
- Command type;
- while (mCommands.NextCommandId(&type)) {
- switch (type) {
- case Command::EndRenderPass: {
- mCommands.NextCommand<EndRenderPassCmd>();
- device->fn.CmdEndRenderPass(commands);
- return {};
- }
-
- case Command::SetBlendConstant: {
- SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
- const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
- device->fn.CmdSetBlendConstants(commands, blendConstants.data());
- break;
- }
-
- case Command::SetStencilReference: {
- SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
- device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
- cmd->reference);
- break;
- }
-
- case Command::SetViewport: {
- SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
- VkViewport viewport;
- viewport.x = cmd->x;
- viewport.y = cmd->y + cmd->height;
- viewport.width = cmd->width;
- viewport.height = -cmd->height;
- viewport.minDepth = cmd->minDepth;
- viewport.maxDepth = cmd->maxDepth;
-
- // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
- // height = 0 so use that to do an empty viewport.
- if (viewport.width == 0) {
- viewport.height = 0;
-
- // Set the viewport x range to a range that's always valid.
- viewport.x = 0;
- viewport.width = 1;
- }
-
- device->fn.CmdSetViewport(commands, 0, 1, &viewport);
- break;
- }
-
- case Command::SetScissorRect: {
- SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
- VkRect2D rect;
- rect.offset.x = cmd->x;
- rect.offset.y = cmd->y;
- rect.extent.width = cmd->width;
- rect.extent.height = cmd->height;
-
- device->fn.CmdSetScissor(commands, 0, 1, &rect);
- break;
- }
-
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
- auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* iter = bundles[i]->GetCommands();
- iter->Reset();
- while (iter->NextCommandId(&type)) {
- EncodeRenderBundleCommand(iter, type);
- }
- }
- break;
- }
-
- case Command::BeginOcclusionQuery: {
- BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
-
- device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
- cmd->queryIndex, 0);
- break;
- }
-
- case Command::EndOcclusionQuery: {
- EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
-
- device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
- cmd->queryIndex);
- break;
- }
-
- case Command::WriteTimestamp: {
- WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
- RecordWriteTimestampCmd(recordingContext, device, cmd);
- break;
- }
-
- default: {
- EncodeRenderBundleCommand(&mCommands, type);
- break;
- }
- }
- }
-
- // EndRenderPass should have been called
- UNREACHABLE();
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
deleted file mode 100644
index d5d603b611f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
-#define DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
-
-#include "dawn_native/CommandBuffer.h"
-#include "dawn_native/Error.h"
-
-#include "common/vulkan_platform.h"
-
-namespace dawn_native {
- struct BeginRenderPassCmd;
- struct TextureCopy;
-} // namespace dawn_native
-
-namespace dawn_native { namespace vulkan {
-
- struct CommandRecordingContext;
- class Device;
-
- class CommandBuffer final : public CommandBufferBase {
- public:
- static Ref<CommandBuffer> Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordCommands(CommandRecordingContext* recordingContext);
-
- private:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-
- MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
- const ComputePassResourceUsage& resourceUsages);
- MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
- BeginRenderPassCmd* renderPass);
- void RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
- const TextureCopy& srcCopy,
- const TextureCopy& dstCopy,
- const Extent3D& copySize);
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h
deleted file mode 100644
index a8dd68d2e7f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandRecordingContext.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
-#define DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
-
-#include "common/vulkan_platform.h"
-
-#include "dawn_native/vulkan/BufferVk.h"
-
-namespace dawn_native { namespace vulkan {
- // Used to track operations that are handled after recording.
- // Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
- struct CommandRecordingContext {
- VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
- std::vector<VkSemaphore> waitSemaphores = {};
- std::vector<VkSemaphore> signalSemaphores = {};
-
- // The internal buffers used in the workaround of texture-to-texture copies with compressed
- // formats.
- std::vector<Ref<Buffer>> tempBuffers;
-
- // For Device state tracking only.
- VkCommandPool commandPool = VK_NULL_HANDLE;
- bool used = false;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
deleted file mode 100644
index 77ceba7deec..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/ComputePipelineVk.h"
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-#include "dawn_native/vulkan/ShaderModuleVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- // static
- Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor) {
- return AcquireRef(new ComputePipeline(device, descriptor));
- }
-
- MaybeError ComputePipeline::Initialize() {
- VkComputePipelineCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.layout = ToBackend(GetLayout())->GetHandle();
- createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
- createInfo.basePipelineIndex = -1;
-
- createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- createInfo.stage.pNext = nullptr;
- createInfo.stage.flags = 0;
- createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
- // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
- const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
- DAWN_TRY_ASSIGN(createInfo.stage.module,
- ToBackend(computeStage.module.Get())
- ->GetTransformedModuleHandle(computeStage.entryPoint.c_str(),
- ToBackend(GetLayout())));
-
- createInfo.stage.pName = computeStage.entryPoint.c_str();
-
- std::vector<OverridableConstantScalar> specializationDataEntries;
- std::vector<VkSpecializationMapEntry> specializationMapEntries;
- VkSpecializationInfo specializationInfo{};
- createInfo.stage.pSpecializationInfo =
- GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
- &specializationMapEntries);
-
- Device* device = ToBackend(GetDevice());
-
- PNextChainBuilder stageExtChain(&createInfo.stage);
-
- VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
- uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
- if (computeSubgroupSize != 0u) {
- ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
- subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
- stageExtChain.Add(
- &subgroupSizeInfo,
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
- }
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
- &createInfo, nullptr, &*mHandle),
- "CreateComputePipeline"));
-
- SetLabelImpl();
-
- return {};
- }
-
- void ComputePipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_ComputePipeline", GetLabel());
- }
-
- ComputePipeline::~ComputePipeline() = default;
-
- void ComputePipeline::DestroyImpl() {
- ComputePipelineBase::DestroyImpl();
-
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- VkPipeline ComputePipeline::GetHandle() const {
- return mHandle;
- }
-
- void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
- userdata);
- CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
deleted file mode 100644
index 26bf4c27134..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
-#define DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
-
-#include "dawn_native/ComputePipeline.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class ComputePipeline final : public ComputePipelineBase {
- public:
- static Ref<ComputePipeline> CreateUninitialized(
- Device* device,
- const ComputePipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
-
- VkPipeline GetHandle() const;
-
- MaybeError Initialize() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~ComputePipeline() override;
- void DestroyImpl() override;
- using ComputePipelineBase::ComputePipelineBase;
-
- VkPipeline mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_COMPUTEPIPELINEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocation.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocation.h
deleted file mode 100644
index 6a708e1ae37..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocation.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
-#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
-
-#include "common/vulkan_platform.h"
-
-namespace dawn_native { namespace vulkan {
-
- // Contains a descriptor set along with data necessary to track its allocation.
- struct DescriptorSetAllocation {
- VkDescriptorSet set = VK_NULL_HANDLE;
- uint32_t poolIndex;
- uint16_t setIndex;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp
deleted file mode 100644
index 3f43f67a6fa..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/DescriptorSetAllocator.h"
-
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- // TODO(enga): Figure out this value.
- static constexpr uint32_t kMaxDescriptorsPerPool = 512;
-
- // static
- Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
- return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
- }
-
- DescriptorSetAllocator::DescriptorSetAllocator(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
- : ObjectBase(layout->GetDevice()), mLayout(layout) {
- ASSERT(layout != nullptr);
-
- // Compute the total number of descriptors for this layout.
- uint32_t totalDescriptorCount = 0;
- mPoolSizes.reserve(descriptorCountPerType.size());
- for (const auto& it : descriptorCountPerType) {
- ASSERT(it.second > 0);
- totalDescriptorCount += it.second;
- mPoolSizes.push_back(VkDescriptorPoolSize{it.first, it.second});
- }
-
- if (totalDescriptorCount == 0) {
- // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
- // number of pools, each of which has non-zero descriptor counts.
- // Since the descriptor set layout is empty, we should be able to allocate
- // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
- // The type of this descriptor pool doesn't matter because it is never used.
- mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
- mMaxSets = kMaxDescriptorsPerPool;
- } else {
- ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
- static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool, "");
-
- // Compute the total number of descriptors sets that fits given the max.
- mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
- ASSERT(mMaxSets > 0);
-
- // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
- for (auto& poolSize : mPoolSizes) {
- poolSize.descriptorCount *= mMaxSets;
- }
- }
- }
-
- DescriptorSetAllocator::~DescriptorSetAllocator() {
- for (auto& pool : mDescriptorPools) {
- ASSERT(pool.freeSetIndices.size() == mMaxSets);
- if (pool.vkPool != VK_NULL_HANDLE) {
- Device* device = ToBackend(GetDevice());
- device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
- }
- }
- }
-
- ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
- if (mAvailableDescriptorPoolIndices.empty()) {
- DAWN_TRY(AllocateDescriptorPool());
- }
-
- ASSERT(!mAvailableDescriptorPoolIndices.empty());
-
- const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
- DescriptorPool* pool = &mDescriptorPools[poolIndex];
-
- ASSERT(!pool->freeSetIndices.empty());
-
- SetIndex setIndex = pool->freeSetIndices.back();
- pool->freeSetIndices.pop_back();
-
- if (pool->freeSetIndices.empty()) {
- mAvailableDescriptorPoolIndices.pop_back();
- }
-
- return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
- }
-
- void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
- ASSERT(allocationInfo != nullptr);
- ASSERT(allocationInfo->set != VK_NULL_HANDLE);
-
- // We can't reuse the descriptor set right away because the Vulkan spec says in the
- // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
- // host execution of the command and the end of the draw/dispatch.
- Device* device = ToBackend(GetDevice());
- const ExecutionSerial serial = device->GetPendingCommandSerial();
- mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex},
- serial);
-
- if (mLastDeallocationSerial != serial) {
- device->EnqueueDeferredDeallocation(this);
- mLastDeallocationSerial = serial;
- }
-
- // Clear the content of allocation so that use after frees are more visible.
- *allocationInfo = {};
- }
-
- void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
- for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
- ASSERT(dealloc.poolIndex < mDescriptorPools.size());
-
- auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
- if (freeSetIndices.empty()) {
- mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
- }
- freeSetIndices.emplace_back(dealloc.setIndex);
- }
- mPendingDeallocations.ClearUpTo(completedSerial);
- }
-
- MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
- VkDescriptorPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.maxSets = mMaxSets;
- createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
- createInfo.pPoolSizes = mPoolSizes.data();
-
- Device* device = ToBackend(GetDevice());
-
- VkDescriptorPool descriptorPool;
- DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
- nullptr, &*descriptorPool),
- "CreateDescriptorPool"));
-
- std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
-
- VkDescriptorSetAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.descriptorPool = descriptorPool;
- allocateInfo.descriptorSetCount = mMaxSets;
- allocateInfo.pSetLayouts = AsVkArray(layouts.data());
-
- std::vector<VkDescriptorSet> sets(mMaxSets);
- MaybeError result =
- CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
- AsVkArray(sets.data())),
- "AllocateDescriptorSets");
- if (result.IsError()) {
- // On an error we can destroy the pool immediately because no command references it.
- device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
- DAWN_TRY(std::move(result));
- }
-
- std::vector<SetIndex> freeSetIndices;
- freeSetIndices.reserve(mMaxSets);
-
- for (SetIndex i = 0; i < mMaxSets; ++i) {
- freeSetIndices.push_back(i);
- }
-
- mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
- mDescriptorPools.emplace_back(
- DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
-
- return {};
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.h
deleted file mode 100644
index ef7eba1b495..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
-#define DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
-
-#include "common/SerialQueue.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/ObjectBase.h"
-#include "dawn_native/vulkan/DescriptorSetAllocation.h"
-
-#include <map>
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- class BindGroupLayout;
-
- class DescriptorSetAllocator : public ObjectBase {
- using PoolIndex = uint32_t;
- using SetIndex = uint16_t;
-
- public:
- static Ref<DescriptorSetAllocator> Create(
- BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
-
- ResultOrError<DescriptorSetAllocation> Allocate();
- void Deallocate(DescriptorSetAllocation* allocationInfo);
- void FinishDeallocation(ExecutionSerial completedSerial);
-
- private:
- DescriptorSetAllocator(BindGroupLayout* layout,
- std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
- ~DescriptorSetAllocator();
-
- MaybeError AllocateDescriptorPool();
-
- BindGroupLayout* mLayout;
-
- std::vector<VkDescriptorPoolSize> mPoolSizes;
- SetIndex mMaxSets;
-
- struct DescriptorPool {
- VkDescriptorPool vkPool;
- std::vector<VkDescriptorSet> sets;
- std::vector<SetIndex> freeSetIndices;
- };
-
- std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
- std::vector<DescriptorPool> mDescriptorPools;
-
- struct Deallocation {
- PoolIndex poolIndex;
- SetIndex setIndex;
- };
- SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
- ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
deleted file mode 100644
index 791818c967d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ /dev/null
@@ -1,1017 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/DeviceVk.h"
-
-#include "common/Platform.h"
-#include "dawn_native/BackendConnection.h"
-#include "dawn_native/ChainUtils_autogen.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/ErrorData.h"
-#include "dawn_native/VulkanBackend.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/BindGroupVk.h"
-#include "dawn_native/vulkan/BufferVk.h"
-#include "dawn_native/vulkan/CommandBufferVk.h"
-#include "dawn_native/vulkan/ComputePipelineVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-#include "dawn_native/vulkan/QuerySetVk.h"
-#include "dawn_native/vulkan/QueueVk.h"
-#include "dawn_native/vulkan/RenderPassCache.h"
-#include "dawn_native/vulkan/RenderPipelineVk.h"
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-#include "dawn_native/vulkan/SamplerVk.h"
-#include "dawn_native/vulkan/ShaderModuleVk.h"
-#include "dawn_native/vulkan/StagingBufferVk.h"
-#include "dawn_native/vulkan/SwapChainVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- // static
- ResultOrError<Device*> Device::Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor) {
- Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
- DAWN_TRY(device->Initialize());
- return device.Detach();
- }
-
- Device::Device(Adapter* adapter, const DawnDeviceDescriptor* descriptor)
- : DeviceBase(adapter, descriptor) {
- InitTogglesFromDriver();
- }
-
- MaybeError Device::Initialize() {
- // Copy the adapter's device info to the device so that we can change the "knobs"
- mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
-
- // Initialize the "instance" procs of our local function table.
- VulkanFunctions* functions = GetMutableFunctions();
- *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
-
- // Two things are crucial if device initialization fails: the function pointers to destroy
- // objects, and the fence deleter that calls these functions. Do not do anything before
- // these two are set up, so that a failed initialization doesn't cause a crash in
- // DestroyImpl()
- {
- VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
-
- VulkanDeviceKnobs usedDeviceKnobs = {};
- DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
- *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
-
- DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
-
- // The queue can be loaded before the fenced deleter because their lifetime is tied to
- // the device.
- GatherQueueFromDevice();
-
- mDeleter = std::make_unique<FencedDeleter>(this);
- }
-
- mRenderPassCache = std::make_unique<RenderPassCache>(this);
- mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
-
- mExternalMemoryService = std::make_unique<external_memory::Service>(this);
- mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
-
- DAWN_TRY(PrepareRecordingContext());
-
- // The environment can request to use D32S8 or D24S8 when it's not available. Override
- // the decision if it is not applicable.
- ApplyDepth24PlusS8Toggle();
-
- return DeviceBase::Initialize(Queue::Create(this));
- }
-
- Device::~Device() {
- Destroy();
- }
-
- ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) {
- return BindGroup::Create(this, descriptor);
- }
- ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) {
- return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
- }
- ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
- ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return CommandBuffer::Create(encoder, descriptor);
- }
- Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) {
- return ComputePipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) {
- return PipelineLayout::Create(this, descriptor);
- }
- ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) {
- return QuerySet::Create(this, descriptor);
- }
- Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::CreateUninitialized(this, descriptor);
- }
- ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return Sampler::Create(this, descriptor);
- }
- ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- return ShaderModule::Create(this, descriptor, parseResult);
- }
- ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) {
- return OldSwapChain::Create(this, descriptor);
- }
- ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- return SwapChain::Create(this, surface, previousSwapChain, descriptor);
- }
- ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return Texture::Create(this, descriptor);
- }
- ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- return TextureView::Create(texture, descriptor);
- }
- void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
- }
- void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
- }
-
- MaybeError Device::TickImpl() {
- RecycleCompletedCommands();
-
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
-
- for (Ref<DescriptorSetAllocator>& allocator :
- mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
- allocator->FinishDeallocation(completedSerial);
- }
-
- mResourceMemoryAllocator->Tick(completedSerial);
- mDeleter->Tick(completedSerial);
- mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
-
- if (mRecordingContext.used) {
- DAWN_TRY(SubmitPendingCommands());
- }
-
- return {};
- }
-
- VkInstance Device::GetVkInstance() const {
- return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
- }
- const VulkanDeviceInfo& Device::GetDeviceInfo() const {
- return mDeviceInfo;
- }
-
- const VulkanGlobalInfo& Device::GetGlobalInfo() const {
- return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
- }
-
- VkDevice Device::GetVkDevice() const {
- return mVkDevice;
- }
-
- uint32_t Device::GetGraphicsQueueFamily() const {
- return mQueueFamily;
- }
-
- VkQueue Device::GetQueue() const {
- return mQueue;
- }
-
- FencedDeleter* Device::GetFencedDeleter() const {
- return mDeleter.get();
- }
-
- RenderPassCache* Device::GetRenderPassCache() const {
- return mRenderPassCache.get();
- }
-
- ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
- return mResourceMemoryAllocator.get();
- }
-
- void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
- mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
- }
-
- CommandRecordingContext* Device::GetPendingRecordingContext() {
- ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
- mRecordingContext.used = true;
- return &mRecordingContext;
- }
-
- MaybeError Device::SubmitPendingCommands() {
- if (!mRecordingContext.used) {
- return {};
- }
-
- DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
- "vkEndCommandBuffer"));
-
- std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
- VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
-
- VkSubmitInfo submitInfo;
- submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submitInfo.pNext = nullptr;
- submitInfo.waitSemaphoreCount =
- static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
- submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
- submitInfo.pWaitDstStageMask = dstStageMasks.data();
- submitInfo.commandBufferCount = 1;
- submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
- submitInfo.signalSemaphoreCount =
- static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
- submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
-
- VkFence fence = VK_NULL_HANDLE;
- DAWN_TRY_ASSIGN(fence, GetUnusedFence());
- DAWN_TRY_WITH_CLEANUP(
- CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
- // If submitting to the queue fails, move the fence back into the unused fence
- // list, as if it were never acquired. Not doing so would leak the fence since
- // it would be neither in the unused list nor in the in-flight list.
- mUnusedFences.push_back(fence);
- });
-
- // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
- // soon as the current submission is finished.
- for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
- for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
-
- IncrementLastSubmittedCommandSerial();
- ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
- mFencesInFlight.emplace(fence, lastSubmittedSerial);
-
- CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
- mRecordingContext.commandBuffer};
- mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
- mRecordingContext = CommandRecordingContext();
- DAWN_TRY(PrepareRecordingContext());
-
- return {};
- }
-
- ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
- VulkanDeviceKnobs usedKnobs = {};
-
- // Default to asking for all avilable known extensions.
- usedKnobs.extensions = mDeviceInfo.extensions;
-
- // However only request the extensions that haven't been promoted in the device's apiVersion
- std::vector<const char*> extensionNames;
- for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
- const DeviceExtInfo& info = GetDeviceExtInfo(ext);
-
- if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
- extensionNames.push_back(info.name);
- }
- }
-
- // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
- // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
- // promoted as a core API in Vulkan 1.1.
- //
- // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
- // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
- VkPhysicalDeviceFeatures2 features2 = {};
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- PNextChainBuilder featuresChain(&features2);
-
- // Required for core WebGPU features.
- usedKnobs.features.depthBiasClamp = VK_TRUE;
- usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
- usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
- usedKnobs.features.imageCubeArray = VK_TRUE;
- usedKnobs.features.independentBlend = VK_TRUE;
- usedKnobs.features.sampleRateShading = VK_TRUE;
-
- if (IsRobustnessEnabled()) {
- usedKnobs.features.robustBufferAccess = VK_TRUE;
- }
-
- if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
- ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
-
- // Always request all the features from VK_EXT_subgroup_size_control when available.
- usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
- featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
-
- mComputeSubgroupSize = FindComputeSubgroupSize();
- }
-
- if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
- usedKnobs.features.samplerAnisotropy = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
- VK_TRUE);
- usedKnobs.features.textureCompressionBC = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
- VK_TRUE);
- usedKnobs.features.textureCompressionETC2 = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
- VK_TRUE);
- usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
- VK_TRUE);
- usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
- }
-
- if (IsFeatureEnabled(Feature::ShaderFloat16)) {
- const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
- ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
- deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
- deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
- deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
- deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
-
- usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
- usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
- usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
-
- featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
- featuresChain.Add(&usedKnobs._16BitStorageFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
- }
-
- if (IsFeatureEnabled(Feature::DepthClamping)) {
- ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
- usedKnobs.features.depthClamp = VK_TRUE;
- }
-
- // Find a universal queue family
- {
- // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
- constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
- int universalQueueFamily = -1;
- for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
- if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
- kUniversalFlags) {
- universalQueueFamily = i;
- break;
- }
- }
-
- if (universalQueueFamily == -1) {
- return DAWN_INTERNAL_ERROR("No universal queue family");
- }
- mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
- }
-
- // Choose to create a single universal queue
- std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
- float zero = 0.0f;
- {
- VkDeviceQueueCreateInfo queueCreateInfo;
- queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
- queueCreateInfo.pNext = nullptr;
- queueCreateInfo.flags = 0;
- queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
- queueCreateInfo.queueCount = 1;
- queueCreateInfo.pQueuePriorities = &zero;
-
- queuesToRequest.push_back(queueCreateInfo);
- }
-
- VkDeviceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
- createInfo.pQueueCreateInfos = queuesToRequest.data();
- createInfo.enabledLayerCount = 0;
- createInfo.ppEnabledLayerNames = nullptr;
- createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
- createInfo.ppEnabledExtensionNames = extensionNames.data();
-
- // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
- // covered by VkPhysicalDeviceFeatures can be enabled.
- if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
- features2.features = usedKnobs.features;
- createInfo.pNext = &features2;
- createInfo.pEnabledFeatures = nullptr;
- } else {
- ASSERT(features2.pNext == nullptr);
- createInfo.pEnabledFeatures = &usedKnobs.features;
- }
-
- DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
- "vkCreateDevice"));
-
- return usedKnobs;
- }
-
- uint32_t Device::FindComputeSubgroupSize() const {
- if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
- return 0;
- }
-
- const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
- mDeviceInfo.subgroupSizeControlProperties;
-
- if (ext.minSubgroupSize == ext.maxSubgroupSize) {
- return 0;
- }
-
- // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
- // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
- // following heuristics, which may need to be adjusted in the future for other
- // architectures, or if a specific API is added to let client code select the size..
- //
- // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
- uint32_t subgroupSize = ext.minSubgroupSize * 2;
- if (subgroupSize <= ext.maxSubgroupSize) {
- return subgroupSize;
- } else {
- return ext.minSubgroupSize;
- }
- }
-
- void Device::GatherQueueFromDevice() {
- fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
- }
-
- void Device::InitTogglesFromDriver() {
- // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
- // Vulkan SPEC and drivers.
- SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
-
- // By default try to use D32S8 for Depth24PlusStencil8
- SetToggle(Toggle::VulkanUseD32S8, true);
- }
-
- void Device::ApplyDepth24PlusS8Toggle() {
- bool supportsD32s8 =
- ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
- bool supportsD24s8 =
- ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
-
- ASSERT(supportsD32s8 || supportsD24s8);
-
- if (!supportsD24s8) {
- ForceSetToggle(Toggle::VulkanUseD32S8, true);
- }
- if (!supportsD32s8) {
- ForceSetToggle(Toggle::VulkanUseD32S8, false);
- }
- }
-
- VulkanFunctions* Device::GetMutableFunctions() {
- return const_cast<VulkanFunctions*>(&fn);
- }
-
- ResultOrError<VkFence> Device::GetUnusedFence() {
- if (!mUnusedFences.empty()) {
- VkFence fence = mUnusedFences.back();
- DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
-
- mUnusedFences.pop_back();
- return fence;
- }
-
- VkFenceCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
-
- VkFence fence = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
- "vkCreateFence"));
-
- return fence;
- }
-
- ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
- ExecutionSerial fenceSerial(0);
- while (!mFencesInFlight.empty()) {
- VkFence fence = mFencesInFlight.front().first;
- ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
- VkResult result = VkResult::WrapUnsafe(
- INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
-
- // Fence are added in order, so we can stop searching as soon
- // as we see one that's not ready.
- if (result == VK_NOT_READY) {
- return fenceSerial;
- } else {
- DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
- }
-
- // Update fenceSerial since fence is ready.
- fenceSerial = tentativeSerial;
-
- mUnusedFences.push_back(fence);
-
- ASSERT(fenceSerial > GetCompletedCommandSerial());
- mFencesInFlight.pop();
- }
- return fenceSerial;
- }
-
- MaybeError Device::PrepareRecordingContext() {
- ASSERT(!mRecordingContext.used);
- ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
- ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
-
- // First try to recycle unused command pools.
- if (!mUnusedCommands.empty()) {
- CommandPoolAndBuffer commands = mUnusedCommands.back();
- mUnusedCommands.pop_back();
- DAWN_TRY_WITH_CLEANUP(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
- "vkResetCommandPool"),
- {
- // vkResetCommandPool failed (it may return out-of-memory).
- // Free the commands in the cleanup step before returning to
- // reclaim memory.
-
- // The VkCommandBuffer memory should be wholly owned by the
- // pool and freed when it is destroyed, but that's not the
- // case in some drivers and they leak memory. So we call
- // FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, commands.pool, 1,
- &commands.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
- });
-
- mRecordingContext.commandBuffer = commands.commandBuffer;
- mRecordingContext.commandPool = commands.pool;
- } else {
- // Create a new command pool for our commands and allocate the command buffer.
- VkCommandPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
- createInfo.queueFamilyIndex = mQueueFamily;
-
- DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
- &*mRecordingContext.commandPool),
- "vkCreateCommandPool"));
-
- VkCommandBufferAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.commandPool = mRecordingContext.commandPool;
- allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
- allocateInfo.commandBufferCount = 1;
-
- DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
- &mRecordingContext.commandBuffer),
- "vkAllocateCommandBuffers"));
- }
-
- // Start the recording of commands in the command buffer.
- VkCommandBufferBeginInfo beginInfo;
- beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
- beginInfo.pNext = nullptr;
- beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
- beginInfo.pInheritanceInfo = nullptr;
-
- return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
- "vkBeginCommandBuffer");
- }
-
- void Device::RecycleCompletedCommands() {
- for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
- mUnusedCommands.push_back(commands);
- }
- mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
- }
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
- std::unique_ptr<StagingBufferBase> stagingBuffer =
- std::make_unique<StagingBuffer>(size, this);
- DAWN_TRY(stagingBuffer->Initialize());
- return std::move(stagingBuffer);
- }
-
- MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
- // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
- // calling this function.
- ASSERT(size != 0);
-
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
-
- ToBackend(destination)
- ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
-
- // There is no need of a barrier to make host writes available and visible to the copy
- // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
- // does an implicit availability, visibility and domain operation.
-
- // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
- // buffer.
- ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
- VkBufferCopy copy;
- copy.srcOffset = sourceOffset;
- copy.dstOffset = destinationOffset;
- copy.size = size;
-
- this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
- ToBackend(source)->GetBufferHandle(),
- ToBackend(destination)->GetHandle(), 1, &copy);
-
- return {};
- }
-
- MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) {
- // There is no need of a barrier to make host writes available and visible to the copy
- // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
- // does an implicit availability, visibility and domain operation.
-
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
-
- VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
- VkImageSubresourceLayers subresource = region.imageSubresource;
-
- ASSERT(dst->texture->GetDimension() != wgpu::TextureDimension::e1D);
- SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
-
- if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
- subresource.mipLevel)) {
- // Since texture has been overwritten, it has been "initialized"
- dst->texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
- }
- // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
- // texture.
- ToBackend(dst->texture)
- ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
- VkImage dstImage = ToBackend(dst->texture)->GetHandle();
-
- // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
- // copy command.
- this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
- ToBackend(source)->GetBufferHandle(), dstImage,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
- return {};
- }
-
- MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- VkImage image,
- const std::vector<ExternalSemaphoreHandle>& waitHandles,
- VkSemaphore* outSignalSemaphore,
- VkDeviceMemory* outAllocation,
- std::vector<VkSemaphore>* outWaitSemaphores) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
- FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
-
- wgpu::TextureUsage usage = textureDescriptor->usage;
- if (internalUsageDesc != nullptr) {
- usage |= internalUsageDesc->internalUsage;
- }
-
- // Check services support this combination of handle type / image info
- DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
- "External semaphore usage not supported");
-
- DAWN_INVALID_IF(
- !mExternalMemoryService->SupportsImportMemory(
- VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
- VK_IMAGE_TILING_OPTIMAL,
- VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
- VK_IMAGE_CREATE_ALIAS_BIT_KHR),
- "External memory usage not supported");
-
- // Create an external semaphore to signal when the texture is done being used
- DAWN_TRY_ASSIGN(*outSignalSemaphore,
- mExternalSemaphoreService->CreateExportableSemaphore());
-
- // Import the external image's memory
- external_memory::MemoryImportParams importParams;
- DAWN_TRY_ASSIGN(importParams,
- mExternalMemoryService->GetMemoryImportParams(descriptor, image));
- DAWN_TRY_ASSIGN(*outAllocation,
- mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
-
- // Import semaphores we have to wait on before using the texture
- for (const ExternalSemaphoreHandle& handle : waitHandles) {
- VkSemaphore semaphore = VK_NULL_HANDLE;
- DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
- outWaitSemaphores->push_back(semaphore);
- }
-
- return {};
- }
-
- bool Device::SignalAndExportExternalTexture(
- Texture* texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info,
- std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
- return !ConsumedError([&]() -> MaybeError {
- DAWN_TRY(ValidateObject(texture));
-
- VkSemaphore signalSemaphore;
- VkImageLayout releasedOldLayout;
- VkImageLayout releasedNewLayout;
- DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore,
- &releasedOldLayout, &releasedNewLayout));
-
- ExternalSemaphoreHandle semaphoreHandle;
- DAWN_TRY_ASSIGN(semaphoreHandle,
- mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
- semaphoreHandles->push_back(semaphoreHandle);
- info->releasedOldLayout = releasedOldLayout;
- info->releasedNewLayout = releasedNewLayout;
- info->isInitialized =
- texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
-
- return {};
- }());
- }
-
- TextureBase* Device::CreateTextureWrappingVulkanImage(
- const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- const std::vector<ExternalSemaphoreHandle>& waitHandles) {
- const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
- // Initial validation
- if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
- return nullptr;
- }
- if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
- "validating that a Vulkan image can be wrapped with %s.",
- textureDescriptor)) {
- return nullptr;
- }
-
- VkSemaphore signalSemaphore = VK_NULL_HANDLE;
- VkDeviceMemory allocation = VK_NULL_HANDLE;
- std::vector<VkSemaphore> waitSemaphores;
- waitSemaphores.reserve(waitHandles.size());
-
- // Cleanup in case of a failure, the image creation doesn't acquire the external objects
- // if a failure happems.
- Texture* result = nullptr;
- // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
- if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
- mExternalMemoryService.get()),
- &result) ||
- ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
- waitHandles, &signalSemaphore, &allocation,
- &waitSemaphores)) ||
- ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
- waitSemaphores))) {
- // Delete the Texture if it was created
- if (result != nullptr) {
- result->Release();
- }
-
- // Clear the signal semaphore
- fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
-
- // Clear image memory
- fn.FreeMemory(GetVkDevice(), allocation, nullptr);
-
- // Clear any wait semaphores we were able to import
- for (VkSemaphore semaphore : waitSemaphores) {
- fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
- }
- return nullptr;
- }
-
- return result;
- }
-
- uint32_t Device::GetComputeSubgroupSize() const {
- return mComputeSubgroupSize;
- }
-
- MaybeError Device::WaitForIdleForDestruction() {
- // Immediately tag the recording context as unused so we don't try to submit it in Tick.
- // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
- // ShutDownImpl
- if (mRecordingContext.used) {
- CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
- mRecordingContext.commandBuffer};
- mUnusedCommands.push_back(commands);
- mRecordingContext = CommandRecordingContext();
- }
-
- VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
- // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
- // about, Device lost, which means workloads running on the GPU are no longer accessible
- // (so they are as good as waited on) or success.
- DAWN_UNUSED(waitIdleResult);
-
- // Make sure all fences are complete by explicitly waiting on them all
- while (!mFencesInFlight.empty()) {
- VkFence fence = mFencesInFlight.front().first;
- ExecutionSerial fenceSerial = mFencesInFlight.front().second;
- ASSERT(fenceSerial > GetCompletedCommandSerial());
-
- VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
- do {
- // If WaitForIdleForDesctruction is called while we are Disconnected, it means that
- // the device lost came from the ErrorInjector and we need to wait without allowing
- // any more error to be injected. This is because the device lost was "fake" and
- // commands might still be running.
- if (GetState() == State::Disconnected) {
- result = VkResult::WrapUnsafe(
- fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
- continue;
- }
-
- result = VkResult::WrapUnsafe(
- INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
- VK_ERROR_DEVICE_LOST));
- } while (result == VK_TIMEOUT);
- // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
- // about (and we need to keep going with the destruction of all fences), or device
- // loss, which means the workload on the GPU is no longer accessible and we can
- // safely destroy the fence.
-
- fn.DestroyFence(mVkDevice, fence, nullptr);
- mFencesInFlight.pop();
- }
- return {};
- }
-
- void Device::DestroyImpl() {
- ASSERT(GetState() == State::Disconnected);
-
- // We failed during initialization so early that we don't even have a VkDevice. There is
- // nothing to do.
- if (mVkDevice == VK_NULL_HANDLE) {
- return;
- }
-
- // The deleter is the second thing we initialize. If it is not present, it means that
- // only the VkDevice was created and nothing else. Destroy the device and do nothing else
- // because the function pointers might not have been loaded (and there is nothing to
- // destroy anyway).
- if (mDeleter == nullptr) {
- fn.DestroyDevice(mVkDevice, nullptr);
- mVkDevice = VK_NULL_HANDLE;
- return;
- }
-
- // Enough of the Device's initialization happened that we can now do regular robust
- // deinitialization.
-
- // Immediately tag the recording context as unused so we don't try to submit it in Tick.
- mRecordingContext.used = false;
- if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
- // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
- // destroyed, but that's not the case in some drivers and the leak memory.
- // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
- &mRecordingContext.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
- }
-
- for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
- fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
- }
- mRecordingContext.waitSemaphores.clear();
-
- for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
- fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
- }
- mRecordingContext.signalSemaphores.clear();
-
- // Some commands might still be marked as in-flight if we shut down because of a device
- // loss. Recycle them as unused so that we free them below.
- RecycleCompletedCommands();
- ASSERT(mCommandsInFlight.Empty());
-
- for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
- // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
- // destroyed, but that's not the case in some drivers and the leak memory.
- // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
- // TODO(enga): Only do this on a known list of bad drivers.
- fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
- fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
- }
- mUnusedCommands.clear();
-
- // Some fences might still be marked as in-flight if we shut down because of a device loss.
- // Delete them since at this point all commands are complete.
- while (!mFencesInFlight.empty()) {
- fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
- mFencesInFlight.pop();
- }
-
- for (VkFence fence : mUnusedFences) {
- fn.DestroyFence(mVkDevice, fence, nullptr);
- }
- mUnusedFences.clear();
-
- ExecutionSerial completedSerial = GetCompletedCommandSerial();
- for (Ref<DescriptorSetAllocator>& allocator :
- mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
- allocator->FinishDeallocation(completedSerial);
- }
-
- // Releasing the uploader enqueues buffers to be released.
- // Call Tick() again to clear them before releasing the deleter.
- mResourceMemoryAllocator->Tick(completedSerial);
- mDeleter->Tick(completedSerial);
- mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
-
- // Allow recycled memory to be deleted.
- mResourceMemoryAllocator->DestroyPool();
-
- // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
- // to them are guaranteed to be finished executing.
- mRenderPassCache = nullptr;
-
- // We need handle deleting all child objects by calling Tick() again with a large serial to
- // force all operations to look as if they were completed, and delete all objects before
- // destroying the Deleter and vkDevice.
- ASSERT(mDeleter != nullptr);
- mDeleter->Tick(kMaxExecutionSerial);
- mDeleter = nullptr;
-
- // VkQueues are destroyed when the VkDevice is destroyed
- // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
- // child objects have been deleted.
- ASSERT(mVkDevice != VK_NULL_HANDLE);
- fn.DestroyDevice(mVkDevice, nullptr);
- mVkDevice = VK_NULL_HANDLE;
- }
-
- uint32_t Device::GetOptimalBytesPerRowAlignment() const {
- return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
- }
-
- uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
- return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
- }
-
- float Device::GetTimestampPeriodInNS() const {
- return mDeviceInfo.properties.limits.timestampPeriod;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
deleted file mode 100644
index 1c697a84aaf..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_DEVICEVK_H_
-#define DAWNNATIVE_VULKAN_DEVICEVK_H_
-
-#include "dawn_native/dawn_platform.h"
-
-#include "common/SerialQueue.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/Device.h"
-#include "dawn_native/vulkan/CommandRecordingContext.h"
-#include "dawn_native/vulkan/DescriptorSetAllocator.h"
-#include "dawn_native/vulkan/Forward.h"
-#include "dawn_native/vulkan/VulkanFunctions.h"
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-#include "dawn_native/vulkan/external_semaphore/SemaphoreService.h"
-
-#include <memory>
-#include <queue>
-
-namespace dawn_native { namespace vulkan {
-
- class Adapter;
- class BindGroupLayout;
- class BufferUploader;
- class FencedDeleter;
- class RenderPassCache;
- class ResourceMemoryAllocator;
-
- class Device final : public DeviceBase {
- public:
- static ResultOrError<Device*> Create(Adapter* adapter,
- const DawnDeviceDescriptor* descriptor);
- ~Device() override;
-
- MaybeError Initialize();
-
- // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
- const VulkanFunctions fn;
-
- VkInstance GetVkInstance() const;
- const VulkanDeviceInfo& GetDeviceInfo() const;
- const VulkanGlobalInfo& GetGlobalInfo() const;
- VkDevice GetVkDevice() const;
- uint32_t GetGraphicsQueueFamily() const;
- VkQueue GetQueue() const;
-
- FencedDeleter* GetFencedDeleter() const;
- RenderPassCache* GetRenderPassCache() const;
- ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
-
- CommandRecordingContext* GetPendingRecordingContext();
- MaybeError SubmitPendingCommands();
-
- void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
-
- // Dawn Native API
-
- TextureBase* CreateTextureWrappingVulkanImage(
- const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- const std::vector<ExternalSemaphoreHandle>& waitHandles);
- bool SignalAndExportExternalTexture(Texture* texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info,
- std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
-
- ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
- CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
- MaybeError TickImpl() override;
-
- ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
- MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) override;
- MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
- const TextureDataLayout& src,
- TextureCopy* dst,
- const Extent3D& copySizePixels) override;
-
- // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
- // needs to be set.
- uint32_t GetComputeSubgroupSize() const;
-
- uint32_t GetOptimalBytesPerRowAlignment() const override;
- uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
-
- float GetTimestampPeriodInNS() const override;
-
- private:
- Device(Adapter* adapter, const DawnDeviceDescriptor* descriptor);
-
- ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
- const BindGroupDescriptor* descriptor) override;
- ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor,
- PipelineCompatibilityToken pipelineCompatibilityToken) override;
- ResultOrError<Ref<BufferBase>> CreateBufferImpl(
- const BufferDescriptor* descriptor) override;
- ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
- const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
- const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
- const SamplerDescriptor* descriptor) override;
- ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) override;
- ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) override;
- ResultOrError<Ref<TextureBase>> CreateTextureImpl(
- const TextureDescriptor* descriptor) override;
- ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) override;
- Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
- const ComputePipelineDescriptor* descriptor) override;
- Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) override;
- void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) override;
-
- ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
- void GatherQueueFromDevice();
-
- uint32_t FindComputeSubgroupSize() const;
- void InitTogglesFromDriver();
- void ApplyDepth24PlusS8Toggle();
-
- void DestroyImpl() override;
- MaybeError WaitForIdleForDestruction() override;
-
- // To make it easier to use fn it is a public const member. However
- // the Device is allowed to mutate them through these private methods.
- VulkanFunctions* GetMutableFunctions();
-
- VulkanDeviceInfo mDeviceInfo = {};
- VkDevice mVkDevice = VK_NULL_HANDLE;
- uint32_t mQueueFamily = 0;
- VkQueue mQueue = VK_NULL_HANDLE;
- uint32_t mComputeSubgroupSize = 0;
-
- SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
- mDescriptorAllocatorsPendingDeallocation;
- std::unique_ptr<FencedDeleter> mDeleter;
- std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
- std::unique_ptr<RenderPassCache> mRenderPassCache;
-
- std::unique_ptr<external_memory::Service> mExternalMemoryService;
- std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
-
- ResultOrError<VkFence> GetUnusedFence();
- ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-
- // We track which operations are in flight on the GPU with an increasing serial.
- // This works only because we have a single queue. Each submit to a queue is associated
- // to a serial and a fence, such that when the fence is "ready" we know the operations
- // have finished.
- std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
- // Fences in the unused list aren't reset yet.
- std::vector<VkFence> mUnusedFences;
-
- MaybeError PrepareRecordingContext();
- void RecycleCompletedCommands();
-
- struct CommandPoolAndBuffer {
- VkCommandPool pool = VK_NULL_HANDLE;
- VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
- };
- SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
- // Command pools in the unused list haven't been reset yet.
- std::vector<CommandPoolAndBuffer> mUnusedCommands;
- // There is always a valid recording context stored in mRecordingContext
- CommandRecordingContext mRecordingContext;
-
- MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
- ExternalMemoryHandle memoryHandle,
- VkImage image,
- const std::vector<ExternalSemaphoreHandle>& waitHandles,
- VkSemaphore* outSignalSemaphore,
- VkDeviceMemory* outAllocation,
- std::vector<VkSemaphore>* outWaitSemaphores);
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_DEVICEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h
deleted file mode 100644
index 45206b36f6d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ExternalHandle.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
-#define DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
-
-#include "common/vulkan_platform.h"
-
-namespace dawn_native { namespace vulkan {
-
-#if DAWN_PLATFORM_LINUX
- // File descriptor
- using ExternalMemoryHandle = int;
- // File descriptor
- using ExternalSemaphoreHandle = int;
-#elif DAWN_PLATFORM_FUCHSIA
- // Really a Zircon vmo handle.
- using ExternalMemoryHandle = zx_handle_t;
- // Really a Zircon event handle.
- using ExternalSemaphoreHandle = zx_handle_t;
-#else
- // Generic types so that the Null service can compile, not used for real handles
- using ExternalMemoryHandle = void*;
- using ExternalSemaphoreHandle = void*;
-#endif
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_EXTERNALHANDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp
deleted file mode 100644
index 4086f0cc5c3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/FencedDeleter.h"
-
-#include "dawn_native/vulkan/DeviceVk.h"
-
-namespace dawn_native { namespace vulkan {
-
- FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {
- }
-
- FencedDeleter::~FencedDeleter() {
- ASSERT(mBuffersToDelete.Empty());
- ASSERT(mDescriptorPoolsToDelete.Empty());
- ASSERT(mFramebuffersToDelete.Empty());
- ASSERT(mImagesToDelete.Empty());
- ASSERT(mImageViewsToDelete.Empty());
- ASSERT(mMemoriesToDelete.Empty());
- ASSERT(mPipelinesToDelete.Empty());
- ASSERT(mPipelineLayoutsToDelete.Empty());
- ASSERT(mQueryPoolsToDelete.Empty());
- ASSERT(mRenderPassesToDelete.Empty());
- ASSERT(mSamplersToDelete.Empty());
- ASSERT(mSemaphoresToDelete.Empty());
- ASSERT(mShaderModulesToDelete.Empty());
- ASSERT(mSurfacesToDelete.Empty());
- ASSERT(mSwapChainsToDelete.Empty());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
- mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
- mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
- mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
- mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkImage image) {
- mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkImageView view) {
- mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
- mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
- mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
- mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
- mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
- mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
- mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
- mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
- mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
- mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
- }
-
- void FencedDeleter::Tick(ExecutionSerial completedSerial) {
- VkDevice vkDevice = mDevice->GetVkDevice();
- VkInstance instance = mDevice->GetVkInstance();
-
- // Buffers and images must be deleted before memories because it is invalid to free memory
- // that still have resources bound to it.
- for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
- }
- mBuffersToDelete.ClearUpTo(completedSerial);
- for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyImage(vkDevice, image, nullptr);
- }
- mImagesToDelete.ClearUpTo(completedSerial);
-
- for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
- }
- mMemoriesToDelete.ClearUpTo(completedSerial);
-
- for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
- }
- mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
-
- for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
- }
- mRenderPassesToDelete.ClearUpTo(completedSerial);
-
- for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
- }
- mFramebuffersToDelete.ClearUpTo(completedSerial);
-
- for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
- }
- mImageViewsToDelete.ClearUpTo(completedSerial);
-
- for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
- }
- mShaderModulesToDelete.ClearUpTo(completedSerial);
-
- for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
- }
- mPipelinesToDelete.ClearUpTo(completedSerial);
-
- // Vulkan swapchains must be destroyed before their corresponding VkSurface
- for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
- }
- mSwapChainsToDelete.ClearUpTo(completedSerial);
- for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
- }
- mSurfacesToDelete.ClearUpTo(completedSerial);
-
- for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
- }
- mSemaphoresToDelete.ClearUpTo(completedSerial);
-
- for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
- }
- mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
-
- for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
- }
- mQueryPoolsToDelete.ClearUpTo(completedSerial);
-
- for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
- mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
- }
- mSamplersToDelete.ClearUpTo(completedSerial);
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h
deleted file mode 100644
index 81e60280dc3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_FENCEDDELETER_H_
-#define DAWNNATIVE_VULKAN_FENCEDDELETER_H_
-
-#include "common/SerialQueue.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/IntegerTypes.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class FencedDeleter {
- public:
- FencedDeleter(Device* device);
- ~FencedDeleter();
-
- void DeleteWhenUnused(VkBuffer buffer);
- void DeleteWhenUnused(VkDescriptorPool pool);
- void DeleteWhenUnused(VkDeviceMemory memory);
- void DeleteWhenUnused(VkFramebuffer framebuffer);
- void DeleteWhenUnused(VkImage image);
- void DeleteWhenUnused(VkImageView view);
- void DeleteWhenUnused(VkPipelineLayout layout);
- void DeleteWhenUnused(VkRenderPass renderPass);
- void DeleteWhenUnused(VkPipeline pipeline);
- void DeleteWhenUnused(VkQueryPool querypool);
- void DeleteWhenUnused(VkSampler sampler);
- void DeleteWhenUnused(VkSemaphore semaphore);
- void DeleteWhenUnused(VkShaderModule module);
- void DeleteWhenUnused(VkSurfaceKHR surface);
- void DeleteWhenUnused(VkSwapchainKHR swapChain);
-
- void Tick(ExecutionSerial completedSerial);
-
- private:
- Device* mDevice = nullptr;
- SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
- SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
- SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
- SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
- SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
- SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
- SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
- SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
- SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
- SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
- SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
- SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
- SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
- SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
- SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_FENCEDDELETER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
deleted file mode 100644
index e11a74fe4aa..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_FORWARD_H_
-#define DAWNNATIVE_VULKAN_FORWARD_H_
-
-#include "dawn_native/ToBackend.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Adapter;
- class BindGroup;
- class BindGroupLayout;
- class Buffer;
- class CommandBuffer;
- class ComputePipeline;
- class Device;
- class PipelineLayout;
- class QuerySet;
- class Queue;
- class RenderPipeline;
- class ResourceHeap;
- class Sampler;
- class ShaderModule;
- class StagingBuffer;
- class SwapChain;
- class Texture;
- class TextureView;
-
- struct VulkanBackendTraits {
- using AdapterType = Adapter;
- using BindGroupType = BindGroup;
- using BindGroupLayoutType = BindGroupLayout;
- using BufferType = Buffer;
- using CommandBufferType = CommandBuffer;
- using ComputePipelineType = ComputePipeline;
- using DeviceType = Device;
- using PipelineLayoutType = PipelineLayout;
- using QuerySetType = QuerySet;
- using QueueType = Queue;
- using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = ResourceHeap;
- using SamplerType = Sampler;
- using ShaderModuleType = ShaderModule;
- using StagingBufferType = StagingBuffer;
- using SwapChainType = SwapChain;
- using TextureType = Texture;
- using TextureViewType = TextureView;
- };
-
- template <typename T>
- auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
- return ToBackendBase<VulkanBackendTraits>(common);
- }
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_FORWARD_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
deleted file mode 100644
index 9782a2bfbd1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/NativeSwapChainImplVk.h"
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/TextureVk.h"
-
-#include <limits>
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
- bool turnOffVsync,
- VkPresentModeKHR* presentMode) {
- if (turnOffVsync) {
- for (const auto& availablePresentMode : availablePresentModes) {
- if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
- *presentMode = availablePresentMode;
- return true;
- }
- }
- return false;
- }
-
- *presentMode = VK_PRESENT_MODE_FIFO_KHR;
- return true;
- }
-
- bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
- NativeSwapChainImpl::ChosenConfig* config,
- bool turnOffVsync) {
- VkPresentModeKHR presentMode;
- if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
- return false;
- }
- // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
- // driver. Need to generalize
- config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
- config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
- config->format = wgpu::TextureFormat::BGRA8Unorm;
- config->minImageCount = 3;
- // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
- // on Linux
- config->preTransform = info.capabilities.currentTransform;
- config->presentMode = presentMode;
- config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-
- return true;
- }
- } // anonymous namespace
-
- NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
- : mSurface(surface), mDevice(device) {
- // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
- // will return a correct result before a SwapChain is created.
- UpdateSurfaceConfig();
- }
-
- NativeSwapChainImpl::~NativeSwapChainImpl() {
- if (mSwapChain != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
- mSwapChain = VK_NULL_HANDLE;
- }
- if (mSurface != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
- mSurface = VK_NULL_HANDLE;
- }
- }
-
- void NativeSwapChainImpl::UpdateSurfaceConfig() {
- if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
- &mInfo)) {
- ASSERT(false);
- }
-
- if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
- ASSERT(false);
- }
- }
-
- void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
- UpdateSurfaceConfig();
- }
-
- DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- UpdateSurfaceConfig();
-
- ASSERT(mInfo.capabilities.minImageExtent.width <= width);
- ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
- ASSERT(mInfo.capabilities.minImageExtent.height <= height);
- ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
-
- ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
- // TODO(crbug.com/dawn/269): need to check usage works too
-
- // Create the swapchain with the configuration we chose
- VkSwapchainKHR oldSwapchain = mSwapChain;
- VkSwapchainCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.surface = mSurface;
- createInfo.minImageCount = mConfig.minImageCount;
- createInfo.imageFormat = mConfig.nativeFormat;
- createInfo.imageColorSpace = mConfig.colorSpace;
- createInfo.imageExtent.width = width;
- createInfo.imageExtent.height = height;
- createInfo.imageArrayLayers = 1;
- createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
- mDevice->GetValidInternalFormat(mConfig.format));
- createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.preTransform = mConfig.preTransform;
- createInfo.compositeAlpha = mConfig.compositeAlpha;
- createInfo.presentMode = mConfig.presentMode;
- createInfo.clipped = false;
- createInfo.oldSwapchain = oldSwapchain;
-
- if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
- &*mSwapChain) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- // Gather the swapchain's images. Implementations are allowed to return more images than the
- // number we asked for.
- uint32_t count = 0;
- if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
- nullptr) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- ASSERT(count >= mConfig.minImageCount);
- mSwapChainImages.resize(count);
- if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
- AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- if (oldSwapchain != VK_NULL_HANDLE) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
- }
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- // Transiently create a semaphore that will be signaled when the presentation engine is done
- // with the swapchain image. Further operations on the image will wait for this semaphore.
- VkSemaphore semaphore = VK_NULL_HANDLE;
- {
- VkSemaphoreCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
- &*semaphore) != VK_SUCCESS) {
- ASSERT(false);
- }
- }
-
- if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
- std::numeric_limits<uint64_t>::max(), semaphore,
- VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- nextTexture->texture.u64 =
-#if defined(DAWN_PLATFORM_64_BIT)
- reinterpret_cast<uint64_t>
-#endif
- (*mSwapChainImages[mLastImageIndex]);
- mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError NativeSwapChainImpl::Present() {
- // This assumes that the image has already been transitioned to the PRESENT layout and
- // writes were made available to the stage.
-
- // Assuming that the present queue is the same as the graphics queue, the proper
- // synchronization has already been done on the queue so we don't need to wait on any
- // semaphores.
- VkPresentInfoKHR presentInfo;
- presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
- presentInfo.pNext = nullptr;
- presentInfo.waitSemaphoreCount = 0;
- presentInfo.pWaitSemaphores = nullptr;
- presentInfo.swapchainCount = 1;
- presentInfo.pSwapchains = &*mSwapChain;
- presentInfo.pImageIndices = &mLastImageIndex;
- presentInfo.pResults = nullptr;
-
- VkQueue queue = mDevice->GetQueue();
- if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return mConfig.format;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h
deleted file mode 100644
index fe7a1820f51..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
-#define DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
-
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-#include "dawn/dawn_wsi.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class NativeSwapChainImpl {
- public:
- using WSIContext = DawnWSIContextVulkan;
-
- NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
- ~NativeSwapChainImpl();
-
- void Init(DawnWSIContextVulkan* context);
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage,
- uint32_t width,
- uint32_t height);
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
- DawnSwapChainError Present();
-
- wgpu::TextureFormat GetPreferredFormat() const;
-
- struct ChosenConfig {
- VkFormat nativeFormat;
- wgpu::TextureFormat format;
- VkColorSpaceKHR colorSpace;
- VkSurfaceTransformFlagBitsKHR preTransform;
- uint32_t minImageCount;
- VkPresentModeKHR presentMode;
- VkCompositeAlphaFlagBitsKHR compositeAlpha;
- };
-
- private:
- void UpdateSurfaceConfig();
-
- VkSurfaceKHR mSurface = VK_NULL_HANDLE;
- VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
- std::vector<VkImage> mSwapChainImages;
- uint32_t mLastImageIndex = 0;
-
- VulkanSurfaceInfo mInfo;
-
- ChosenConfig mConfig;
-
- Device* mDevice = nullptr;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
deleted file mode 100644
index e41a99b8b9d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-
-#include "common/BitSetIterator.h"
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- // static
- ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor) {
- Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
- DAWN_TRY(layout->Initialize());
- return layout;
- }
-
- MaybeError PipelineLayout::Initialize() {
- // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
- // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
- // this constraints at the Dawn level?
- uint32_t numSetLayouts = 0;
- std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
- for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
- setLayouts[numSetLayouts] = ToBackend(GetBindGroupLayout(setIndex))->GetHandle();
- numSetLayouts++;
- }
-
- VkPipelineLayoutCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.setLayoutCount = numSetLayouts;
- createInfo.pSetLayouts = AsVkArray(setLayouts.data());
- createInfo.pushConstantRangeCount = 0;
- createInfo.pPushConstantRanges = nullptr;
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreatePipelineLayout"));
-
- SetLabelImpl();
-
- return {};
- }
-
- PipelineLayout::~PipelineLayout() = default;
-
- void PipelineLayout::DestroyImpl() {
- PipelineLayoutBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- VkPipelineLayout PipelineLayout::GetHandle() const {
- return mHandle;
- }
-
- void PipelineLayout::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE_LAYOUT,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_PipelineLayout", GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
deleted file mode 100644
index 3cabde09a5e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
-#define DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
-
-#include "dawn_native/PipelineLayout.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class PipelineLayout final : public PipelineLayoutBase {
- public:
- static ResultOrError<Ref<PipelineLayout>> Create(
- Device* device,
- const PipelineLayoutDescriptor* descriptor);
-
- VkPipelineLayout GetHandle() const;
-
- private:
- ~PipelineLayout() override;
- void DestroyImpl() override;
-
- using PipelineLayoutBase::PipelineLayoutBase;
- MaybeError Initialize();
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkPipelineLayout mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_PIPELINELAYOUTVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
deleted file mode 100644
index 01d3a738c55..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/QuerySetVk.h"
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_platform/DawnPlatform.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
- VkQueryType VulkanQueryType(wgpu::QueryType type) {
- switch (type) {
- case wgpu::QueryType::Occlusion:
- return VK_QUERY_TYPE_OCCLUSION;
- case wgpu::QueryType::PipelineStatistics:
- return VK_QUERY_TYPE_PIPELINE_STATISTICS;
- case wgpu::QueryType::Timestamp:
- return VK_QUERY_TYPE_TIMESTAMP;
- }
- UNREACHABLE();
- }
-
- VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
- std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
- VkQueryPipelineStatisticFlags pipelineStatistics = 0;
- for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
- switch (pipelineStatisticsSet[i]) {
- case wgpu::PipelineStatisticName::ClipperInvocations:
- pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
- pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
- break;
- case wgpu::PipelineStatisticName::ComputeShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::FragmentShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
- break;
- case wgpu::PipelineStatisticName::VertexShaderInvocations:
- pipelineStatistics |=
- VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
- break;
- }
- }
-
- return pipelineStatistics;
- }
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
- Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
- DAWN_TRY(queryset->Initialize());
- return queryset;
- }
-
- MaybeError QuerySet::Initialize() {
- VkQueryPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
- createInfo.pNext = NULL;
- createInfo.flags = 0;
- createInfo.queryType = VulkanQueryType(GetQueryType());
- createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
- if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
- createInfo.pipelineStatistics =
- VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
- }
-
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkOOMThenSuccess(
- device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "vkCreateQueryPool"));
-
- SetLabelImpl();
-
- return {};
- }
-
- VkQueryPool QuerySet::GetHandle() const {
- return mHandle;
- }
-
- QuerySet::~QuerySet() = default;
-
- void QuerySet::DestroyImpl() {
- QuerySetBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- void QuerySet::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_QUERY_POOL,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_QuerySet", GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
deleted file mode 100644
index e219a0f054b..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_QUERYSETVK_H_
-#define DAWNNATIVE_VULKAN_QUERYSETVK_H_
-
-#include "dawn_native/QuerySet.h"
-
-#include "common/vulkan_platform.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class QuerySet final : public QuerySetBase {
- public:
- static ResultOrError<Ref<QuerySet>> Create(Device* device,
- const QuerySetDescriptor* descriptor);
-
- VkQueryPool GetHandle() const;
-
- private:
- ~QuerySet() override;
- using QuerySetBase::QuerySetBase;
- MaybeError Initialize();
-
- // Dawn API
- void DestroyImpl() override;
- void SetLabelImpl() override;
-
- VkQueryPool mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_QUERYSETVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
deleted file mode 100644
index 2cb1e69db89..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/QueueVk.h"
-
-#include "common/Math.h"
-#include "dawn_native/Buffer.h"
-#include "dawn_native/CommandValidation.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/vulkan/CommandBufferVk.h"
-#include "dawn_native/vulkan/CommandRecordingContext.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/tracing/TraceEvent.h"
-
-namespace dawn_native { namespace vulkan {
-
- // static
- Queue* Queue::Create(Device* device) {
- return new Queue(device);
- }
-
- Queue::Queue(Device* device) : QueueBase(device) {
- }
-
- Queue::~Queue() {
- }
-
- MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
- Device* device = ToBackend(GetDevice());
-
- DAWN_TRY(device->Tick());
-
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
- "CommandBufferVk::RecordCommands");
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
- }
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
-
- DAWN_TRY(device->SubmitPendingCommands());
-
- return {};
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
deleted file mode 100644
index b3aa66538e7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_QUEUEVK_H_
-#define DAWNNATIVE_VULKAN_QUEUEVK_H_
-
-#include "dawn_native/Queue.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class Queue final : public QueueBase {
- public:
- static Queue* Create(Device* device);
-
- private:
- Queue(Device* device);
- ~Queue() override;
- using QueueBase::QueueBase;
-
- MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_QUEUEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
deleted file mode 100644
index b91e46fb970..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/RenderPassCache.h"
-
-#include "common/BitSetIterator.h"
-#include "common/HashUtils.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
- VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
- switch (op) {
- case wgpu::LoadOp::Load:
- return VK_ATTACHMENT_LOAD_OP_LOAD;
- case wgpu::LoadOp::Clear:
- return VK_ATTACHMENT_LOAD_OP_CLEAR;
- }
- UNREACHABLE();
- }
-
- VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
- switch (op) {
- case wgpu::StoreOp::Store:
- return VK_ATTACHMENT_STORE_OP_STORE;
- case wgpu::StoreOp::Discard:
- return VK_ATTACHMENT_STORE_OP_DONT_CARE;
- }
- UNREACHABLE();
- }
- } // anonymous namespace
-
- // RenderPassCacheQuery
-
- void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
- wgpu::TextureFormat format,
- wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- bool hasResolveTarget) {
- colorMask.set(index);
- colorFormats[index] = format;
- colorLoadOp[index] = loadOp;
- colorStoreOp[index] = storeOp;
- resolveTargetMask[index] = hasResolveTarget;
- }
-
- void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
- wgpu::LoadOp depthLoadOpIn,
- wgpu::StoreOp depthStoreOpIn,
- wgpu::LoadOp stencilLoadOpIn,
- wgpu::StoreOp stencilStoreOpIn) {
- hasDepthStencil = true;
- depthStencilFormat = format;
- depthLoadOp = depthLoadOpIn;
- depthStoreOp = depthStoreOpIn;
- stencilLoadOp = stencilLoadOpIn;
- stencilStoreOp = stencilStoreOpIn;
- }
-
- void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
- this->sampleCount = sampleCount;
- }
-
- // RenderPassCache
-
- RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {
- }
-
- RenderPassCache::~RenderPassCache() {
- std::lock_guard<std::mutex> lock(mMutex);
- for (auto it : mCache) {
- mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), it.second, nullptr);
- }
-
- mCache.clear();
- }
-
- ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto it = mCache.find(query);
- if (it != mCache.end()) {
- return VkRenderPass(it->second);
- }
-
- VkRenderPass renderPass;
- DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
- mCache.emplace(query, renderPass);
- return renderPass;
- }
-
- ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
- const RenderPassCacheQuery& query) const {
- // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
- // Precompute them as they must be pointer-chained in VkSubpassDescription
- std::array<VkAttachmentReference, kMaxColorAttachments> colorAttachmentRefs;
- std::array<VkAttachmentReference, kMaxColorAttachments> resolveAttachmentRefs;
- VkAttachmentReference depthStencilAttachmentRef;
-
- // Contains the attachment description that will be chained in the create info
- // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
- constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
- std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
-
- VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
-
- uint32_t colorAttachmentIndex = 0;
- for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
- auto& attachmentRef = colorAttachmentRefs[colorAttachmentIndex];
- auto& attachmentDesc = attachmentDescs[colorAttachmentIndex];
-
- attachmentRef.attachment = colorAttachmentIndex;
- attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
- attachmentDesc.samples = vkSampleCount;
- attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
- attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
- attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- ++colorAttachmentIndex;
- }
-
- uint32_t attachmentCount = colorAttachmentIndex;
- VkAttachmentReference* depthStencilAttachment = nullptr;
- if (query.hasDepthStencil) {
- auto& attachmentDesc = attachmentDescs[attachmentCount];
-
- depthStencilAttachment = &depthStencilAttachmentRef;
-
- depthStencilAttachmentRef.attachment = attachmentCount;
- depthStencilAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
- attachmentDesc.samples = vkSampleCount;
- attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
- attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
- attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
- attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
- attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-
- ++attachmentCount;
- }
-
- uint32_t resolveAttachmentIndex = 0;
- for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
- auto& attachmentRef = resolveAttachmentRefs[resolveAttachmentIndex];
- auto& attachmentDesc = attachmentDescs[attachmentCount];
-
- attachmentRef.attachment = attachmentCount;
- attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
- attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
- attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
- attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
- attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- ++attachmentCount;
- ++resolveAttachmentIndex;
- }
-
- // All color attachments without a corresponding resolve attachment must be set to VK_ATTACHMENT_UNUSED
- for (; resolveAttachmentIndex < colorAttachmentIndex; resolveAttachmentIndex++) {
- auto& attachmentRef = resolveAttachmentRefs[resolveAttachmentIndex];
- attachmentRef.attachment = VK_ATTACHMENT_UNUSED;
- attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // The Khronos Vulkan validation layer will complain if not set
- }
-
- VkAttachmentReference* resolveTargetAttachmentRefs =
- query.resolveTargetMask.any() ? resolveAttachmentRefs.data() : nullptr;
-
- // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
- VkSubpassDescription subpassDesc;
- subpassDesc.flags = 0;
- subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
- subpassDesc.inputAttachmentCount = 0;
- subpassDesc.pInputAttachments = nullptr;
- subpassDesc.colorAttachmentCount = colorAttachmentIndex;
- subpassDesc.pColorAttachments = colorAttachmentRefs.data();
- subpassDesc.pResolveAttachments = resolveTargetAttachmentRefs;
- subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
- subpassDesc.preserveAttachmentCount = 0;
- subpassDesc.pPreserveAttachments = nullptr;
-
- // Chain everything in VkRenderPassCreateInfo
- VkRenderPassCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.attachmentCount = attachmentCount;
- createInfo.pAttachments = attachmentDescs.data();
- createInfo.subpassCount = 1;
- createInfo.pSubpasses = &subpassDesc;
- createInfo.dependencyCount = 0;
- createInfo.pDependencies = nullptr;
-
- // Create the render pass from the zillion parameters
- VkRenderPass renderPass;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
- nullptr, &*renderPass),
- "CreateRenderPass"));
- return renderPass;
- }
-
- // RenderPassCache
-
- size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
- size_t hash = Hash(query.colorMask);
-
- HashCombine(&hash, Hash(query.resolveTargetMask));
-
- for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
- HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i]);
- }
-
- HashCombine(&hash, query.hasDepthStencil);
- if (query.hasDepthStencil) {
- HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.stencilLoadOp);
- }
-
- HashCombine(&hash, query.sampleCount);
-
- return hash;
- }
-
- bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
- const RenderPassCacheQuery& b) const {
- if (a.colorMask != b.colorMask) {
- return false;
- }
-
- if (a.resolveTargetMask != b.resolveTargetMask) {
- return false;
- }
-
- if (a.sampleCount != b.sampleCount) {
- return false;
- }
-
- for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
- if ((a.colorFormats[i] != b.colorFormats[i]) ||
- (a.colorLoadOp[i] != b.colorLoadOp[i])) {
- return false;
- }
- }
-
- if (a.hasDepthStencil != b.hasDepthStencil) {
- return false;
- }
-
- if (a.hasDepthStencil) {
- if ((a.depthStencilFormat != b.depthStencilFormat) ||
- (a.depthLoadOp != b.depthLoadOp) || (a.stencilLoadOp != b.stencilLoadOp)) {
- return false;
- }
- }
-
- return true;
- }
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
deleted file mode 100644
index 4503e1fe5e0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
-#define DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
-
-#include "common/Constants.h"
-#include "common/ityp_array.h"
-#include "common/ityp_bitset.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/dawn_platform.h"
-
-#include <array>
-#include <bitset>
-#include <mutex>
-#include <unordered_map>
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- // This is a key to query the RenderPassCache, it can be sparse meaning that only the
- // information for bits set in colorMask or hasDepthStencil need to be provided and the rest can
- // be uninintialized.
- struct RenderPassCacheQuery {
- // Use these helpers to build the query, they make sure all relevant data is initialized and
- // masks set.
- void SetColor(ColorAttachmentIndex index,
- wgpu::TextureFormat format,
- wgpu::LoadOp loadOp,
- wgpu::StoreOp storeOp,
- bool hasResolveTarget);
- void SetDepthStencil(wgpu::TextureFormat format,
- wgpu::LoadOp depthLoadOp,
- wgpu::StoreOp depthStoreOp,
- wgpu::LoadOp stencilLoadOp,
- wgpu::StoreOp stencilStoreOp);
- void SetSampleCount(uint32_t sampleCount);
-
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
- ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
- ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
- ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
- ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
-
- bool hasDepthStencil = false;
- wgpu::TextureFormat depthStencilFormat;
- wgpu::LoadOp depthLoadOp;
- wgpu::StoreOp depthStoreOp;
- wgpu::LoadOp stencilLoadOp;
- wgpu::StoreOp stencilStoreOp;
-
- uint32_t sampleCount;
- };
-
- // Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
- // render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
- // when creating render pass and framebuffer so that we can always make sure the order of
- // attachments in the rendering pipeline matches the one of the framebuffer.
- // All the operations on RenderPassCache are guaranteed to be thread-safe.
- // TODO(cwallez@chromium.org): Make it an LRU cache somehow?
- class RenderPassCache {
- public:
- RenderPassCache(Device* device);
- ~RenderPassCache();
-
- ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
-
- private:
- // Does the actual VkRenderPass creation on a cache miss.
- ResultOrError<VkRenderPass> CreateRenderPassForQuery(
- const RenderPassCacheQuery& query) const;
-
- // Implements the functors necessary for to use RenderPassCacheQueries as unordered_map
- // keys.
- struct CacheFuncs {
- size_t operator()(const RenderPassCacheQuery& query) const;
- bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
- };
- using Cache =
- std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
-
- Device* mDevice = nullptr;
-
- std::mutex mMutex;
- Cache mCache;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_RENDERPASSCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
deleted file mode 100644
index 556b582b524..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ /dev/null
@@ -1,625 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/RenderPipelineVk.h"
-
-#include "dawn_native/CreatePipelineAsyncTask.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-#include "dawn_native/vulkan/RenderPassCache.h"
-#include "dawn_native/vulkan/ShaderModuleVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
- switch (stepMode) {
- case wgpu::VertexStepMode::Vertex:
- return VK_VERTEX_INPUT_RATE_VERTEX;
- case wgpu::VertexStepMode::Instance:
- return VK_VERTEX_INPUT_RATE_INSTANCE;
- }
- UNREACHABLE();
- }
-
- VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- return VK_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::Uint8x4:
- return VK_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Sint8x2:
- return VK_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Sint8x4:
- return VK_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::Unorm8x2:
- return VK_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::Unorm8x4:
- return VK_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Snorm8x2:
- return VK_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Snorm8x4:
- return VK_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::Uint16x2:
- return VK_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::Uint16x4:
- return VK_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Sint16x2:
- return VK_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Sint16x4:
- return VK_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::Unorm16x2:
- return VK_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::Unorm16x4:
- return VK_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Snorm16x2:
- return VK_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Snorm16x4:
- return VK_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Float16x2:
- return VK_FORMAT_R16G16_SFLOAT;
- case wgpu::VertexFormat::Float16x4:
- return VK_FORMAT_R16G16B16A16_SFLOAT;
- case wgpu::VertexFormat::Float32:
- return VK_FORMAT_R32_SFLOAT;
- case wgpu::VertexFormat::Float32x2:
- return VK_FORMAT_R32G32_SFLOAT;
- case wgpu::VertexFormat::Float32x3:
- return VK_FORMAT_R32G32B32_SFLOAT;
- case wgpu::VertexFormat::Float32x4:
- return VK_FORMAT_R32G32B32A32_SFLOAT;
- case wgpu::VertexFormat::Uint32:
- return VK_FORMAT_R32_UINT;
- case wgpu::VertexFormat::Uint32x2:
- return VK_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::Uint32x3:
- return VK_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::Uint32x4:
- return VK_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Sint32:
- return VK_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Sint32x2:
- return VK_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Sint32x3:
- return VK_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Sint32x4:
- return VK_FORMAT_R32G32B32A32_SINT;
- default:
- UNREACHABLE();
- }
- }
-
- VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
- switch (topology) {
- case wgpu::PrimitiveTopology::PointList:
- return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
- case wgpu::PrimitiveTopology::LineList:
- return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
- case wgpu::PrimitiveTopology::LineStrip:
- return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
- case wgpu::PrimitiveTopology::TriangleList:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
- case wgpu::PrimitiveTopology::TriangleStrip:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
- }
- UNREACHABLE();
- }
-
- bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
- // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
- // primitive restart be only enabled on primitive topologies that support restarting.
- switch (topology) {
- case wgpu::PrimitiveTopology::PointList:
- case wgpu::PrimitiveTopology::LineList:
- case wgpu::PrimitiveTopology::TriangleList:
- return false;
- case wgpu::PrimitiveTopology::LineStrip:
- case wgpu::PrimitiveTopology::TriangleStrip:
- return true;
- }
- UNREACHABLE();
- }
-
- VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
- switch (face) {
- case wgpu::FrontFace::CCW:
- return VK_FRONT_FACE_COUNTER_CLOCKWISE;
- case wgpu::FrontFace::CW:
- return VK_FRONT_FACE_CLOCKWISE;
- }
- UNREACHABLE();
- }
-
- VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
- switch (mode) {
- case wgpu::CullMode::None:
- return VK_CULL_MODE_NONE;
- case wgpu::CullMode::Front:
- return VK_CULL_MODE_FRONT_BIT;
- case wgpu::CullMode::Back:
- return VK_CULL_MODE_BACK_BIT;
- }
- UNREACHABLE();
- }
-
- VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
- switch (factor) {
- case wgpu::BlendFactor::Zero:
- return VK_BLEND_FACTOR_ZERO;
- case wgpu::BlendFactor::One:
- return VK_BLEND_FACTOR_ONE;
- case wgpu::BlendFactor::Src:
- return VK_BLEND_FACTOR_SRC_COLOR;
- case wgpu::BlendFactor::OneMinusSrc:
- return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
- case wgpu::BlendFactor::SrcAlpha:
- return VK_BLEND_FACTOR_SRC_ALPHA;
- case wgpu::BlendFactor::OneMinusSrcAlpha:
- return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
- case wgpu::BlendFactor::Dst:
- return VK_BLEND_FACTOR_DST_COLOR;
- case wgpu::BlendFactor::OneMinusDst:
- return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
- case wgpu::BlendFactor::DstAlpha:
- return VK_BLEND_FACTOR_DST_ALPHA;
- case wgpu::BlendFactor::OneMinusDstAlpha:
- return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
- case wgpu::BlendFactor::SrcAlphaSaturated:
- return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
- case wgpu::BlendFactor::Constant:
- return VK_BLEND_FACTOR_CONSTANT_COLOR;
- case wgpu::BlendFactor::OneMinusConstant:
- return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
- }
- UNREACHABLE();
- }
-
- VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
- switch (operation) {
- case wgpu::BlendOperation::Add:
- return VK_BLEND_OP_ADD;
- case wgpu::BlendOperation::Subtract:
- return VK_BLEND_OP_SUBTRACT;
- case wgpu::BlendOperation::ReverseSubtract:
- return VK_BLEND_OP_REVERSE_SUBTRACT;
- case wgpu::BlendOperation::Min:
- return VK_BLEND_OP_MIN;
- case wgpu::BlendOperation::Max:
- return VK_BLEND_OP_MAX;
- }
- UNREACHABLE();
- }
-
- VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
- bool isDeclaredInFragmentShader) {
- // Vulkan and Dawn color write masks match, static assert it and return the mask
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
- VK_COLOR_COMPONENT_R_BIT,
- "");
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
- VK_COLOR_COMPONENT_G_BIT,
- "");
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
- VK_COLOR_COMPONENT_B_BIT,
- "");
- static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
- VK_COLOR_COMPONENT_A_BIT,
- "");
-
- // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
- // attachment writes are undefined for components which do not correspond to a fragment
- // shader outputs", we set the color write mask to 0 to prevent such undefined values
- // being written into the color attachments.
- return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
- : static_cast<VkColorComponentFlags>(0);
- }
-
- VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
- bool isDeclaredInFragmentShader) {
- VkPipelineColorBlendAttachmentState attachment;
- attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
- if (attachment.blendEnable) {
- attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
- attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
- attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
- attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
- attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
- attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
- } else {
- // Swiftshader's Vulkan implementation appears to expect these values to be valid
- // even when blending is not enabled.
- attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
- attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
- attachment.colorBlendOp = VK_BLEND_OP_ADD;
- attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
- attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
- attachment.alphaBlendOp = VK_BLEND_OP_ADD;
- }
- attachment.colorWriteMask =
- VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
- return attachment;
- }
-
- VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
- switch (op) {
- case wgpu::StencilOperation::Keep:
- return VK_STENCIL_OP_KEEP;
- case wgpu::StencilOperation::Zero:
- return VK_STENCIL_OP_ZERO;
- case wgpu::StencilOperation::Replace:
- return VK_STENCIL_OP_REPLACE;
- case wgpu::StencilOperation::IncrementClamp:
- return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
- case wgpu::StencilOperation::DecrementClamp:
- return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
- case wgpu::StencilOperation::Invert:
- return VK_STENCIL_OP_INVERT;
- case wgpu::StencilOperation::IncrementWrap:
- return VK_STENCIL_OP_INCREMENT_AND_WRAP;
- case wgpu::StencilOperation::DecrementWrap:
- return VK_STENCIL_OP_DECREMENT_AND_WRAP;
- }
- UNREACHABLE();
- }
-
- VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
- const DepthStencilState* descriptor) {
- VkPipelineDepthStencilStateCreateInfo depthStencilState;
- depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
- depthStencilState.pNext = nullptr;
- depthStencilState.flags = 0;
-
- // Depth writes only occur if depth is enabled
- depthStencilState.depthTestEnable =
- (descriptor->depthCompare == wgpu::CompareFunction::Always &&
- !descriptor->depthWriteEnabled)
- ? VK_FALSE
- : VK_TRUE;
- depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
- depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
- depthStencilState.depthBoundsTestEnable = false;
- depthStencilState.minDepthBounds = 0.0f;
- depthStencilState.maxDepthBounds = 1.0f;
-
- depthStencilState.stencilTestEnable =
- StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
-
- depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
- depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
- depthStencilState.front.depthFailOp =
- VulkanStencilOp(descriptor->stencilFront.depthFailOp);
- depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
-
- depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
- depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
- depthStencilState.back.depthFailOp =
- VulkanStencilOp(descriptor->stencilBack.depthFailOp);
- depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
-
- // Dawn doesn't have separate front and back stencil masks.
- depthStencilState.front.compareMask = descriptor->stencilReadMask;
- depthStencilState.back.compareMask = descriptor->stencilReadMask;
- depthStencilState.front.writeMask = descriptor->stencilWriteMask;
- depthStencilState.back.writeMask = descriptor->stencilWriteMask;
-
- // The stencil reference is always dynamic
- depthStencilState.front.reference = 0;
- depthStencilState.back.reference = 0;
-
- return depthStencilState;
- }
-
- } // anonymous namespace
-
- // static
- Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
- Device* device,
- const RenderPipelineDescriptor* descriptor) {
- return AcquireRef(new RenderPipeline(device, descriptor));
- }
-
- MaybeError RenderPipeline::Initialize() {
- Device* device = ToBackend(GetDevice());
-
- // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
- std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
- std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
- std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
- std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
- uint32_t stageCount = 0;
-
- for (auto stage : IterateStages(this->GetStageMask())) {
- VkPipelineShaderStageCreateInfo shaderStage;
-
- const ProgrammableStage& programmableStage = GetStage(stage);
- DAWN_TRY_ASSIGN(shaderStage.module,
- ToBackend(programmableStage.module)
- ->GetTransformedModuleHandle(programmableStage.entryPoint.c_str(),
- ToBackend(GetLayout())));
-
- shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- shaderStage.pNext = nullptr;
- shaderStage.flags = 0;
- shaderStage.pSpecializationInfo = nullptr;
- shaderStage.pName = programmableStage.entryPoint.c_str();
-
- switch (stage) {
- case dawn_native::SingleShaderStage::Vertex: {
- shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
- break;
- }
- case dawn_native::SingleShaderStage::Fragment: {
- shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
- break;
- }
- default: {
- // For render pipeline only Vertex and Fragment stage is possible
- DAWN_UNREACHABLE();
- break;
- }
- }
-
- shaderStage.pSpecializationInfo =
- GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
- &specializationDataEntriesPerStages[stageCount],
- &specializationMapEntriesPerStages[stageCount]);
-
- DAWN_ASSERT(stageCount < 2);
- shaderStages[stageCount] = shaderStage;
- stageCount++;
- }
-
- PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
- VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
- ComputeVertexInputDesc(&tempAllocations);
-
- VkPipelineInputAssemblyStateCreateInfo inputAssembly;
- inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
- inputAssembly.pNext = nullptr;
- inputAssembly.flags = 0;
- inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
- inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
-
- // A dummy viewport/scissor info. The validation layers force use to provide at least one
- // scissor and one viewport here, even if we choose to make them dynamic.
- VkViewport viewportDesc;
- viewportDesc.x = 0.0f;
- viewportDesc.y = 0.0f;
- viewportDesc.width = 1.0f;
- viewportDesc.height = 1.0f;
- viewportDesc.minDepth = 0.0f;
- viewportDesc.maxDepth = 1.0f;
- VkRect2D scissorRect;
- scissorRect.offset.x = 0;
- scissorRect.offset.y = 0;
- scissorRect.extent.width = 1;
- scissorRect.extent.height = 1;
- VkPipelineViewportStateCreateInfo viewport;
- viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
- viewport.pNext = nullptr;
- viewport.flags = 0;
- viewport.viewportCount = 1;
- viewport.pViewports = &viewportDesc;
- viewport.scissorCount = 1;
- viewport.pScissors = &scissorRect;
-
- VkPipelineRasterizationStateCreateInfo rasterization;
- rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
- rasterization.pNext = nullptr;
- rasterization.flags = 0;
- rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
- rasterization.rasterizerDiscardEnable = VK_FALSE;
- rasterization.polygonMode = VK_POLYGON_MODE_FILL;
- rasterization.cullMode = VulkanCullMode(GetCullMode());
- rasterization.frontFace = VulkanFrontFace(GetFrontFace());
- rasterization.depthBiasEnable = IsDepthBiasEnabled();
- rasterization.depthBiasConstantFactor = GetDepthBias();
- rasterization.depthBiasClamp = GetDepthBiasClamp();
- rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
- rasterization.lineWidth = 1.0f;
-
- VkPipelineMultisampleStateCreateInfo multisample;
- multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
- multisample.pNext = nullptr;
- multisample.flags = 0;
- multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
- multisample.sampleShadingEnable = VK_FALSE;
- multisample.minSampleShading = 0.0f;
- // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
- // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
- // we have to assert that this length is indeed 1.
- ASSERT(multisample.rasterizationSamples <= 32);
- VkSampleMask sampleMask = GetSampleMask();
- multisample.pSampleMask = &sampleMask;
- multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
- multisample.alphaToOneEnable = VK_FALSE;
-
- VkPipelineDepthStencilStateCreateInfo depthStencilState =
- ComputeDepthStencilDesc(GetDepthStencilState());
-
- VkPipelineColorBlendStateCreateInfo colorBlend;
- // colorBlend may hold pointers to elements in colorBlendAttachments, so it must have a
- // definition scope as same as colorBlend
- ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
- colorBlendAttachments;
- if (GetStageMask() & wgpu::ShaderStage::Fragment) {
- // Initialize the "blend state info" that will be chained in the "create info" from the
- // data pre-computed in the ColorState
- const auto& fragmentOutputsWritten =
- GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorTargetState* target = GetColorTargetState(i);
- colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
- }
-
- colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
- colorBlend.pNext = nullptr;
- colorBlend.flags = 0;
- // LogicOp isn't supported so we disable it.
- colorBlend.logicOpEnable = VK_FALSE;
- colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
- colorBlend.attachmentCount = static_cast<uint32_t>(GetColorAttachmentsMask().count());
- colorBlend.pAttachments = colorBlendAttachments.data();
- // The blend constant is always dynamic so we fill in a dummy value
- colorBlend.blendConstants[0] = 0.0f;
- colorBlend.blendConstants[1] = 0.0f;
- colorBlend.blendConstants[2] = 0.0f;
- colorBlend.blendConstants[3] = 0.0f;
- }
-
- // Tag all state as dynamic but stencil masks and depth bias.
- VkDynamicState dynamicStates[] = {
- VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
- VK_DYNAMIC_STATE_LINE_WIDTH, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
- VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
- };
- VkPipelineDynamicStateCreateInfo dynamic;
- dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
- dynamic.pNext = nullptr;
- dynamic.flags = 0;
- dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
- dynamic.pDynamicStates = dynamicStates;
-
- // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
- // don't matter so set them all to LoadOp::Load / StoreOp::Store
- VkRenderPass renderPass = VK_NULL_HANDLE;
- {
- RenderPassCacheQuery query;
-
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
- wgpu::StoreOp::Store, false);
- }
-
- if (HasDepthStencilAttachment()) {
- query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
- wgpu::StoreOp::Store, wgpu::LoadOp::Load,
- wgpu::StoreOp::Store);
- }
-
- query.SetSampleCount(GetSampleCount());
-
- DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
- }
-
- // The create info chains in a bunch of things created on the stack here or inside state
- // objects.
- VkGraphicsPipelineCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.stageCount = stageCount;
- createInfo.pStages = shaderStages.data();
- createInfo.pVertexInputState = &vertexInputCreateInfo;
- createInfo.pInputAssemblyState = &inputAssembly;
- createInfo.pTessellationState = nullptr;
- createInfo.pViewportState = &viewport;
- createInfo.pRasterizationState = &rasterization;
- createInfo.pMultisampleState = &multisample;
- createInfo.pDepthStencilState = &depthStencilState;
- createInfo.pColorBlendState =
- (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
- createInfo.pDynamicState = &dynamic;
- createInfo.layout = ToBackend(GetLayout())->GetHandle();
- createInfo.renderPass = renderPass;
- createInfo.subpass = 0;
- createInfo.basePipelineHandle = VkPipeline{};
- createInfo.basePipelineIndex = -1;
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
- &createInfo, nullptr, &*mHandle),
- "CreateGraphicsPipeline"));
-
- SetLabelImpl();
-
- return {};
- }
-
- void RenderPipeline::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_RenderPipeline", GetLabel());
- }
-
- VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
- PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
- // Fill in the "binding info" that will be chained in the create info
- uint32_t bindingCount = 0;
- for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
- const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
-
- VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
- bindingDesc->binding = static_cast<uint8_t>(slot);
- bindingDesc->stride = bindingInfo.arrayStride;
- bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
-
- bindingCount++;
- }
-
- // Fill in the "attribute info" that will be chained in the create info
- uint32_t attributeCount = 0;
- for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
- const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
-
- VkVertexInputAttributeDescription* attributeDesc =
- &tempAllocations->attributes[attributeCount];
- attributeDesc->location = static_cast<uint8_t>(loc);
- attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
- attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
- attributeDesc->offset = attributeInfo.offset;
-
- attributeCount++;
- }
-
- // Build the create info
- VkPipelineVertexInputStateCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.vertexBindingDescriptionCount = bindingCount;
- createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
- createInfo.vertexAttributeDescriptionCount = attributeCount;
- createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
- return createInfo;
- }
-
- RenderPipeline::~RenderPipeline() = default;
-
- void RenderPipeline::DestroyImpl() {
- RenderPipelineBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- VkPipeline RenderPipeline::GetHandle() const {
- return mHandle;
- }
-
- void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
- std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
- userdata);
- CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
deleted file mode 100644
index 9cfd078daca..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
-#define DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
-
-#include "dawn_native/RenderPipeline.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class RenderPipeline final : public RenderPipelineBase {
- public:
- static Ref<RenderPipeline> CreateUninitialized(Device* device,
- const RenderPipelineDescriptor* descriptor);
- static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- VkPipeline GetHandle() const;
-
- MaybeError Initialize() override;
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~RenderPipeline() override;
- void DestroyImpl() override;
- using RenderPipelineBase::RenderPipelineBase;
-
- struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
- std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
- std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
- };
- VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
- PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
-
- VkPipeline mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_RENDERPIPELINEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp
deleted file mode 100644
index bf3b947bd44..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/ResourceHeapVk.h"
-
-namespace dawn_native { namespace vulkan {
-
- ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
- : mMemory(memory), mMemoryType(memoryType) {
- }
-
- VkDeviceMemory ResourceHeap::GetMemory() const {
- return mMemory;
- }
-
- size_t ResourceHeap::GetMemoryType() const {
- return mMemoryType;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h
deleted file mode 100644
index 2bb909b5c8f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
-#define DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/ResourceHeap.h"
-
-namespace dawn_native { namespace vulkan {
-
- // Wrapper for physical memory used with or without a resource object.
- class ResourceHeap : public ResourceHeapBase {
- public:
- ResourceHeap(VkDeviceMemory memory, size_t memoryType);
- ~ResourceHeap() = default;
-
- VkDeviceMemory GetMemory() const;
- size_t GetMemoryType() const;
-
- private:
- VkDeviceMemory mMemory = VK_NULL_HANDLE;
- size_t mMemoryType = 0;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
deleted file mode 100644
index 72bb019a29f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-
-#include "common/Math.h"
-#include "dawn_native/BuddyMemoryAllocator.h"
-#include "dawn_native/ResourceHeapAllocator.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceHeapVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
-
- // TODO(crbug.com/dawn/849): This is a hardcoded heurstic to choose when to
- // suballocate but it should ideally depend on the size of the memory heaps and other
- // factors.
- constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MiB
-
- // Have each bucket of the buddy system allocate at least some resource of the maximum
- // size
- constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
-
- } // anonymous namespace
-
- // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
- // service suballocation requests, but for a single Vulkan memory type.
-
- class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
- public:
- SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
- : mDevice(device),
- mMemoryTypeIndex(memoryTypeIndex),
- mMemoryHeapSize(memoryHeapSize),
- mPooledMemoryAllocator(this),
- mBuddySystem(
- // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
- // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
- uint64_t(1) << Log2(mMemoryHeapSize),
- // Take the min in the very unlikely case the memory heap is tiny.
- std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
- &mPooledMemoryAllocator) {
- ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
- }
- ~SingleTypeAllocator() override = default;
-
- void DestroyPool() {
- mPooledMemoryAllocator.DestroyPool();
- }
-
- ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
- return mBuddySystem.Allocate(size, alignment);
- }
-
- void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
- mBuddySystem.Deallocate(allocation);
- }
-
- // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
-
- ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
- uint64_t size) override {
- if (size > mMemoryHeapSize) {
- return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
- }
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.allocationSize = size;
- allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
-
- // First check OOM that we want to surface to the application.
- DAWN_TRY(CheckVkOOMThenSuccess(
- mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
- &*allocatedMemory),
- "vkAllocateMemory"));
-
- ASSERT(allocatedMemory != VK_NULL_HANDLE);
- return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
- }
-
- void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
- }
-
- private:
- Device* mDevice;
- size_t mMemoryTypeIndex;
- VkDeviceSize mMemoryHeapSize;
- PooledResourceMemoryAllocator mPooledMemoryAllocator;
- BuddyMemoryAllocator mBuddySystem;
- };
-
- // Implementation of ResourceMemoryAllocator
-
- ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
- mAllocatorsPerType.reserve(info.memoryTypes.size());
-
- for (size_t i = 0; i < info.memoryTypes.size(); i++) {
- mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
- mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
- }
- }
-
- ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
-
- ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
- const VkMemoryRequirements& requirements,
- MemoryKind kind) {
- // The Vulkan spec guarantees at least on memory type is valid.
- int memoryType = FindBestTypeIndex(requirements, kind);
- ASSERT(memoryType >= 0);
-
- VkDeviceSize size = requirements.size;
-
- // Sub-allocate non-mappable resources because at the moment the mapped pointer
- // is part of the resource and not the heap, which doesn't match the Vulkan model.
- // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
- if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable) {
- // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
- // hardware puts information on the memory's page table entry and allocating a linear
- // resource in the same page as a non-linear (aka opaque) resource can cause issues.
- // Probably because some texture compression flags are stored on the page table entry,
- // and allocating a linear resource removes these flags.
- //
- // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
- // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
- // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
- // GPUs often use a granularity of 64k which will lead to a lot of wasted spec. Revisit
- // with a more efficient algorithm later.
- uint64_t alignment =
- std::max(requirements.alignment,
- mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
-
- ResourceMemoryAllocation subAllocation;
- DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
- requirements.size, alignment));
- if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
- return std::move(subAllocation);
- }
- }
-
- // If sub-allocation failed, allocate memory just for it.
- std::unique_ptr<ResourceHeapBase> resourceHeap;
- DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
-
- void* mappedPointer = nullptr;
- if (kind == MemoryKind::LinearMappable) {
- DAWN_TRY_WITH_CLEANUP(
- CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
- ToBackend(resourceHeap.get())->GetMemory(), 0,
- size, 0, &mappedPointer),
- "vkMapMemory"),
- {
- mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap));
- });
- }
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
- return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
- static_cast<uint8_t*>(mappedPointer));
- }
-
- void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
- switch (allocation->GetInfo().mMethod) {
- // Some memory allocation can never be initialized, for example when wrapping
- // swapchain VkImages with a Texture.
- case AllocationMethod::kInvalid:
- break;
-
- // For direct allocation we can put the memory for deletion immediately and the fence
- // deleter will make sure the resources are freed before the memory.
- case AllocationMethod::kDirect: {
- ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
- allocation->Invalidate();
- mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
- delete heap;
- break;
- }
-
- // Suballocations aren't freed immediately, otherwise another resource allocation could
- // happen just after that aliases the old one and would require a barrier.
- // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
- // latency to reclaim memory.
- case AllocationMethod::kSubAllocated:
- mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // Invalidate the underlying resource heap in case the client accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation->Invalidate();
- }
-
- void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
- for (const ResourceMemoryAllocation& allocation :
- mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
- ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
- size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
-
- mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
- }
-
- mSubAllocationsToDelete.ClearUpTo(completedSerial);
- }
-
- int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
- MemoryKind kind) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
- bool mappable = kind == MemoryKind::LinearMappable;
-
- // Find a suitable memory type for this allocation
- int bestType = -1;
- for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
- // Resource must support this memory type
- if ((requirements.memoryTypeBits & (1 << i)) == 0) {
- continue;
- }
-
- // Mappable resource must be host visible
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
- continue;
- }
-
- // Mappable must also be host coherent.
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
- continue;
- }
-
- // Found the first candidate memory type
- if (bestType == -1) {
- bestType = static_cast<int>(i);
- continue;
- }
-
- // For non-mappable resources, favor device local memory.
- bool currentDeviceLocal =
- info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
- bool bestDeviceLocal =
- info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
- if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
- if (currentDeviceLocal) {
- bestType = static_cast<int>(i);
- }
- continue;
- }
-
- // All things equal favor the memory in the biggest heap
- VkDeviceSize bestTypeHeapSize =
- info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
- VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
- if (candidateHeapSize > bestTypeHeapSize) {
- bestType = static_cast<int>(i);
- continue;
- }
- }
-
- return bestType;
- }
-
- void ResourceMemoryAllocator::DestroyPool() {
- for (auto& alloc : mAllocatorsPerType) {
- alloc->DestroyPool();
- }
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
deleted file mode 100644
index e3871dbd28e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
-#define DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
-
-#include "common/SerialQueue.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/IntegerTypes.h"
-#include "dawn_native/PooledResourceMemoryAllocator.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-
-#include <memory>
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- // Various kinds of memory that influence the result of the allocation. For example, to take
- // into account mappability and Vulkan's bufferImageGranularity.
- enum class MemoryKind {
- Linear,
- LinearMappable,
- Opaque,
- };
-
- class ResourceMemoryAllocator {
- public:
- ResourceMemoryAllocator(Device* device);
- ~ResourceMemoryAllocator();
-
- ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
- MemoryKind kind);
- void Deallocate(ResourceMemoryAllocation* allocation);
-
- void DestroyPool();
-
- void Tick(ExecutionSerial completedSerial);
-
- int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
-
- private:
- Device* mDevice;
-
- class SingleTypeAllocator;
- std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
-
- SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
deleted file mode 100644
index 7c43645380d..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/SamplerVk.h"
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
- VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
- switch (mode) {
- case wgpu::AddressMode::Repeat:
- return VK_SAMPLER_ADDRESS_MODE_REPEAT;
- case wgpu::AddressMode::MirrorRepeat:
- return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
- case wgpu::AddressMode::ClampToEdge:
- return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
- }
- UNREACHABLE();
- }
-
- VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
- switch (filter) {
- case wgpu::FilterMode::Linear:
- return VK_FILTER_LINEAR;
- case wgpu::FilterMode::Nearest:
- return VK_FILTER_NEAREST;
- }
- UNREACHABLE();
- }
-
- VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
- switch (filter) {
- case wgpu::FilterMode::Linear:
- return VK_SAMPLER_MIPMAP_MODE_LINEAR;
- case wgpu::FilterMode::Nearest:
- return VK_SAMPLER_MIPMAP_MODE_NEAREST;
- }
- UNREACHABLE();
- }
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
- const SamplerDescriptor* descriptor) {
- Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
- DAWN_TRY(sampler->Initialize(descriptor));
- return sampler;
- }
-
- MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
- VkSamplerCreateInfo createInfo = {};
- createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
- createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
- createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
- createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
- createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
- createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
- createInfo.mipLodBias = 0.0f;
- if (descriptor->compare != wgpu::CompareFunction::Undefined) {
- createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
- createInfo.compareEnable = VK_TRUE;
- } else {
- // Still set the compareOp so it's not garbage.
- createInfo.compareOp = VK_COMPARE_OP_NEVER;
- createInfo.compareEnable = VK_FALSE;
- }
- createInfo.minLod = descriptor->lodMinClamp;
- createInfo.maxLod = descriptor->lodMaxClamp;
- createInfo.unnormalizedCoordinates = VK_FALSE;
-
- Device* device = ToBackend(GetDevice());
- uint16_t maxAnisotropy = GetMaxAnisotropy();
- if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
- createInfo.anisotropyEnable = VK_TRUE;
- // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
- createInfo.maxAnisotropy =
- std::min(static_cast<float>(maxAnisotropy),
- device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
- } else {
- createInfo.anisotropyEnable = VK_FALSE;
- createInfo.maxAnisotropy = 1;
- }
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateSampler"));
-
- SetLabelImpl();
-
- return {};
- }
-
- Sampler::~Sampler() = default;
-
- void Sampler::DestroyImpl() {
- SamplerBase::DestroyImpl();
- if (mHandle != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- VkSampler Sampler::GetHandle() const {
- return mHandle;
- }
-
- void Sampler::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SAMPLER,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_Sampler", GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
deleted file mode 100644
index fec8653d929..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_SAMPLERVK_H_
-#define DAWNNATIVE_VULKAN_SAMPLERVK_H_
-
-#include "dawn_native/Sampler.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class Sampler final : public SamplerBase {
- public:
- static ResultOrError<Ref<Sampler>> Create(Device* device,
- const SamplerDescriptor* descriptor);
-
- VkSampler GetHandle() const;
-
- private:
- ~Sampler() override;
- void DestroyImpl() override;
- using SamplerBase::SamplerBase;
- MaybeError Initialize(const SamplerDescriptor* descriptor);
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkSampler mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_SAMPLERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
deleted file mode 100644
index 45ee439f965..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/ShaderModuleVk.h"
-
-#include "dawn_native/SpirvValidation.h"
-#include "dawn_native/TintUtils.h"
-#include "dawn_native/vulkan/BindGroupLayoutVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/PipelineLayoutVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <tint/tint.h>
-#include <spirv-tools/libspirv.hpp>
-
-namespace dawn_native { namespace vulkan {
-
- ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
- Device* device)
- : mDevice(device) {
- }
-
- ShaderModule::ConcurrentTransformedShaderModuleCache::
- ~ConcurrentTransformedShaderModuleCache() {
- std::lock_guard<std::mutex> lock(mMutex);
- for (const auto& iter : mTransformedShaderModuleCache) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(iter.second);
- }
- }
-
- VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::FindShaderModule(
- const PipelineLayoutEntryPointPair& key) {
- std::lock_guard<std::mutex> lock(mMutex);
- auto iter = mTransformedShaderModuleCache.find(key);
- if (iter != mTransformedShaderModuleCache.end()) {
- auto cached = iter->second;
- return cached;
- }
- return VK_NULL_HANDLE;
- }
-
- VkShaderModule ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGetCachedShaderModule(
- const PipelineLayoutEntryPointPair& key,
- VkShaderModule value) {
- ASSERT(value != VK_NULL_HANDLE);
- std::lock_guard<std::mutex> lock(mMutex);
- auto iter = mTransformedShaderModuleCache.find(key);
- if (iter == mTransformedShaderModuleCache.end()) {
- mTransformedShaderModuleCache.emplace(key, value);
- return value;
- } else {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(value);
- return iter->second;
- }
- }
-
- // static
- ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
- Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- DAWN_TRY(module->Initialize(parseResult));
- return module;
- }
-
- ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
- : ShaderModuleBase(device, descriptor),
- mTransformedShaderModuleCache(
- std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {
- }
-
- MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- if (GetDevice()->IsRobustnessEnabled()) {
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- tint::transform::Robustness robustness;
- tint::transform::DataMap transformInputs;
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
- transformInputs, nullptr, nullptr));
- // Rather than use a new ParseResult object, we just reuse the original parseResult
- parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
- }
-
- return InitializeBase(parseResult);
- }
-
- void ShaderModule::DestroyImpl() {
- ShaderModuleBase::DestroyImpl();
- // Remove reference to internal cache to trigger cleanup.
- mTransformedShaderModuleCache = nullptr;
- }
-
- ShaderModule::~ShaderModule() = default;
-
- ResultOrError<VkShaderModule> ShaderModule::GetTransformedModuleHandle(
- const char* entryPointName,
- PipelineLayout* layout) {
- // If the shader was destroyed, we should never call this function.
- ASSERT(IsAlive());
-
- ScopedTintICEHandler scopedICEHandler(GetDevice());
-
- auto cacheKey = std::make_pair(layout, entryPointName);
- VkShaderModule cachedShaderModule =
- mTransformedShaderModuleCache->FindShaderModule(cacheKey);
- if (cachedShaderModule != VK_NULL_HANDLE) {
- return cachedShaderModule;
- }
-
- // Creation of VkShaderModule is deferred to this point when using tint generator
-
- // Remap BindingNumber to BindingIndex in WGSL shader
- using BindingRemapper = tint::transform::BindingRemapper;
- using BindingPoint = tint::transform::BindingPoint;
- BindingRemapper::BindingPoints bindingPoints;
- BindingRemapper::AccessControls accessControls;
-
- const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
-
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
- const auto& groupBindingInfo = moduleBindingInfo[group];
- for (const auto& it : groupBindingInfo) {
- BindingNumber binding = it.first;
- BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
-
- BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(bindingIndex)};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
- }
- }
-
- tint::transform::Manager transformManager;
- transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
- // Many Vulkan drivers can't handle multi-entrypoint shader modules.
- transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
-
- tint::transform::DataMap transformInputs;
- transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls),
- /* mayCollide */ false);
- transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
-
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
- nullptr, nullptr));
-
- tint::writer::spirv::Options options;
- options.emit_vertex_point_size = true;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- auto result = tint::writer::spirv::Generate(&program, options);
- DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
- result.error);
-
- std::vector<uint32_t> spirv = std::move(result.spirv);
- DAWN_TRY(
- ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
-
- VkShaderModuleCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.codeSize = spirv.size() * sizeof(uint32_t);
- createInfo.pCode = spirv.data();
-
- Device* device = ToBackend(GetDevice());
-
- VkShaderModule newHandle = VK_NULL_HANDLE;
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
- "CreateShaderModule"));
- if (newHandle != VK_NULL_HANDLE) {
- newHandle =
- mTransformedShaderModuleCache->AddOrGetCachedShaderModule(cacheKey, newHandle);
- }
-
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SHADER_MODULE,
- reinterpret_cast<uint64_t&>(newHandle), "Dawn_ShaderModule", GetLabel());
-
- return newHandle;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
deleted file mode 100644
index 9d28f91d4d3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
-#define DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
-
-#include "dawn_native/ShaderModule.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-
-#include <mutex>
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
- class PipelineLayout;
-
- class ShaderModule final : public ShaderModuleBase {
- public:
- static ResultOrError<Ref<ShaderModule>> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
-
- ResultOrError<VkShaderModule> GetTransformedModuleHandle(const char* entryPointName,
- PipelineLayout* layout);
-
- private:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
- ~ShaderModule() override;
- MaybeError Initialize(ShaderModuleParseResult* parseResult);
- void DestroyImpl() override;
-
- // New handles created by GetTransformedModuleHandle at pipeline creation time
- class ConcurrentTransformedShaderModuleCache {
- public:
- explicit ConcurrentTransformedShaderModuleCache(Device* device);
- ~ConcurrentTransformedShaderModuleCache();
- VkShaderModule FindShaderModule(const PipelineLayoutEntryPointPair& key);
- VkShaderModule AddOrGetCachedShaderModule(const PipelineLayoutEntryPointPair& key,
- VkShaderModule value);
-
- private:
- Device* mDevice;
- std::mutex mMutex;
- std::unordered_map<PipelineLayoutEntryPointPair,
- VkShaderModule,
- PipelineLayoutEntryPointPairHashFunc>
- mTransformedShaderModuleCache;
- };
- std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_SHADERMODULEVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
deleted file mode 100644
index c78f0fc4368..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/StagingBufferVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceHeapVk.h"
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- StagingBuffer::StagingBuffer(size_t size, Device* device)
- : StagingBufferBase(size), mDevice(device) {
- }
-
- MaybeError StagingBuffer::Initialize() {
- VkBufferCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.size = GetSize();
- createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = 0;
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
- "vkCreateBuffer"));
-
- VkMemoryRequirements requirements;
- mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
-
- DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
- requirements, MemoryKind::LinearMappable));
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
- ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
- mAllocation.GetOffset()),
- "vkBindBufferMemory"));
-
- mMappedPointer = mAllocation.GetMappedPointer();
- if (mMappedPointer == nullptr) {
- return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
- }
-
- SetDebugName(mDevice, VK_OBJECT_TYPE_BUFFER, reinterpret_cast<uint64_t&>(mBuffer),
- "Dawn_StagingBuffer");
-
- return {};
- }
-
- StagingBuffer::~StagingBuffer() {
- mMappedPointer = nullptr;
- mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
- mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
- }
-
- VkBuffer StagingBuffer::GetBufferHandle() const {
- return mBuffer;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h
deleted file mode 100644
index 50b773a0dac..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_STAGINGBUFFERVK_H_
-#define DAWNNATIVE_STAGINGBUFFERVK_H_
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-#include "dawn_native/StagingBuffer.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- class StagingBuffer : public StagingBufferBase {
- public:
- StagingBuffer(size_t size, Device* device);
- ~StagingBuffer() override;
-
- VkBuffer GetBufferHandle() const;
-
- MaybeError Initialize() override;
-
- private:
- Device* mDevice;
- VkBuffer mBuffer;
- ResourceMemoryAllocation mAllocation;
- };
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_STAGINGBUFFERVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
deleted file mode 100644
index e111200dba7..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ /dev/null
@@ -1,664 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/SwapChainVk.h"
-
-#include "common/Compiler.h"
-#include "dawn_native/Instance.h"
-#include "dawn_native/Surface.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <algorithm>
-
-#if defined(DAWN_USE_X11)
-# include "dawn_native/XlibXcbFunctions.h"
-#endif // defined(DAWN_USE_X11)
-
-namespace dawn_native { namespace vulkan {
-
- // OldSwapChain
-
- // static
- Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return AcquireRef(new OldSwapChain(device, descriptor));
- }
-
- OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : OldSwapChainBase(device, descriptor) {
- const auto& im = GetImplementation();
- DawnWSIContextVulkan wsiContext = {};
- im.Init(im.userData, &wsiContext);
-
- ASSERT(im.textureUsage != WGPUTextureUsage_None);
- mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
- }
-
- OldSwapChain::~OldSwapChain() {
- }
-
- TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- const auto& im = GetImplementation();
- DawnSwapChainNextTexture next = {};
- DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
-
- if (error) {
- GetDevice()->HandleError(InternalErrorType::Internal, error);
- return nullptr;
- }
-
- ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
- VkImage nativeTexture = VkImage::CreateFromHandle(image);
- return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
- .Detach();
- }
-
- MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
- Device* device = ToBackend(GetDevice());
-
- // Perform the necessary pipeline barriers for the texture to be used with the usage
- // requested by the implementation.
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- ToBackend(view->GetTexture())
- ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
-
- DAWN_TRY(device->SubmitPendingCommands());
-
- return {};
- }
-
- // SwapChain
-
- namespace {
-
- ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
- const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
- const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
- VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
-
- // May not be used in the platform-specific switches below.
- DAWN_UNUSED(info);
- DAWN_UNUSED(fn);
- DAWN_UNUSED(instance);
-
- switch (surface->GetType()) {
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- case Surface::Type::MetalLayer:
- if (info.HasExt(InstanceExt::MetalSurface)) {
- VkMetalSurfaceCreateInfoEXT createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.pLayer = surface->GetMetalLayer();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
- "CreateMetalSurface"));
- return vkSurface;
- }
- break;
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- case Surface::Type::WindowsHWND:
- if (info.HasExt(InstanceExt::Win32Surface)) {
- VkWin32SurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
- createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateWin32Surface"));
- return vkSurface;
- }
- break;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
- case Surface::Type::Xlib: {
- if (info.HasExt(InstanceExt::XlibSurface)) {
- VkXlibSurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
- createInfo.window = surface->GetXWindow();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateXlibSurface"));
- return vkSurface;
- }
-
- // Fall back to using XCB surfaces if the Xlib extension isn't available.
- // See https://xcb.freedesktop.org/MixingCalls/ for more information about
- // interoperability between Xlib and XCB
- const XlibXcbFunctions* xlibXcb =
- adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
- ASSERT(xlibXcb != nullptr);
-
- if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
- VkXcbSurfaceCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- // The XCB connection lives as long as the X11 display.
- createInfo.connection = xlibXcb->xGetXCBConnection(
- static_cast<Display*>(surface->GetXDisplay()));
- createInfo.window = surface->GetXWindow();
-
- VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
- "CreateXcbSurfaceKHR"));
- return vkSurface;
- }
- break;
- }
-#endif // defined(DAWN_USE_X11)
-
- default:
- break;
- }
-
- return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
- surface->GetType());
- }
-
- VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
- switch (mode) {
- case wgpu::PresentMode::Fifo:
- return VK_PRESENT_MODE_FIFO_KHR;
- case wgpu::PresentMode::Immediate:
- return VK_PRESENT_MODE_IMMEDIATE_KHR;
- case wgpu::PresentMode::Mailbox:
- return VK_PRESENT_MODE_MAILBOX_KHR;
- }
- UNREACHABLE();
- }
-
- uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
- switch (mode) {
- case VK_PRESENT_MODE_FIFO_KHR:
- case VK_PRESENT_MODE_IMMEDIATE_KHR:
- return 2;
- case VK_PRESENT_MODE_MAILBOX_KHR:
- return 3;
- default:
- break;
- }
- UNREACHABLE();
- }
-
- } // anonymous namespace
-
- // static
- ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
- DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain;
- }
-
- SwapChain::~SwapChain() = default;
-
- void SwapChain::DestroyImpl() {
- SwapChainBase::DestroyImpl();
- DetachFromSurface();
- }
-
- // Note that when we need to re-create the swapchain because it is out of date,
- // previousSwapChain can be set to `this`.
- MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
- Device* device = ToBackend(GetDevice());
- Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
-
- VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
-
- if (previousSwapChain != nullptr) {
- // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
- // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
-
- // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
- // multiple backends one after the other. It probably needs to block until the backend
- // and GPU are completely finished with the previous swapchain.
- DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
- "Vulkan SwapChain cannot switch backend types from %s to %s.",
- previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
-
- // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
- SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
-
- // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
- // Vulkan devices on different VkInstances. Probably needs to block too!
- VkInstance previousInstance =
- ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
- DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
- "Vulkan SwapChain cannot switch between Vulkan instances.");
-
- // The previous swapchain is a dawn_native::vulkan::SwapChain so we can reuse its
- // VkSurfaceKHR provided since they are on the same instance.
- std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
-
- // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
- // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
- // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
- // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
- // using the fenced deleter makes the code simpler).
- std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
- ToBackend(previousSwapChain->GetDevice())
- ->GetFencedDeleter()
- ->DeleteWhenUnused(previousVkSwapChain);
- }
-
- if (mVkSurface == VK_NULL_HANDLE) {
- DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
- }
-
- VulkanSurfaceInfo surfaceInfo;
- DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
-
- DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
-
- // TODO Choose config instead of hardcoding
- VkSwapchainCreateInfoKHR createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.surface = mVkSurface;
- createInfo.minImageCount = mConfig.targetImageCount;
- createInfo.imageFormat = mConfig.format;
- createInfo.imageColorSpace = mConfig.colorSpace;
- createInfo.imageExtent = mConfig.extent;
- createInfo.imageArrayLayers = 1;
- createInfo.imageUsage = mConfig.usage;
- createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.preTransform = mConfig.transform;
- createInfo.compositeAlpha = mConfig.alphaMode;
- createInfo.presentMode = mConfig.presentMode;
- createInfo.clipped = false;
- createInfo.oldSwapchain = previousVkSwapChain;
-
- DAWN_TRY(CheckVkSuccess(device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo,
- nullptr, &*mSwapChain),
- "CreateSwapChain"));
-
- // Gather the swapchain's images. Implementations are allowed to return more images than the
- // number we asked for.
- uint32_t count = 0;
- DAWN_TRY(CheckVkSuccess(
- device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
- "GetSwapChainImages1"));
-
- mSwapChainImages.resize(count);
- DAWN_TRY(CheckVkSuccess(
- device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
- AsVkArray(mSwapChainImages.data())),
- "GetSwapChainImages2"));
-
- return {};
- }
-
- ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
- const VulkanSurfaceInfo& surfaceInfo) const {
- Config config;
-
- // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
- // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
- // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
- // FIFO.
- {
- auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
- VkPresentModeKHR target) -> bool {
- return std::find(modes.begin(), modes.end(), target) != modes.end();
- };
-
- VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
- const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
- VK_PRESENT_MODE_IMMEDIATE_KHR,
- VK_PRESENT_MODE_MAILBOX_KHR,
- VK_PRESENT_MODE_FIFO_KHR,
- };
-
- // Go to the target mode.
- size_t modeIndex = 0;
- while (kPresentModeFallbacks[modeIndex] != targetMode) {
- modeIndex++;
- }
-
- // Find the first available fallback.
- while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
- modeIndex++;
- }
-
- ASSERT(modeIndex < kPresentModeFallbacks.size());
- config.presentMode = kPresentModeFallbacks[modeIndex];
- }
-
- // Choose the target width or do a blit.
- if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
- GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
- GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
- GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
- config.needsBlit = true;
- } else {
- config.extent.width = GetWidth();
- config.extent.height = GetHeight();
- }
-
- // Choose the target usage or do a blit.
- VkImageUsageFlags targetUsages =
- VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
- VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
- if (!IsSubset(targetUsages, supportedUsages)) {
- config.needsBlit = true;
- } else {
- config.usage = targetUsages;
- config.wgpuUsage = GetUsage();
- }
-
- // Only support BGRA8Unorm with SRGB color space for now.
- bool hasBGRA8Unorm = false;
- for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
- if (format.format == VK_FORMAT_B8G8R8A8_UNORM &&
- format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
- hasBGRA8Unorm = true;
- break;
- }
- }
- if (!hasBGRA8Unorm) {
- return DAWN_INTERNAL_ERROR(
- "Vulkan SwapChain must support BGRA8Unorm with sRGB colorspace.");
- }
- config.format = VK_FORMAT_B8G8R8A8_UNORM;
- config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
- config.wgpuFormat = wgpu::TextureFormat::BGRA8Unorm;
-
- // Only the identity transform with opaque alpha is supported for now.
- DAWN_INVALID_IF((surfaceInfo.capabilities.supportedTransforms &
- VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
- "Vulkan SwapChain must support the identity transform.");
-
- config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
-
- DAWN_INVALID_IF((surfaceInfo.capabilities.supportedCompositeAlpha &
- VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
- "Vulkan SwapChain must support opaque alpha.");
-
- config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-
- // Choose the number of images for the swapchain= and clamp it to the min and max from the
- // surface capabilities. maxImageCount = 0 means there is no limit.
- ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
- surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
- uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
-
- targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
- if (surfaceInfo.capabilities.maxImageCount != 0) {
- targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
- }
-
- config.targetImageCount = targetCount;
-
- // Choose a valid config for the swapchain texture that will receive the blit.
- if (config.needsBlit) {
- // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
- // the case it is very likely that the target extent works, but clamp it just in case.
- // Using the target extent for the blit is better when possible so that texels don't
- // get stretched. This case is exposed by having the special "-1" value in both
- // dimensions of the extent.
- constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
- if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
- surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
- // extent = clamp(targetExtent, minExtent, maxExtent)
- config.extent.width = GetWidth();
- config.extent.width =
- std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
- config.extent.width =
- std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
-
- config.extent.height = GetHeight();
- config.extent.height =
- std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
- config.extent.height =
- std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
- } else {
- // If it is not an adaptable swapchain, just use the current extent for the blit
- // texture.
- config.extent = surfaceInfo.capabilities.currentExtent;
- }
-
- // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
- // then we'll need to have a second fallback that uses a blit shader :(
- if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
- return DAWN_INTERNAL_ERROR(
- "SwapChain cannot fallback to a blit because of a missing "
- "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
- }
- config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- config.wgpuUsage = wgpu::TextureUsage::CopyDst;
- }
-
- return config;
- }
-
- MaybeError SwapChain::PresentImpl() {
- Device* device = ToBackend(GetDevice());
-
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-
- if (mConfig.needsBlit) {
- // TODO ditto same as present below: eagerly transition the blit texture to CopySrc.
- mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
- mBlitTexture->GetAllSubresources());
- mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
- mTexture->GetAllSubresources());
-
- VkImageBlit region;
- region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
- region.srcSubresource.mipLevel = 0;
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- region.srcOffsets[0] = {0, 0, 0};
- region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
- static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
-
- region.dstSubresource = region.srcSubresource;
- region.dstOffsets[0] = {0, 0, 0};
- region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
- static_cast<int32_t>(mTexture->GetHeight()), 1};
-
- device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
- mBlitTexture->GetCurrentLayoutForSwapChain(),
- mTexture->GetHandle(), mTexture->GetCurrentLayoutForSwapChain(),
- 1, &region, VK_FILTER_LINEAR);
-
- // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
- // instead of creating a new one every time. This will involve "un-destroying" the
- // texture or making the blit texture "external".
- mBlitTexture->APIDestroy();
- mBlitTexture = nullptr;
- }
-
- // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
- // presentable texture to present at the end of submits that use them and ideally even
- // folding that in the free layout transition at the end of render passes.
- mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
- mTexture->GetAllSubresources());
-
- DAWN_TRY(device->SubmitPendingCommands());
-
- // Assuming that the present queue is the same as the graphics queue, the proper
- // synchronization has already been done on the queue so we don't need to wait on any
- // semaphores.
- // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
- VkPresentInfoKHR presentInfo;
- presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
- presentInfo.pNext = nullptr;
- presentInfo.waitSemaphoreCount = 0;
- presentInfo.pWaitSemaphores = nullptr;
- presentInfo.swapchainCount = 1;
- presentInfo.pSwapchains = &*mSwapChain;
- presentInfo.pImageIndices = &mLastImageIndex;
- presentInfo.pResults = nullptr;
-
- // Free the texture before present so error handling doesn't skip that step.
- mTexture->APIDestroy();
- mTexture = nullptr;
-
- VkResult result =
- VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
-
- switch (result) {
- case VK_SUCCESS:
- // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
- // exactly, but can still be used to present to the surface successfully", so we
- // can also treat it as a "success" error code of vkQueuePresentKHR().
- case VK_SUBOPTIMAL_KHR:
- return {};
-
- // This present cannot be recovered. Re-initialize the VkSwapchain so that future
- // presents work..
- case VK_ERROR_OUT_OF_DATE_KHR:
- return Initialize(this);
-
- // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
- case VK_ERROR_SURFACE_LOST_KHR:
- default:
- return CheckVkSuccess(::VkResult(result), "QueuePresent");
- }
- }
-
- ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
- return GetCurrentTextureViewInternal();
- }
-
- ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
- Device* device = ToBackend(GetDevice());
-
- // Transiently create a semaphore that will be signaled when the presentation engine is done
- // with the swapchain image. Further operations on the image will wait for this semaphore.
- VkSemaphoreCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
- "CreateSemaphore"));
-
- VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
- device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
- VkFence{}, &mLastImageIndex));
-
- if (result == VK_SUCCESS) {
- // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
- // used instead of directly on the recording context?
- device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
- } else {
- // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
- // we get a chance.
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
- }
-
- switch (result) {
- // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
- // the swapchain is in a suboptimal state?
- case VK_SUBOPTIMAL_KHR:
- case VK_SUCCESS:
- break;
-
- case VK_ERROR_OUT_OF_DATE_KHR: {
- // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
- // swapchains always return that they are out of date.
- if (isReentrant) {
- // TODO(crbug.com/dawn/269): Allow losing the surface instead?
- return DAWN_INTERNAL_ERROR(
- "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
- }
-
- // Re-initialize the VkSwapchain and try getting the texture again.
- DAWN_TRY(Initialize(this));
- return GetCurrentTextureViewInternal(true);
- }
-
- // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
- case VK_ERROR_SURFACE_LOST_KHR:
- default:
- DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
- }
-
- TextureDescriptor textureDesc;
- textureDesc.size.width = mConfig.extent.width;
- textureDesc.size.height = mConfig.extent.height;
- textureDesc.format = mConfig.wgpuFormat;
- textureDesc.usage = mConfig.wgpuUsage;
-
- VkImage currentImage = mSwapChainImages[mLastImageIndex];
- mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
-
- // In the happy path we can use the swapchain image directly.
- if (!mConfig.needsBlit) {
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- return mTexture->APICreateView();
- }
-
- // The blit texture always perfectly matches what the user requested for the swapchain.
- // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
- TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
- DAWN_TRY_ASSIGN(mBlitTexture,
- Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
- // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
- return mBlitTexture->APICreateView();
- }
-
- void SwapChain::DetachFromSurfaceImpl() {
- if (mTexture != nullptr) {
- mTexture->APIDestroy();
- mTexture = nullptr;
- }
-
- if (mBlitTexture != nullptr) {
- mBlitTexture->APIDestroy();
- mBlitTexture = nullptr;
- }
-
- // The swapchain images are destroyed with the swapchain.
- if (mSwapChain != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
- mSwapChain = VK_NULL_HANDLE;
- }
-
- if (mVkSurface != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
- mVkSurface = VK_NULL_HANDLE;
- }
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
deleted file mode 100644
index e93039b8c8a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
-#define DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
-
-#include "dawn_native/SwapChain.h"
-
-#include "common/vulkan_platform.h"
-
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
- class Texture;
- struct VulkanSurfaceInfo;
-
- class OldSwapChain : public OldSwapChainBase {
- public:
- static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
-
- protected:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~OldSwapChain() override;
-
- TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
- MaybeError OnBeforePresent(TextureViewBase* texture) override;
-
- private:
- wgpu::TextureUsage mTextureUsage;
- };
-
- class SwapChain : public NewSwapChainBase {
- public:
- static ResultOrError<Ref<SwapChain>> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
- ~SwapChain() override;
-
- private:
- using NewSwapChainBase::NewSwapChainBase;
- MaybeError Initialize(NewSwapChainBase* previousSwapChain);
- void DestroyImpl() override;
-
- struct Config {
- // Information that's passed to vulkan swapchain creation.
- VkPresentModeKHR presentMode;
- VkExtent2D extent;
- VkImageUsageFlags usage;
- VkFormat format;
- VkColorSpaceKHR colorSpace;
- uint32_t targetImageCount;
- VkSurfaceTransformFlagBitsKHR transform;
- VkCompositeAlphaFlagBitsKHR alphaMode;
-
- // Redundant information but as WebGPU enums to create the wgpu::Texture that
- // encapsulates the native swapchain texture.
- wgpu::TextureUsage wgpuUsage;
- wgpu::TextureFormat wgpuFormat;
-
- // Information about the blit workarounds we need to do (if any)
- bool needsBlit = false;
- };
- ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
- ResultOrError<TextureViewBase*> GetCurrentTextureViewInternal(bool isReentrant = false);
-
- // NewSwapChainBase implementation
- MaybeError PresentImpl() override;
- ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
- void DetachFromSurfaceImpl() override;
-
- Config mConfig;
-
- VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
- VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
- std::vector<VkImage> mSwapChainImages;
- uint32_t mLastImageIndex = 0;
-
- Ref<Texture> mBlitTexture;
- Ref<Texture> mTexture;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_SWAPCHAINVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
deleted file mode 100644
index 2edf724b137..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ /dev/null
@@ -1,1333 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/TextureVk.h"
-
-#include "common/Assert.h"
-#include "common/Math.h"
-#include "dawn_native/DynamicUploader.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/VulkanBackend.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BufferVk.h"
-#include "dawn_native/vulkan/CommandRecordingContext.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceHeapVk.h"
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-#include "dawn_native/vulkan/StagingBufferVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
- // Converts an Dawn texture dimension to a Vulkan image view type.
- // Contrary to image types, image view types include arrayness and cubemapness
- VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- return VK_IMAGE_VIEW_TYPE_2D;
- case wgpu::TextureViewDimension::e2DArray:
- return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
- case wgpu::TextureViewDimension::Cube:
- return VK_IMAGE_VIEW_TYPE_CUBE;
- case wgpu::TextureViewDimension::CubeArray:
- return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
- case wgpu::TextureViewDimension::e3D:
- return VK_IMAGE_VIEW_TYPE_3D;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- // Computes which vulkan access type could be required for the given Dawn usage.
- // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
- // the previous usage is readonly because an execution dependency is sufficient.
- VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
- VkAccessFlags flags = 0;
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- flags |= VK_ACCESS_TRANSFER_READ_BIT;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- flags |= VK_ACCESS_SHADER_READ_BIT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
- } else {
- flags |=
- VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- }
- }
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- // The Vulkan spec has the following note:
- //
- // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
- // processing, or perform any visibility operations (as vkQueuePresentKHR performs
- // automatic visibility operations). To achieve this, the dstAccessMask member of
- // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
- // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
- //
- // So on the transition to Present we don't need an access flag. The other
- // direction doesn't matter because swapchain textures always start a new frame
- // as uninitialized.
- flags |= 0;
- }
-
- return flags;
- }
-
- // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
- VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
- VkPipelineStageFlags flags = 0;
-
- if (usage == wgpu::TextureUsage::None) {
- // This only happens when a texture is initially created (and for srcAccessMask) in
- // which case there is no need to wait on anything to stop accessing this texture.
- return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
- }
- if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
- flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
- // introducing FS -> VS dependencies that would prevent parallelization on tiler
- // GPUs
- flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |=
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
- VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
- } else {
- flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
- }
- }
- if (usage & kPresentTextureUsage) {
- // The present usage is only used internally by the swapchain and is never used in
- // combination with other usages.
- ASSERT(usage == kPresentTextureUsage);
- // The Vulkan spec has the following note:
- //
- // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
- // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
- // processing, or perform any visibility operations (as vkQueuePresentKHR performs
- // automatic visibility operations). To achieve this, the dstAccessMask member of
- // the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
- // should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
- //
- // So on the transition to Present we use the "bottom of pipe" stage. The other
- // direction doesn't matter because swapchain textures always start a new frame
- // as uninitialized.
- flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
- }
-
- // A zero value isn't a valid pipeline stage mask
- ASSERT(flags != 0);
- return flags;
- }
-
- VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
- wgpu::TextureUsage lastUsage,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- VkImageMemoryBarrier barrier;
- barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- barrier.pNext = nullptr;
- barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
- barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
- barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
- barrier.newLayout = VulkanImageLayout(texture, usage);
- barrier.image = texture->GetHandle();
- barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
- barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
- barrier.subresourceRange.levelCount = range.levelCount;
- barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
- barrier.subresourceRange.layerCount = range.layerCount;
-
- barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- return barrier;
- }
-
- void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
- const Extent3D& size = texture.GetSize();
-
- info->mipLevels = texture.GetNumMipLevels();
- info->samples = VulkanSampleCount(texture.GetSampleCount());
-
- // Fill in the image type, and paper over differences in how the array layer count is
- // specified between WebGPU and Vulkan.
- switch (texture.GetDimension()) {
- case wgpu::TextureDimension::e2D:
- info->imageType = VK_IMAGE_TYPE_2D;
- info->extent = {size.width, size.height, 1};
- info->arrayLayers = size.depthOrArrayLayers;
- break;
-
- case wgpu::TextureDimension::e3D:
- info->imageType = VK_IMAGE_TYPE_3D;
- info->extent = {size.width, size.height, size.depthOrArrayLayers};
- info->arrayLayers = 1;
- break;
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
- }
-
- } // namespace
-
- // Converts Dawn texture format to Vulkan formats.
- VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R8Unorm:
- return VK_FORMAT_R8_UNORM;
- case wgpu::TextureFormat::R8Snorm:
- return VK_FORMAT_R8_SNORM;
- case wgpu::TextureFormat::R8Uint:
- return VK_FORMAT_R8_UINT;
- case wgpu::TextureFormat::R8Sint:
- return VK_FORMAT_R8_SINT;
-
- case wgpu::TextureFormat::R16Uint:
- return VK_FORMAT_R16_UINT;
- case wgpu::TextureFormat::R16Sint:
- return VK_FORMAT_R16_SINT;
- case wgpu::TextureFormat::R16Float:
- return VK_FORMAT_R16_SFLOAT;
- case wgpu::TextureFormat::RG8Unorm:
- return VK_FORMAT_R8G8_UNORM;
- case wgpu::TextureFormat::RG8Snorm:
- return VK_FORMAT_R8G8_SNORM;
- case wgpu::TextureFormat::RG8Uint:
- return VK_FORMAT_R8G8_UINT;
- case wgpu::TextureFormat::RG8Sint:
- return VK_FORMAT_R8G8_SINT;
-
- case wgpu::TextureFormat::R32Uint:
- return VK_FORMAT_R32_UINT;
- case wgpu::TextureFormat::R32Sint:
- return VK_FORMAT_R32_SINT;
- case wgpu::TextureFormat::R32Float:
- return VK_FORMAT_R32_SFLOAT;
- case wgpu::TextureFormat::RG16Uint:
- return VK_FORMAT_R16G16_UINT;
- case wgpu::TextureFormat::RG16Sint:
- return VK_FORMAT_R16G16_SINT;
- case wgpu::TextureFormat::RG16Float:
- return VK_FORMAT_R16G16_SFLOAT;
- case wgpu::TextureFormat::RGBA8Unorm:
- return VK_FORMAT_R8G8B8A8_UNORM;
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return VK_FORMAT_R8G8B8A8_SRGB;
- case wgpu::TextureFormat::RGBA8Snorm:
- return VK_FORMAT_R8G8B8A8_SNORM;
- case wgpu::TextureFormat::RGBA8Uint:
- return VK_FORMAT_R8G8B8A8_UINT;
- case wgpu::TextureFormat::RGBA8Sint:
- return VK_FORMAT_R8G8B8A8_SINT;
- case wgpu::TextureFormat::BGRA8Unorm:
- return VK_FORMAT_B8G8R8A8_UNORM;
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- return VK_FORMAT_B8G8R8A8_SRGB;
- case wgpu::TextureFormat::RGB10A2Unorm:
- return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
- case wgpu::TextureFormat::RG11B10Ufloat:
- return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
-
- case wgpu::TextureFormat::RG32Uint:
- return VK_FORMAT_R32G32_UINT;
- case wgpu::TextureFormat::RG32Sint:
- return VK_FORMAT_R32G32_SINT;
- case wgpu::TextureFormat::RG32Float:
- return VK_FORMAT_R32G32_SFLOAT;
- case wgpu::TextureFormat::RGBA16Uint:
- return VK_FORMAT_R16G16B16A16_UINT;
- case wgpu::TextureFormat::RGBA16Sint:
- return VK_FORMAT_R16G16B16A16_SINT;
- case wgpu::TextureFormat::RGBA16Float:
- return VK_FORMAT_R16G16B16A16_SFLOAT;
-
- case wgpu::TextureFormat::RGBA32Uint:
- return VK_FORMAT_R32G32B32A32_UINT;
- case wgpu::TextureFormat::RGBA32Sint:
- return VK_FORMAT_R32G32B32A32_SINT;
- case wgpu::TextureFormat::RGBA32Float:
- return VK_FORMAT_R32G32B32A32_SFLOAT;
-
- case wgpu::TextureFormat::Depth16Unorm:
- return VK_FORMAT_D16_UNORM;
- case wgpu::TextureFormat::Depth32Float:
- return VK_FORMAT_D32_SFLOAT;
- case wgpu::TextureFormat::Depth24Plus:
- return VK_FORMAT_D32_SFLOAT;
- case wgpu::TextureFormat::Depth24PlusStencil8:
- // Depth24PlusStencil8 maps to either of these two formats because only requires
- // that one of the two be present. The VulkanUseD32S8 toggle combines the wish of
- // the environment, default to using D32S8, and availability information so we know
- // that the format is available.
- if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
- return VK_FORMAT_D32_SFLOAT_S8_UINT;
- } else {
- return VK_FORMAT_D24_UNORM_S8_UINT;
- }
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
- case wgpu::TextureFormat::BC2RGBAUnorm:
- return VK_FORMAT_BC2_UNORM_BLOCK;
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- return VK_FORMAT_BC2_SRGB_BLOCK;
- case wgpu::TextureFormat::BC3RGBAUnorm:
- return VK_FORMAT_BC3_UNORM_BLOCK;
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- return VK_FORMAT_BC3_SRGB_BLOCK;
- case wgpu::TextureFormat::BC4RSnorm:
- return VK_FORMAT_BC4_SNORM_BLOCK;
- case wgpu::TextureFormat::BC4RUnorm:
- return VK_FORMAT_BC4_UNORM_BLOCK;
- case wgpu::TextureFormat::BC5RGSnorm:
- return VK_FORMAT_BC5_SNORM_BLOCK;
- case wgpu::TextureFormat::BC5RGUnorm:
- return VK_FORMAT_BC5_UNORM_BLOCK;
- case wgpu::TextureFormat::BC6HRGBFloat:
- return VK_FORMAT_BC6H_SFLOAT_BLOCK;
- case wgpu::TextureFormat::BC6HRGBUfloat:
- return VK_FORMAT_BC6H_UFLOAT_BLOCK;
- case wgpu::TextureFormat::BC7RGBAUnorm:
- return VK_FORMAT_BC7_UNORM_BLOCK;
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return VK_FORMAT_BC7_SRGB_BLOCK;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
- case wgpu::TextureFormat::EACR11Unorm:
- return VK_FORMAT_EAC_R11_UNORM_BLOCK;
- case wgpu::TextureFormat::EACR11Snorm:
- return VK_FORMAT_EAC_R11_SNORM_BLOCK;
- case wgpu::TextureFormat::EACRG11Unorm:
- return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
- case wgpu::TextureFormat::EACRG11Snorm:
- return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC5x4Unorm:
- return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC5x5Unorm:
- return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC6x5Unorm:
- return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC6x6Unorm:
- return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x5Unorm:
- return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x6Unorm:
- return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC8x8Unorm:
- return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x5Unorm:
- return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x6Unorm:
- return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x8Unorm:
- return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC10x10Unorm:
- return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC12x10Unorm:
- return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
- case wgpu::TextureFormat::ASTC12x12Unorm:
- return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
-
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- // TODO(dawn:690): implement depth24unorm-stencil8
- case wgpu::TextureFormat::Depth24UnormStencil8:
- // TODO(dawn:690): implement depth32float-stencil8
- case wgpu::TextureFormat::Depth32FloatStencil8:
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
- // between color and depth attachment usages.
- VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
- VkImageUsageFlags flags = 0;
-
- if (usage & wgpu::TextureUsage::CopySrc) {
- flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
- }
- if (usage & wgpu::TextureUsage::CopyDst) {
- flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- }
- if (usage & wgpu::TextureUsage::TextureBinding) {
- flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
- }
- if (usage & wgpu::TextureUsage::StorageBinding) {
- flags |= VK_IMAGE_USAGE_STORAGE_BIT;
- }
- if (usage & wgpu::TextureUsage::RenderAttachment) {
- if (format.HasDepthOrStencil()) {
- flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
- } else {
- flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
- }
- }
-
- return flags;
- }
-
- // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
- // layout must match the layout given to various Vulkan operations as well as the layout given
- // to descriptor set writes.
- VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
- if (usage == wgpu::TextureUsage::None) {
- return VK_IMAGE_LAYOUT_UNDEFINED;
- }
-
- if (!wgpu::HasZeroOrOneBits(usage)) {
- // Sampled | ReadOnlyStorage is the only possible multi-bit usage, if more appear we
- // might need additional special-casing.
- ASSERT(usage == wgpu::TextureUsage::TextureBinding);
- return VK_IMAGE_LAYOUT_GENERAL;
- }
-
- // Usage has a single bit so we can switch on its value directly.
- switch (usage) {
- case wgpu::TextureUsage::CopyDst:
- return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
-
- // A texture that's sampled and storage may be used as both usages in the same pass.
- // When that happens, the layout must be GENERAL because that's a requirement for
- // the storage usage. We can't know at bindgroup creation time if that case will
- // happen so we must prepare for the pessimistic case and always use the GENERAL
- // layout.
- case wgpu::TextureUsage::TextureBinding:
- if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
- return VK_IMAGE_LAYOUT_GENERAL;
- } else {
- return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- }
-
- // Vulkan texture copy functions require the image to be in _one_ known layout.
- // Depending on whether parts of the texture have been transitioned to only CopySrc
- // or a combination with something else, the texture could be in a combination of
- // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
- // GENERAL.
- // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
- // once and can instead track subresources so we should lift this limitation.
- case wgpu::TextureUsage::CopySrc:
- // Read-only and write-only storage textures must use general layout because load
- // and store operations on storage images can only be done on the images in
- // VK_IMAGE_LAYOUT_GENERAL layout.
- case wgpu::TextureUsage::StorageBinding:
- return VK_IMAGE_LAYOUT_GENERAL;
-
- case wgpu::TextureUsage::RenderAttachment:
- if (texture->GetFormat().HasDepthOrStencil()) {
- return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
- } else {
- return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- }
-
- case kPresentTextureUsage:
- return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
- case wgpu::TextureUsage::None:
- break;
- }
- UNREACHABLE();
- }
-
- VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
- switch (sampleCount) {
- case 1:
- return VK_SAMPLE_COUNT_1_BIT;
- case 4:
- return VK_SAMPLE_COUNT_4_BIT;
- }
- UNREACHABLE();
- }
-
- MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
- const TextureDescriptor* descriptor) {
- DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
- "Texture dimension (%s) is not %s.", descriptor->dimension,
- wgpu::TextureDimension::e2D);
-
- DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
- descriptor->mipLevelCount);
-
- DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
- "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
- DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
- descriptor->sampleCount);
-
- return {};
- }
-
- bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
- const VkImageCreateInfo& imageCreateInfo) {
- ASSERT(device);
-
- VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
- VkImageFormatProperties properties;
- if (device->fn.GetPhysicalDeviceImageFormatProperties(
- physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
- imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
- &properties) != VK_SUCCESS) {
- UNREACHABLE();
- }
-
- return properties.sampleCounts & imageCreateInfo.samples;
- }
-
- // static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const TextureDescriptor* descriptor,
- VkImageUsageFlags extraUsages) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
- return std::move(texture);
- }
-
- // static
- ResultOrError<Texture*> Texture::CreateFromExternal(
- Device* device,
- const ExternalImageDescriptorVk* descriptor,
- const TextureDescriptor* textureDescriptor,
- external_memory::Service* externalMemoryService) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
- DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
- return texture.Detach();
- }
-
- // static
- Ref<Texture> Texture::CreateForSwapChain(Device* device,
- const TextureDescriptor* descriptor,
- VkImage nativeImage) {
- Ref<Texture> texture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- texture->InitializeForSwapChain(nativeImage);
- return texture;
- }
-
- Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
- : TextureBase(device, descriptor, state),
- // A usage of none will make sure the texture is transitioned before its first use as
- // required by the Vulkan spec.
- mSubresourceLastUsages(ComputeAspectsForSubresourceStorage(),
- GetArrayLayers(),
- GetNumMipLevels(),
- wgpu::TextureUsage::None) {
- }
-
- MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
- Device* device = ToBackend(GetDevice());
-
- // Create the Vulkan image "container". We don't need to check that the format supports the
- // combination of sample, usage etc. because validation should have been done in the Dawn
- // frontend already based on the minimum supported formats in the Vulkan spec
- VkImageCreateInfo createInfo = {};
- FillVulkanCreateInfoSizesAndType(*this, &createInfo);
-
- createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.format = VulkanImageFormat(device, GetFormat().format);
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- ASSERT(IsSampleCountSupported(device, createInfo));
-
- if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
- createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
- }
-
- // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
- // that are used in vkCmdClearColorImage() must have been created with this flag, which is
- // also required for the implementation of robust resource initialization.
- createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateImage"));
-
- // Create the image memory and associate it with the container
- VkMemoryRequirements requirements;
- device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
-
- DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
- requirements, MemoryKind::Opaque));
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
- ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
- mMemoryAllocation.GetOffset()),
- "BindImageMemory"));
-
- if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
- GetAllSubresources(), TextureBase::ClearValue::NonZero));
- }
-
- SetLabelImpl();
-
- return {};
- }
-
- // Internally managed, but imported from external handle
- MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
- external_memory::Service* externalMemoryService) {
- VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
- VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
- DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage),
- "Creating an image from external memory is not supported.");
-
- mExternalState = ExternalState::PendingAcquire;
-
- mPendingAcquireOldLayout = descriptor->releasedOldLayout;
- mPendingAcquireNewLayout = descriptor->releasedNewLayout;
-
- VkImageCreateInfo baseCreateInfo = {};
- FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
-
- baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- baseCreateInfo.pNext = nullptr;
- baseCreateInfo.format = format;
- baseCreateInfo.usage = usage;
- baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- baseCreateInfo.queueFamilyIndexCount = 0;
- baseCreateInfo.pQueueFamilyIndices = nullptr;
-
- // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
- // that are used in vkCmdClearColorImage() must have been created with this flag, which is
- // also required for the implementation of robust resource initialization.
- baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
-
- SetLabelHelper("Dawn_ExternalTexture");
-
- return {};
- }
-
- void Texture::InitializeForSwapChain(VkImage nativeImage) {
- mHandle = nativeImage;
- SetLabelHelper("Dawn_SwapChainTexture");
- }
-
- MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores) {
- Device* device = ToBackend(GetDevice());
- DAWN_TRY(CheckVkSuccess(
- device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
- "BindImageMemory (external)"));
-
- // Don't clear imported texture if already initialized
- if (descriptor->isInitialized) {
- SetIsSubresourceContentInitialized(true, GetAllSubresources());
- }
-
- // Success, acquire all the external objects.
- mExternalAllocation = externalMemoryAllocation;
- mSignalSemaphore = signalSemaphore;
- mWaitRequirements = std::move(waitSemaphores);
- return {};
- }
-
- MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
- VkSemaphore* signalSemaphore,
- VkImageLayout* releasedOldLayout,
- VkImageLayout* releasedNewLayout) {
- Device* device = ToBackend(GetDevice());
-
- DAWN_INVALID_IF(mExternalState == ExternalState::Released,
- "Can't export a signal semaphore from signaled texture %s.", this);
-
- DAWN_INVALID_IF(
- mExternalAllocation == VK_NULL_HANDLE,
- "Can't export a signal semaphore from destroyed or non-external texture %s.", this);
-
- ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
-
- // Release the texture
- mExternalState = ExternalState::Released;
-
- ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
- wgpu::TextureUsage usage = mSubresourceLastUsages.Get(Aspect::Color, 0, 0);
-
- VkImageMemoryBarrier barrier;
- barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- barrier.pNext = nullptr;
- barrier.image = GetHandle();
- barrier.subresourceRange.aspectMask = VulkanAspectMask(GetFormat().aspects);
- barrier.subresourceRange.baseMipLevel = 0;
- barrier.subresourceRange.levelCount = 1;
- barrier.subresourceRange.baseArrayLayer = 0;
- barrier.subresourceRange.layerCount = 1;
-
- barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
- barrier.dstAccessMask = 0; // The barrier must be paired with another barrier that will
- // specify the dst access mask on the importing queue.
-
- barrier.oldLayout = VulkanImageLayout(this, usage);
- if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
- // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
- // special value to indicate no layout transition should be done.
- barrier.newLayout = barrier.oldLayout;
- } else {
- barrier.newLayout = desiredLayout;
- }
-
- barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
- barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
-
- VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
- VkPipelineStageFlags dstStages =
- VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // We don't know when the importing queue will need
- // the texture, so pass
- // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
- // the barrier happens-before any usage in the
- // importing queue.
-
- CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 0, nullptr, 1, &barrier);
-
- // Queue submit to signal we are done with the texture
- recordingContext->signalSemaphores.push_back(mSignalSemaphore);
- DAWN_TRY(device->SubmitPendingCommands());
-
- // Write out the layouts and signal semaphore
- *releasedOldLayout = barrier.oldLayout;
- *releasedNewLayout = barrier.newLayout;
- *signalSemaphore = mSignalSemaphore;
-
- mSignalSemaphore = VK_NULL_HANDLE;
-
- // Destroy the texture so it can't be used again
- Destroy();
- return {};
- }
-
- Texture::~Texture() {
- }
-
- void Texture::SetLabelHelper(const char* prefix) {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE,
- reinterpret_cast<uint64_t&>(mHandle), prefix, GetLabel());
- }
-
- void Texture::SetLabelImpl() {
- SetLabelHelper("Dawn_InternalTexture");
- }
-
- void Texture::DestroyImpl() {
- if (GetTextureState() == TextureState::OwnedInternal) {
- Device* device = ToBackend(GetDevice());
-
- // For textures created from a VkImage, the allocation if kInvalid so the Device knows
- // to skip the deallocation of the (absence of) VkDeviceMemory.
- device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
-
- if (mHandle != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- }
-
- if (mExternalAllocation != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
- }
-
- mHandle = VK_NULL_HANDLE;
- mExternalAllocation = VK_NULL_HANDLE;
- // If a signal semaphore exists it should be requested before we delete the texture
- ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
- }
- // For Vulkan, we currently run the base destruction code after the internal changes because
- // of the dependency on the texture state which the base code overwrites too early.
- TextureBase::DestroyImpl();
- }
-
- VkImage Texture::GetHandle() const {
- return mHandle;
- }
-
- VkImageAspectFlags Texture::GetVkAspectMask(wgpu::TextureAspect aspect) const {
- // TODO(enga): These masks could be precomputed.
- switch (aspect) {
- case wgpu::TextureAspect::All:
- return VulkanAspectMask(GetFormat().aspects);
- case wgpu::TextureAspect::DepthOnly:
- ASSERT(GetFormat().aspects & Aspect::Depth);
- return VulkanAspectMask(Aspect::Depth);
- case wgpu::TextureAspect::StencilOnly:
- ASSERT(GetFormat().aspects & Aspect::Stencil);
- return VulkanAspectMask(Aspect::Stencil);
- case wgpu::TextureAspect::Plane0Only:
- case wgpu::TextureAspect::Plane1Only:
- break;
- }
- UNREACHABLE();
- }
-
- void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
- std::vector<VkImageMemoryBarrier>* barriers,
- size_t transitionBarrierStart) {
- ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
-
- // transitionBarrierStart specify the index where barriers for current transition start in
- // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
- // have already added into the vector during current transition.
- ASSERT(barriers->size() - transitionBarrierStart <= 1);
-
- if (mExternalState == ExternalState::PendingAcquire) {
- if (barriers->size() == transitionBarrierStart) {
- barriers->push_back(BuildMemoryBarrier(
- this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
- SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
- }
-
- VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
- // Transfer texture from external queue to graphics queue
- barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
- barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
-
- // srcAccessMask means nothing when importing. Queue transfers require a barrier on
- // both the importing and exporting queues. The exporting queue should have specified
- // this.
- barrier->srcAccessMask = 0;
-
- // This should be the first barrier after import.
- ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
-
- // Save the desired layout. We may need to transition through an intermediate
- // |mPendingAcquireLayout| first.
- VkImageLayout desiredLayout = barrier->newLayout;
-
- bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
-
- // We don't care about the pending old layout if the texture is uninitialized. The
- // driver is free to discard it. Likewise, we don't care about the pending new layout if
- // the texture is uninitialized. We can skip the layout transition.
- if (!isInitialized) {
- barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- barrier->newLayout = desiredLayout;
- } else {
- barrier->oldLayout = mPendingAcquireOldLayout;
- barrier->newLayout = mPendingAcquireNewLayout;
- }
-
- // If these are unequal, we need an another barrier to transition the layout.
- if (barrier->newLayout != desiredLayout) {
- VkImageMemoryBarrier layoutBarrier;
- layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- layoutBarrier.pNext = nullptr;
- layoutBarrier.image = GetHandle();
- layoutBarrier.subresourceRange = barrier->subresourceRange;
-
- // Transition from the acquired new layout to the desired layout.
- layoutBarrier.oldLayout = barrier->newLayout;
- layoutBarrier.newLayout = desiredLayout;
-
- // We already transitioned these.
- layoutBarrier.srcAccessMask = 0;
- layoutBarrier.dstAccessMask = 0;
- layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-
- barriers->push_back(layoutBarrier);
- }
-
- mExternalState = ExternalState::Acquired;
- }
-
- mLastExternalState = mExternalState;
-
- recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
- mWaitRequirements.begin(), mWaitRequirements.end());
- mWaitRequirements.clear();
- }
-
- bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
- // Reuse the texture directly and avoid encoding barriers when it isn't needed.
- bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
- if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
- return true;
- }
- return false;
- }
-
- bool Texture::ShouldCombineDepthStencilBarriers() const {
- return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
- }
-
- Aspect Texture::ComputeAspectsForSubresourceStorage() const {
- if (ShouldCombineDepthStencilBarriers()) {
- return Aspect::CombinedDepthStencil;
- }
- return GetFormat().aspects;
- }
-
- void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const TextureSubresourceUsage& textureUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
- // this limitation by combining the usages in the two planes of `textureUsages` into a
- // single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
- // for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
- if (ShouldCombineDepthStencilBarriers()) {
- SubresourceStorage<wgpu::TextureUsage> combinedUsages(
- Aspect::CombinedDepthStencil, GetArrayLayers(), GetNumMipLevels());
- textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
- SubresourceRange updateRange = range;
- updateRange.aspects = Aspect::CombinedDepthStencil;
-
- combinedUsages.Update(
- updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
- *combinedUsage |= usage;
- });
- });
-
- TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
- dstStages);
- } else {
- TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
- dstStages);
- }
- }
-
- void Texture::TransitionUsageForPassImpl(
- CommandRecordingContext* recordingContext,
- const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- size_t transitionBarrierStart = imageBarriers->size();
- const Format& format = GetFormat();
-
- wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
- wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
-
- // TODO(crbug.com/dawn/814): support 1D textures.
- ASSERT(GetDimension() != wgpu::TextureDimension::e1D);
-
- mSubresourceLastUsages.Merge(
- subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
- const wgpu::TextureUsage& newUsage) {
- if (newUsage == wgpu::TextureUsage::None ||
- CanReuseWithoutBarrier(*lastUsage, newUsage)) {
- return;
- }
-
- imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
-
- allLastUsages |= *lastUsage;
- allUsages |= newUsage;
-
- *lastUsage = newUsage;
- });
-
- if (mExternalState != ExternalState::InternalOnly) {
- TweakTransitionForExternalUsage(recordingContext, imageBarriers,
- transitionBarrierStart);
- }
-
- *srcStages |= VulkanPipelineStage(allLastUsages, format);
- *dstStages |= VulkanPipelineStage(allUsages, format);
- }
-
- void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range) {
- std::vector<VkImageMemoryBarrier> barriers;
-
- VkPipelineStageFlags srcStages = 0;
- VkPipelineStageFlags dstStages = 0;
-
- TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
-
- if (mExternalState != ExternalState::InternalOnly) {
- TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
- }
-
- if (!barriers.empty()) {
- ASSERT(srcStages != 0 && dstStages != 0);
- ToBackend(GetDevice())
- ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
- nullptr, 0, nullptr, barriers.size(), barriers.data());
- }
- }
-
- void Texture::TransitionUsageAndGetResourceBarrier(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
- // this limitation by modifying the range to be on CombinedDepthStencil. The barriers will
- // be produced for DEPTH | STENCIL since the SubresourceRange uses
- // Aspect::CombinedDepthStencil.
- if (ShouldCombineDepthStencilBarriers()) {
- SubresourceRange updatedRange = range;
- updatedRange.aspects = Aspect::CombinedDepthStencil;
-
- std::vector<VkImageMemoryBarrier> newBarriers;
- TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
- dstStages);
- } else {
- TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages,
- dstStages);
- }
- }
-
- void Texture::TransitionUsageAndGetResourceBarrierImpl(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages) {
- ASSERT(imageBarriers != nullptr);
- const Format& format = GetFormat();
-
- wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
- mSubresourceLastUsages.Update(
- range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
- if (CanReuseWithoutBarrier(*lastUsage, usage)) {
- return;
- }
-
- imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
-
- allLastUsages |= *lastUsage;
- *lastUsage = usage;
- });
-
- *srcStages |= VulkanPipelineStage(allLastUsages, format);
- *dstStages |= VulkanPipelineStage(usage, format);
- }
-
- MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
- const SubresourceRange& range,
- TextureBase::ClearValue clearValue) {
- Device* device = ToBackend(GetDevice());
-
- const bool isZero = clearValue == TextureBase::ClearValue::Zero;
- uint32_t uClearColor = isZero ? 0 : 1;
- int32_t sClearColor = isZero ? 0 : 1;
- float fClearColor = isZero ? 0.f : 1.f;
-
- TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
-
- VkImageSubresourceRange imageRange = {};
- imageRange.levelCount = 1;
- imageRange.layerCount = 1;
-
- if (GetFormat().isCompressed) {
- if (range.aspects == Aspect::None) {
- return {};
- }
- // need to clear the texture with a copy from buffer
- ASSERT(range.aspects == Aspect::Color);
- const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
-
- Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
- uint32_t bytesPerRow =
- Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
- device->GetOptimalBytesPerRowAlignment());
- uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
- largestMipSize.depthOrArrayLayers;
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
- blockInfo.byteSize));
- memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
-
- std::vector<VkBufferImageCopy> regions;
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- Extent3D copySize = GetMipLevelPhysicalSize(level);
- imageRange.baseMipLevel = level;
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
- // Skip lazy clears if already initialized.
- continue;
- }
-
- TextureDataLayout dataLayout;
- dataLayout.offset = uploadHandle.startOffset;
- dataLayout.rowsPerImage = copySize.height / blockInfo.height;
- dataLayout.bytesPerRow = bytesPerRow;
- TextureCopy textureCopy;
- textureCopy.aspect = range.aspects;
- textureCopy.mipLevel = level;
- textureCopy.origin = {0, 0, layer};
- textureCopy.texture = this;
-
- regions.push_back(
- ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
- }
- }
- device->fn.CmdCopyBufferToImage(
- recordingContext->commandBuffer,
- ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
- } else {
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- imageRange.baseMipLevel = level;
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- Aspect aspects = Aspect::None;
- for (Aspect aspect : IterateEnumMask(range.aspects)) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
- // Skip lazy clears if already initialized.
- continue;
- }
- aspects |= aspect;
- }
-
- if (aspects == Aspect::None) {
- continue;
- }
-
- imageRange.aspectMask = VulkanAspectMask(aspects);
- imageRange.baseArrayLayer = layer;
-
- if (aspects &
- (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
- VkClearDepthStencilValue clearDepthStencilValue[1];
- clearDepthStencilValue[0].depth = fClearColor;
- clearDepthStencilValue[0].stencil = uClearColor;
- device->fn.CmdClearDepthStencilImage(
- recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
- &imageRange);
- } else {
- ASSERT(aspects == Aspect::Color);
- VkClearColorValue clearColorValue;
- switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
- case wgpu::TextureComponentType::Float:
- clearColorValue.float32[0] = fClearColor;
- clearColorValue.float32[1] = fClearColor;
- clearColorValue.float32[2] = fClearColor;
- clearColorValue.float32[3] = fClearColor;
- break;
- case wgpu::TextureComponentType::Sint:
- clearColorValue.int32[0] = sClearColor;
- clearColorValue.int32[1] = sClearColor;
- clearColorValue.int32[2] = sClearColor;
- clearColorValue.int32[3] = sClearColor;
- break;
- case wgpu::TextureComponentType::Uint:
- clearColorValue.uint32[0] = uClearColor;
- clearColorValue.uint32[1] = uClearColor;
- clearColorValue.uint32[2] = uClearColor;
- clearColorValue.uint32[3] = uClearColor;
- break;
- case wgpu::TextureComponentType::DepthComparison:
- UNREACHABLE();
- }
- device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- &clearColorValue, 1, &imageRange);
- }
- }
- }
- }
-
- if (clearValue == TextureBase::ClearValue::Zero) {
- SetIsSubresourceContentInitialized(true, range);
- device->IncrementLazyClearCountForTesting();
- }
- return {};
- }
-
- void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- const SubresourceRange& range) {
- if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
- return;
- }
- if (!IsSubresourceContentInitialized(range)) {
- // If subresource has not been initialized, clear it to black as it could contain dirty
- // bits from recycled memory
- GetDevice()->ConsumedError(
- ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
- }
- }
-
- VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
- return VulkanImageLayout(this, mSubresourceLastUsages.Get(Aspect::Color, 0, 0));
- }
-
- // static
- ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
- Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
- DAWN_TRY(view->Initialize(descriptor));
- return view;
- }
-
- MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
- if ((GetTexture()->GetUsage() &
- ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
- // If the texture view has no other usage than CopySrc and CopyDst, then it can't
- // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
- // validation errors warn if you create such a vkImageView, so return early.
- return {};
- }
-
- // Texture could be destroyed by the time we make a view.
- if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
- return {};
- }
-
- Device* device = ToBackend(GetTexture()->GetDevice());
-
- VkImageViewCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.image = ToBackend(GetTexture())->GetHandle();
- createInfo.viewType = VulkanImageViewType(descriptor->dimension);
- createInfo.format = VulkanImageFormat(device, descriptor->format);
- createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
- VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
-
- const SubresourceRange& subresources = GetSubresourceRange();
- createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
- createInfo.subresourceRange.levelCount = subresources.levelCount;
- createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
- createInfo.subresourceRange.layerCount = subresources.layerCount;
- createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
- "CreateImageView"));
-
- SetLabelImpl();
-
- return {};
- }
-
- TextureView::~TextureView() {
- }
-
- void TextureView::DestroyImpl() {
- Device* device = ToBackend(GetTexture()->GetDevice());
-
- if (mHandle != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- mHandle = VK_NULL_HANDLE;
- }
- }
-
- VkImageView TextureView::GetHandle() const {
- return mHandle;
- }
-
- void TextureView::SetLabelImpl() {
- SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE_VIEW,
- reinterpret_cast<uint64_t&>(mHandle), "Dawn_InternalTextureView", GetLabel());
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
deleted file mode 100644
index 4a1745da511..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_TEXTUREVK_H_
-#define DAWNNATIVE_VULKAN_TEXTUREVK_H_
-
-#include "dawn_native/Texture.h"
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/PassResourceUsage.h"
-#include "dawn_native/ResourceMemoryAllocation.h"
-#include "dawn_native/vulkan/ExternalHandle.h"
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-
-namespace dawn_native { namespace vulkan {
-
- struct CommandRecordingContext;
- class Device;
- class Texture;
-
- VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
- VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
- VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
- VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
-
- MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
- const TextureDescriptor* descriptor);
-
- bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
- const VkImageCreateInfo& imageCreateInfo);
-
- class Texture final : public TextureBase {
- public:
- // Used to create a regular texture from a descriptor.
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const TextureDescriptor* descriptor,
- VkImageUsageFlags extraUsages = 0);
-
- // Creates a texture and initializes it with a VkImage that references an external memory
- // object. Before the texture can be used, the VkDeviceMemory associated with the external
- // image must be bound via Texture::BindExternalMemory.
- static ResultOrError<Texture*> CreateFromExternal(
- Device* device,
- const ExternalImageDescriptorVk* descriptor,
- const TextureDescriptor* textureDescriptor,
- external_memory::Service* externalMemoryService);
-
- // Creates a texture that wraps a swapchain-allocated VkImage.
- static Ref<Texture> CreateForSwapChain(Device* device,
- const TextureDescriptor* descriptor,
- VkImage nativeImage);
-
- VkImage GetHandle() const;
- VkImageAspectFlags GetVkAspectMask(wgpu::TextureAspect aspect) const;
-
- // Transitions the texture to be used as `usage`, recording any necessary barrier in
- // `commands`.
- // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
- void TransitionUsageNow(CommandRecordingContext* recordingContext,
- wgpu::TextureUsage usage,
- const SubresourceRange& range);
- void TransitionUsageForPass(CommandRecordingContext* recordingContext,
- const TextureSubresourceUsage& textureUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
-
- void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
- const SubresourceRange& range);
-
- VkImageLayout GetCurrentLayoutForSwapChain() const;
-
- // Binds externally allocated memory to the VkImage and on success, takes ownership of
- // semaphores.
- MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores);
-
- MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
- VkSemaphore* signalSemaphore,
- VkImageLayout* releasedOldLayout,
- VkImageLayout* releasedNewLayout);
-
- void SetLabelHelper(const char* prefix);
-
- // Dawn API
- void SetLabelImpl() override;
-
- private:
- ~Texture() override;
- Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
-
- MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
- MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
- external_memory::Service* externalMemoryService);
- void InitializeForSwapChain(VkImage nativeImage);
-
- void DestroyImpl() override;
- MaybeError ClearTexture(CommandRecordingContext* recordingContext,
- const SubresourceRange& range,
- TextureBase::ClearValue);
-
- // Implementation details of the barrier computations for the texture.
- void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
- void TransitionUsageForPassImpl(
- CommandRecordingContext* recordingContext,
- const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
- void TransitionUsageAndGetResourceBarrierImpl(
- wgpu::TextureUsage usage,
- const SubresourceRange& range,
- std::vector<VkImageMemoryBarrier>* imageBarriers,
- VkPipelineStageFlags* srcStages,
- VkPipelineStageFlags* dstStages);
- void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
- std::vector<VkImageMemoryBarrier>* barriers,
- size_t transitionBarrierStart);
- bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
-
- // In base Vulkan, Depth and stencil can only be transitioned together. This function
- // indicates whether we should combine depth and stencil barriers to accommodate this
- // limitation.
- bool ShouldCombineDepthStencilBarriers() const;
- // Compute the Aspects of the SubresourceStoage for this texture depending on whether we're
- // doing the workaround for combined depth and stencil barriers.
- Aspect ComputeAspectsForSubresourceStorage() const;
-
- VkImage mHandle = VK_NULL_HANDLE;
- ResourceMemoryAllocation mMemoryAllocation;
- VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
-
- enum class ExternalState {
- InternalOnly,
- PendingAcquire,
- Acquired,
- Released
- };
- ExternalState mExternalState = ExternalState::InternalOnly;
- ExternalState mLastExternalState = ExternalState::InternalOnly;
-
- VkImageLayout mPendingAcquireOldLayout;
- VkImageLayout mPendingAcquireNewLayout;
-
- VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
- std::vector<VkSemaphore> mWaitRequirements;
-
- // Note that in early Vulkan versions it is not possible to transition depth and stencil
- // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
- // storage.
- SubresourceStorage<wgpu::TextureUsage> mSubresourceLastUsages;
- };
-
- class TextureView final : public TextureViewBase {
- public:
- static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
- VkImageView GetHandle() const;
-
- private:
- ~TextureView() override;
- void DestroyImpl() override;
- using TextureViewBase::TextureViewBase;
- MaybeError Initialize(const TextureViewDescriptor* descriptor);
-
- // Dawn API
- void SetLabelImpl() override;
-
- VkImageView mHandle = VK_NULL_HANDLE;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_TEXTUREVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
deleted file mode 100644
index 6e316c567d1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/UtilsVulkan.h"
-
-#include "common/Assert.h"
-#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Format.h"
-#include "dawn_native/Pipeline.h"
-#include "dawn_native/ShaderModule.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/Forward.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
- switch (op) {
- case wgpu::CompareFunction::Never:
- return VK_COMPARE_OP_NEVER;
- case wgpu::CompareFunction::Less:
- return VK_COMPARE_OP_LESS;
- case wgpu::CompareFunction::LessEqual:
- return VK_COMPARE_OP_LESS_OR_EQUAL;
- case wgpu::CompareFunction::Greater:
- return VK_COMPARE_OP_GREATER;
- case wgpu::CompareFunction::GreaterEqual:
- return VK_COMPARE_OP_GREATER_OR_EQUAL;
- case wgpu::CompareFunction::Equal:
- return VK_COMPARE_OP_EQUAL;
- case wgpu::CompareFunction::NotEqual:
- return VK_COMPARE_OP_NOT_EQUAL;
- case wgpu::CompareFunction::Always:
- return VK_COMPARE_OP_ALWAYS;
-
- case wgpu::CompareFunction::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- // Convert Dawn texture aspects to Vulkan texture aspect flags
- VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
- VkImageAspectFlags flags = 0;
- for (Aspect aspect : IterateEnumMask(aspects)) {
- switch (aspect) {
- case Aspect::Color:
- flags |= VK_IMAGE_ASPECT_COLOR_BIT;
- break;
- case Aspect::Depth:
- flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
- break;
- case Aspect::Stencil:
- flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
- break;
-
- case Aspect::CombinedDepthStencil:
- flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
- break;
-
- case Aspect::Plane0:
- case Aspect::Plane1:
- case Aspect::None:
- UNREACHABLE();
- }
- }
- return flags;
- }
-
- // Vulkan SPEC requires the source/destination region specified by each element of
- // pRegions must be a region that is contained within srcImage/dstImage. Here the size of
- // the image refers to the virtual size, while Dawn validates texture copy extent with the
- // physical size, so we need to re-calculate the texture copy extent to ensure it should fit
- // in the virtual size of the subresource.
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
- Extent3D validTextureCopyExtent = copySize;
- const TextureBase* texture = textureCopy.texture.Get();
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
- ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
- ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
- if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
- }
- if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
- ASSERT(texture->GetFormat().isCompressed);
- validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
- }
-
- return validTextureCopyExtent;
- }
-
- VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- TextureDataLayout passDataLayout;
- passDataLayout.offset = bufferCopy.offset;
- passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
- passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
- return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
- }
-
- VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
- const TextureCopy& textureCopy,
- const Extent3D& copySize) {
- const Texture* texture = ToBackend(textureCopy.texture.Get());
-
- VkBufferImageCopy region;
-
- region.bufferOffset = dataLayout.offset;
- // In Vulkan the row length is in texels while it is in bytes for Dawn
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
- ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
- region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
- region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
-
- region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
- region.imageSubresource.mipLevel = textureCopy.mipLevel;
-
- switch (textureCopy.texture->GetDimension()) {
- case wgpu::TextureDimension::e2D: {
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = textureCopy.origin.y;
- region.imageOffset.z = 0;
-
- region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
- region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
-
- Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
- region.imageExtent.width = imageExtent.width;
- region.imageExtent.height = imageExtent.height;
- region.imageExtent.depth = 1;
- break;
- }
-
- case wgpu::TextureDimension::e3D: {
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = textureCopy.origin.y;
- region.imageOffset.z = textureCopy.origin.z;
-
- region.imageSubresource.baseArrayLayer = 0;
- region.imageSubresource.layerCount = 1;
-
- Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
- region.imageExtent.width = imageExtent.width;
- region.imageExtent.height = imageExtent.height;
- region.imageExtent.depth = imageExtent.depthOrArrayLayers;
- break;
- }
-
- case wgpu::TextureDimension::e1D:
- UNREACHABLE();
- }
-
- return region;
- }
-
- void SetDebugName(Device* device,
- VkObjectType objectType,
- uint64_t objectHandle,
- const char* prefix,
- std::string label) {
- if (!objectHandle) {
- return;
- }
-
- if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
- VkDebugUtilsObjectNameInfoEXT objectNameInfo;
- objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
- objectNameInfo.pNext = nullptr;
- objectNameInfo.objectType = objectType;
- objectNameInfo.objectHandle = objectHandle;
-
- if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
- objectNameInfo.pObjectName = prefix;
- device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
- return;
- }
-
- std::string objectName = prefix;
- objectName += "_";
- objectName += label;
- objectNameInfo.pObjectName = objectName.c_str();
- device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
- }
- }
-
- VkSpecializationInfo* GetVkSpecializationInfo(
- const ProgrammableStage& programmableStage,
- VkSpecializationInfo* specializationInfo,
- std::vector<OverridableConstantScalar>* specializationDataEntries,
- std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
- ASSERT(specializationInfo);
- ASSERT(specializationDataEntries);
- ASSERT(specializationMapEntries);
-
- if (programmableStage.constants.size() == 0) {
- return nullptr;
- }
-
- const EntryPointMetadata& entryPointMetaData =
- programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
-
- for (const auto& pipelineConstant : programmableStage.constants) {
- const std::string& identifier = pipelineConstant.first;
- double value = pipelineConstant.second;
-
- // This is already validated so `identifier` must exist
- const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
-
- specializationMapEntries->push_back(
- VkSpecializationMapEntry{moduleConstant.id,
- static_cast<uint32_t>(specializationDataEntries->size() *
- sizeof(OverridableConstantScalar)),
- sizeof(OverridableConstantScalar)});
-
- OverridableConstantScalar entry{};
- switch (moduleConstant.type) {
- case EntryPointMetadata::OverridableConstant::Type::Boolean:
- entry.b = static_cast<int32_t>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Float32:
- entry.f32 = static_cast<float>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Int32:
- entry.i32 = static_cast<int32_t>(value);
- break;
- case EntryPointMetadata::OverridableConstant::Type::Uint32:
- entry.u32 = static_cast<uint32_t>(value);
- break;
- default:
- UNREACHABLE();
- }
- specializationDataEntries->push_back(entry);
- }
-
- specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
- specializationInfo->pMapEntries = specializationMapEntries->data();
- specializationInfo->dataSize =
- specializationDataEntries->size() * sizeof(OverridableConstantScalar);
- specializationInfo->pData = specializationDataEntries->data();
-
- return specializationInfo;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
deleted file mode 100644
index 53b6d41e262..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_UTILSVULKAN_H_
-#define DAWNNATIVE_VULKAN_UTILSVULKAN_H_
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Commands.h"
-#include "dawn_native/dawn_platform.h"
-
-namespace dawn_native {
- struct ProgrammableStage;
- union OverridableConstantScalar;
-} // namespace dawn_native
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
-
- // A Helper type used to build a pNext chain of extension structs.
- // Usage is:
- // 1) Create instance, passing the address of the first struct in the
- // chain. This will parse the existing |pNext| chain in it to find
- // its tail.
- //
- // 2) Call Add(&vk_struct) every time a new struct needs to be appended
- // to the chain.
- //
- // 3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to
- // initialize the struct with a given VkStructureType value while
- // appending it to the chain.
- //
- // Examples:
- // VkPhysicalFeatures2 features2 = {
- // .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
- // .pNext = nullptr,
- // };
- //
- // PNextChainBuilder featuresChain(&features2);
- //
- // featuresChain.Add(&featuresExtensions.subgroupSizeControl,
- // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
- //
- struct PNextChainBuilder {
- // Constructor takes the address of a Vulkan structure instance, and
- // walks its pNext chain to record the current location of its tail.
- //
- // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
- // which is why the VkBaseOutStructure* casts below are necessary.
- template <typename VK_STRUCT_TYPE>
- explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
- : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
- // Find the end of the current chain.
- while (mCurrent->pNext != nullptr) {
- mCurrent = mCurrent->pNext;
- }
- }
-
- // Add one item to the chain. |vk_struct| must be a Vulkan structure
- // that is already initialized.
- template <typename VK_STRUCT_TYPE>
- void Add(VK_STRUCT_TYPE* vkStruct) {
- // Sanity checks to ensure proper type safety.
- static_assert(
- offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
- offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
- "Argument type is not a proper Vulkan structure type");
- vkStruct->pNext = nullptr;
-
- mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
- mCurrent = mCurrent->pNext;
- }
-
- // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
- template <typename VK_STRUCT_TYPE>
- void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
- vkStruct->sType = sType;
- Add(vkStruct);
- }
-
- private:
- VkBaseOutStructure* mCurrent;
- };
-
- VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
-
- VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
-
- Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
-
- VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
- VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
- const TextureCopy& textureCopy,
- const Extent3D& copySize);
-
- void SetDebugName(Device* device,
- VkObjectType objectType,
- uint64_t objectHandle,
- const char* prefix,
- std::string label = "");
-
- // Returns nullptr or &specializationInfo
- // specializationInfo, specializationDataEntries, specializationMapEntries needs to
- // be alive at least until VkSpecializationInfo is passed into Vulkan Create*Pipelines
- VkSpecializationInfo* GetVkSpecializationInfo(
- const ProgrammableStage& programmableStage,
- VkSpecializationInfo* specializationInfo,
- std::vector<OverridableConstantScalar>* specializationDataEntries,
- std::vector<VkSpecializationMapEntry>* specializationMapEntries);
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_UTILSVULKAN_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
deleted file mode 100644
index f406f9f55d3..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// VulkanBackend.cpp: contains the definition of symbols exported by VulkanBackend.h so that they
-// can be compiled twice: once export (shared library), once not exported (static library)
-
-// Include vulkan_platform.h before VulkanBackend.h includes vulkan.h so that we use our version
-// of the non-dispatchable handles.
-#include "common/vulkan_platform.h"
-
-#include "dawn_native/VulkanBackend.h"
-
-#include "common/SwapChainUtils.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/NativeSwapChainImplVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-
-namespace dawn_native { namespace vulkan {
-
- VkInstance GetInstance(WGPUDevice device) {
- Device* backendDevice = ToBackend(FromAPI(device));
- return backendDevice->GetVkInstance();
- }
-
- DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
- const char* pName) {
- Device* backendDevice = ToBackend(FromAPI(device));
- return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
- }
-
- // Explicitly export this function because it uses the "native" type for surfaces while the
- // header as seen in this file uses the wrapped type.
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
- Device* backendDevice = ToBackend(FromAPI(device));
- VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
-
- DawnSwapChainImplementation impl;
- impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
- impl.textureUsage = WGPUTextureUsage_Present;
-
- return impl;
- }
-
- WGPUTextureFormat GetNativeSwapChainPreferredFormat(
- const DawnSwapChainImplementation* swapChain) {
- NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
- }
-
- AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {
- }
-
-#if defined(DAWN_PLATFORM_LINUX)
- ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
- : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {
- }
-
- ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
- : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {
- }
-
- ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
- : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {
- }
-
- ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
- : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
- }
-#endif // DAWN_PLATFORM_LINUX
-
- WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
-#if defined(DAWN_PLATFORM_LINUX)
- switch (descriptor->type) {
- case ExternalImageType::OpaqueFD:
- case ExternalImageType::DmaBuf: {
- Device* backendDevice = ToBackend(FromAPI(device));
- const ExternalImageDescriptorFD* fdDescriptor =
- static_cast<const ExternalImageDescriptorFD*>(descriptor);
-
- return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
- fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
- }
- default:
- return nullptr;
- }
-#else
- return nullptr;
-#endif // DAWN_PLATFORM_LINUX
- }
-
- bool ExportVulkanImage(WGPUTexture texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info) {
- if (texture == nullptr) {
- return false;
- }
-#if defined(DAWN_PLATFORM_LINUX)
- switch (info->type) {
- case ExternalImageType::OpaqueFD:
- case ExternalImageType::DmaBuf: {
- Texture* backendTexture = ToBackend(FromAPI(texture));
- Device* device = ToBackend(backendTexture->GetDevice());
- ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
-
- return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
- &fdInfo->semaphoreHandles);
- }
- default:
- return false;
- }
-#else
- return false;
-#endif // DAWN_PLATFORM_LINUX
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
deleted file mode 100644
index e0655b959fe..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <string>
-
-namespace dawn_native { namespace vulkan {
-
- const char* VkResultAsString(::VkResult result) {
- // Convert to a int32_t to silence and MSVC warning that the fake errors don't appear in
- // the original VkResult enum.
- int32_t code = static_cast<int32_t>(result);
-
- switch (code) {
- case VK_SUCCESS:
- return "VK_SUCCESS";
- case VK_NOT_READY:
- return "VK_NOT_READY";
- case VK_TIMEOUT:
- return "VK_TIMEOUT";
- case VK_EVENT_SET:
- return "VK_EVENT_SET";
- case VK_EVENT_RESET:
- return "VK_EVENT_RESET";
- case VK_INCOMPLETE:
- return "VK_INCOMPLETE";
- case VK_ERROR_OUT_OF_HOST_MEMORY:
- return "VK_ERROR_OUT_OF_HOST_MEMORY";
- case VK_ERROR_OUT_OF_DEVICE_MEMORY:
- return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
- case VK_ERROR_INITIALIZATION_FAILED:
- return "VK_ERROR_INITIALIZATION_FAILED";
- case VK_ERROR_DEVICE_LOST:
- return "VK_ERROR_DEVICE_LOST";
- case VK_ERROR_MEMORY_MAP_FAILED:
- return "VK_ERROR_MEMORY_MAP_FAILED";
- case VK_ERROR_LAYER_NOT_PRESENT:
- return "VK_ERROR_LAYER_NOT_PRESENT";
- case VK_ERROR_EXTENSION_NOT_PRESENT:
- return "VK_ERROR_EXTENSION_NOT_PRESENT";
- case VK_ERROR_FEATURE_NOT_PRESENT:
- return "VK_ERROR_FEATURE_NOT_PRESENT";
- case VK_ERROR_INCOMPATIBLE_DRIVER:
- return "VK_ERROR_INCOMPATIBLE_DRIVER";
- case VK_ERROR_TOO_MANY_OBJECTS:
- return "VK_ERROR_TOO_MANY_OBJECTS";
- case VK_ERROR_FORMAT_NOT_SUPPORTED:
- return "VK_ERROR_FORMAT_NOT_SUPPORTED";
- case VK_ERROR_FRAGMENTED_POOL:
- return "VK_ERROR_FRAGMENTED_POOL";
-
- case VK_ERROR_SURFACE_LOST_KHR:
- return "VK_ERROR_SURFACE_LOST_KHR";
- case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
- return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
-
- case VK_FAKE_DEVICE_OOM_FOR_TESTING:
- return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
- case VK_FAKE_ERROR_FOR_TESTING:
- return "VK_FAKE_ERROR_FOR_TESTING";
- default:
- return "<Unknown VkResult>";
- }
- }
-
- MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
- if (DAWN_LIKELY(result == VK_SUCCESS)) {
- return {};
- }
-
- std::string message = std::string(context) + " failed with " + VkResultAsString(result);
-
- if (result == VK_ERROR_DEVICE_LOST) {
- return DAWN_DEVICE_LOST_ERROR(message);
- } else {
- return DAWN_INTERNAL_ERROR(message);
- }
- }
-
- MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
- if (DAWN_LIKELY(result == VK_SUCCESS)) {
- return {};
- }
-
- std::string message = std::string(context) + " failed with " + VkResultAsString(result);
-
- if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
- result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
- return DAWN_OUT_OF_MEMORY_ERROR(message);
- } else if (result == VK_ERROR_DEVICE_LOST) {
- return DAWN_DEVICE_LOST_ERROR(message);
- } else {
- return DAWN_INTERNAL_ERROR(message);
- }
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h
deleted file mode 100644
index 7748f56cd39..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_VULKANERROR_H_
-#define DAWNNATIVE_VULKAN_VULKANERROR_H_
-
-#include "dawn_native/ErrorInjector.h"
-#include "dawn_native/vulkan/VulkanFunctions.h"
-
-constexpr VkResult VK_FAKE_ERROR_FOR_TESTING = VK_RESULT_MAX_ENUM;
-constexpr VkResult VK_FAKE_DEVICE_OOM_FOR_TESTING = static_cast<VkResult>(VK_RESULT_MAX_ENUM - 1);
-
-namespace dawn_native { namespace vulkan {
-
- // Returns a string version of the result.
- const char* VkResultAsString(::VkResult result);
-
- MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
- MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
-
-// Returns a success only if result if VK_SUCCESS, an error with the context and stringified
-// result value instead. Can be used like this:
-//
-// DAWN_TRY(CheckVkSuccess(vkDoSomething, "doing something"));
-#define CheckVkSuccess(resultIn, contextIn) \
- ::dawn_native::vulkan::CheckVkSuccessImpl( \
- ::dawn_native::vulkan::VkResult::WrapUnsafe( \
- INJECT_ERROR_OR_RUN(resultIn, VK_FAKE_ERROR_FOR_TESTING)), \
- contextIn)
-
-#define CheckVkOOMThenSuccess(resultIn, contextIn) \
- ::dawn_native::vulkan::CheckVkOOMThenSuccessImpl( \
- ::dawn_native::vulkan::VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN( \
- resultIn, VK_FAKE_DEVICE_OOM_FOR_TESTING, VK_FAKE_ERROR_FOR_TESTING)), \
- contextIn)
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_VULKANERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp
deleted file mode 100644
index 3073625ce31..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.cpp
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/VulkanExtensions.h"
-
-#include "common/Assert.h"
-#include "common/vulkan_platform.h"
-
-#include <array>
-#include <limits>
-
-namespace dawn_native { namespace vulkan {
-
- static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
- static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
- static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
-
- // A static array for InstanceExtInfo that can be indexed with InstanceExts.
- // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
- // assert will fire if it isn't in the correct order.
- static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
- static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
- //
- {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
- VulkanVersion_1_1},
- {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
- VulkanVersion_1_1},
- {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
- VulkanVersion_1_1},
-
- {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
- {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
- {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
- {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
- {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
- {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
- {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
-
- {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
- {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
- //
- }};
-
- const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
- uint32_t index = static_cast<uint32_t>(ext);
- ASSERT(index < sInstanceExtInfos.size());
- ASSERT(sInstanceExtInfos[index].index == ext);
- return sInstanceExtInfos[index];
- }
-
- std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
- std::unordered_map<std::string, InstanceExt> result;
- for (const InstanceExtInfo& info : sInstanceExtInfos) {
- result[info.name] = info.index;
- }
- return result;
- }
-
- InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
- // We need to check that all transitive dependencies of extensions are advertised.
- // To do that in a single pass and no data structures, the extensions are topologically
- // sorted in the definition of InstanceExt.
- // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
- // dependency check will first assert all its dependents have been visited.
- InstanceExtSet visitedSet;
- InstanceExtSet trimmedSet;
-
- auto HasDep = [&](InstanceExt ext) -> bool {
- ASSERT(visitedSet[ext]);
- return trimmedSet[ext];
- };
-
- for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
- InstanceExt ext = static_cast<InstanceExt>(i);
-
- bool hasDependencies = false;
- switch (ext) {
- case InstanceExt::GetPhysicalDeviceProperties2:
- case InstanceExt::Surface:
- case InstanceExt::DebugUtils:
- case InstanceExt::ValidationFeatures:
- hasDependencies = true;
- break;
-
- case InstanceExt::ExternalMemoryCapabilities:
- case InstanceExt::ExternalSemaphoreCapabilities:
- hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
- break;
-
- case InstanceExt::FuchsiaImagePipeSurface:
- case InstanceExt::MetalSurface:
- case InstanceExt::WaylandSurface:
- case InstanceExt::Win32Surface:
- case InstanceExt::XcbSurface:
- case InstanceExt::XlibSurface:
- hasDependencies = HasDep(InstanceExt::Surface);
- break;
-
- case InstanceExt::EnumCount:
- UNREACHABLE();
- }
-
- trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
- visitedSet.set(ext, true);
- }
-
- return trimmedSet;
- }
-
- void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
- for (const InstanceExtInfo& info : sInstanceExtInfos) {
- if (info.versionPromoted <= version) {
- extensions->set(info.index, true);
- }
- }
- }
-
- static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
- static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
- //
- {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
- {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
- {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
- VulkanVersion_1_1},
- {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
- VulkanVersion_1_1},
- {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
- {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
- VulkanVersion_1_1},
- {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
- VulkanVersion_1_1},
- {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
- {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
- {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
- {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
-
- {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
- {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
- {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
-
- {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
- {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
- {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
- {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
- {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
-
- {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
- {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
- {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
- //
- }};
-
- const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
- uint32_t index = static_cast<uint32_t>(ext);
- ASSERT(index < sDeviceExtInfos.size());
- ASSERT(sDeviceExtInfos[index].index == ext);
- return sDeviceExtInfos[index];
- }
-
- std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
- std::unordered_map<std::string, DeviceExt> result;
- for (const DeviceExtInfo& info : sDeviceExtInfos) {
- result[info.name] = info.index;
- }
- return result;
- }
-
- DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
- const InstanceExtSet& instanceExts,
- uint32_t icdVersion) {
- // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
- // an explanation of what happens.
- DeviceExtSet visitedSet;
- DeviceExtSet trimmedSet;
-
- auto HasDep = [&](DeviceExt ext) -> bool {
- ASSERT(visitedSet[ext]);
- return trimmedSet[ext];
- };
-
- for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
- DeviceExt ext = static_cast<DeviceExt>(i);
-
- bool hasDependencies = false;
- switch (ext) {
- // Happy extensions don't need anybody else!
- case DeviceExt::BindMemory2:
- case DeviceExt::GetMemoryRequirements2:
- case DeviceExt::Maintenance1:
- case DeviceExt::ImageFormatList:
- case DeviceExt::StorageBufferStorageClass:
- hasDependencies = true;
- break;
-
- // Physical device extensions technically don't require the instance to support
- // them but VulkanFunctions only loads the function pointers if the instance
- // advertises the extension. So if we didn't have this check, we'd risk a calling
- // a nullptr.
- case DeviceExt::GetPhysicalDeviceProperties2:
- hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
- break;
- case DeviceExt::ExternalMemoryCapabilities:
- hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
- case DeviceExt::ExternalSemaphoreCapabilities:
- hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::ImageDrmFormatModifier:
- hasDependencies = HasDep(DeviceExt::BindMemory2) &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
- HasDep(DeviceExt::ImageFormatList) &&
- HasDep(DeviceExt::SamplerYCbCrConversion);
- break;
-
- case DeviceExt::Swapchain:
- hasDependencies = instanceExts[InstanceExt::Surface];
- break;
-
- case DeviceExt::SamplerYCbCrConversion:
- hasDependencies = HasDep(DeviceExt::Maintenance1) &&
- HasDep(DeviceExt::BindMemory2) &&
- HasDep(DeviceExt::GetMemoryRequirements2) &&
- HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::DriverProperties:
- case DeviceExt::ShaderFloat16Int8:
- hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
- break;
-
- case DeviceExt::ExternalMemory:
- hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
- break;
-
- case DeviceExt::ExternalSemaphore:
- hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
- break;
-
- case DeviceExt::ExternalMemoryFD:
- case DeviceExt::ExternalMemoryZirconHandle:
- hasDependencies = HasDep(DeviceExt::ExternalMemory);
- break;
-
- case DeviceExt::ExternalMemoryDmaBuf:
- hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
- break;
-
- case DeviceExt::ExternalSemaphoreFD:
- case DeviceExt::ExternalSemaphoreZirconHandle:
- hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
- break;
-
- case DeviceExt::_16BitStorage:
- hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
- HasDep(DeviceExt::StorageBufferStorageClass);
- break;
-
- case DeviceExt::SubgroupSizeControl:
- // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
- // don't need to check for it as it also requires Vulkan 1.1 in which
- // VK_KHR_get_physical_device_properties2 was promoted.
- hasDependencies = icdVersion >= VulkanVersion_1_1;
- break;
-
- case DeviceExt::EnumCount:
- UNREACHABLE();
- }
-
- trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
- visitedSet.set(ext, true);
- }
-
- return trimmedSet;
- }
-
- void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
- for (const DeviceExtInfo& info : sDeviceExtInfos) {
- if (info.versionPromoted <= version) {
- extensions->set(info.index, true);
- }
- }
- }
-
- // A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
- // GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
- // assert will fire if it isn't in the correct order.
- static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
- static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
- //
- {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
- {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
- {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
- {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
- //
- }};
-
- const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
- uint32_t index = static_cast<uint32_t>(layer);
- ASSERT(index < sVulkanLayerInfos.size());
- ASSERT(sVulkanLayerInfos[index].layer == layer);
- return sVulkanLayerInfos[index];
- }
-
- std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
- std::unordered_map<std::string, VulkanLayer> result;
- for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
- result[info.name] = info.layer;
- }
- return result;
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h
deleted file mode 100644
index bd875279453..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanExtensions.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
-#define DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
-
-#include "common/ityp_bitset.h"
-
-#include <unordered_map>
-
-namespace dawn_native { namespace vulkan {
-
- // The list of known instance extensions. They must be in dependency order (this is checked
- // inside EnsureDependencies)
- enum class InstanceExt {
- // Promoted to 1.1
- GetPhysicalDeviceProperties2,
- ExternalMemoryCapabilities,
- ExternalSemaphoreCapabilities,
-
- // Surface extensions
- Surface,
- FuchsiaImagePipeSurface,
- MetalSurface,
- WaylandSurface,
- Win32Surface,
- XcbSurface,
- XlibSurface,
-
- // Others
- DebugUtils,
- ValidationFeatures,
-
- EnumCount,
- };
-
- // A bitset that is indexed with InstanceExt.
- using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
-
- // Information about a known instance extension.
- struct InstanceExtInfo {
- InstanceExt index;
- const char* name;
- // The version in which this extension was promoted as built with VK_MAKE_VERSION,
- // or NeverPromoted if it was never promoted.
- uint32_t versionPromoted;
- };
-
- // Returns the information about a known InstanceExt
- const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
- // Returns a map that maps a Vulkan extension name to its InstanceExt.
- std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
-
- // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
- void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
- // From a set of extensions advertised as supported by the instance (or promoted), remove all
- // extensions that don't have all their transitive dependencies in advertisedExts.
- InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
-
- // The list of known device extensions. They must be in dependency order (this is checked
- // inside EnsureDependencies)
- enum class DeviceExt {
- // Promoted to 1.1
- BindMemory2,
- Maintenance1,
- StorageBufferStorageClass,
- GetPhysicalDeviceProperties2,
- GetMemoryRequirements2,
- ExternalMemoryCapabilities,
- ExternalSemaphoreCapabilities,
- ExternalMemory,
- ExternalSemaphore,
- _16BitStorage,
- SamplerYCbCrConversion,
-
- // Promoted to 1.2
- DriverProperties,
- ImageFormatList,
- ShaderFloat16Int8,
-
- // External* extensions
- ExternalMemoryFD,
- ExternalMemoryDmaBuf,
- ExternalMemoryZirconHandle,
- ExternalSemaphoreFD,
- ExternalSemaphoreZirconHandle,
-
- // Others
- ImageDrmFormatModifier,
- Swapchain,
- SubgroupSizeControl,
-
- EnumCount,
- };
-
- // A bitset that is indexed with DeviceExt.
- using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
-
- // Information about a known device extension.
- struct DeviceExtInfo {
- DeviceExt index;
- const char* name;
- // The version in which this extension was promoted as built with VK_MAKE_VERSION,
- // or NeverPromoted if it was never promoted.
- uint32_t versionPromoted;
- };
-
- // Returns the information about a known DeviceExt
- const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
- // Returns a map that maps a Vulkan extension name to its DeviceExt.
- std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
-
- // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
- void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
- // From a set of extensions advertised as supported by the device (or promoted), remove all
- // extensions that don't have all their transitive dependencies in advertisedExts or in
- // instanceExts.
- DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
- const InstanceExtSet& instanceExts,
- uint32_t icdVersion);
-
- // The list of all known Vulkan layers.
- enum class VulkanLayer {
- Validation,
- LunargVkTrace,
- RenderDocCapture,
-
- // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_image_pipe_swapchain),
- // which adds an instance extensions (VK_FUCHSIA_image_surface) to all ICDs.
- FuchsiaImagePipeSwapchain,
-
- EnumCount,
- };
-
- // A bitset that is indexed with VulkanLayer.
- using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
-
- // Information about a known layer
- struct VulkanLayerInfo {
- VulkanLayer layer;
- const char* name;
- };
-
- // Returns the information about a known VulkanLayer
- const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
- // Returns a map that maps a Vulkan layer name to its VulkanLayer.
- std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_VULKANEXTENSIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
deleted file mode 100644
index 2e052e73b4c..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/VulkanFunctions.h"
-
-#include "common/DynamicLib.h"
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-namespace dawn_native { namespace vulkan {
-
-#define GET_GLOBAL_PROC(name) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(nullptr, "vk" #name)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
- } \
- } while (0)
-
- MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
- if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
- return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
- }
-
- GET_GLOBAL_PROC(CreateInstance);
- GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
- GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
-
- // Is not available in Vulkan 1.0, so allow nullptr
- EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
- GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
-
- return {};
- }
-
-#define GET_INSTANCE_PROC_BASE(name, procName) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName); \
- } \
- } while (0)
-
-#define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
-#define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
-
- MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
- const VulkanGlobalInfo& globalInfo) {
- // Load this proc first so that we can destroy the instance even if some other
- // GET_INSTANCE_PROC fails
- GET_INSTANCE_PROC(DestroyInstance);
-
- GET_INSTANCE_PROC(CreateDevice);
- GET_INSTANCE_PROC(DestroyDevice);
- GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
- GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
- GET_INSTANCE_PROC(EnumeratePhysicalDevices);
- GET_INSTANCE_PROC(GetDeviceProcAddr);
- GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
- GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
- GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
-
- if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
- GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
- GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
- GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
- GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
- GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
- GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
- }
-
- // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
- // support the vendor entrypoint in GetProcAddress.
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
- } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
- }
-
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
- } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
- }
-
- if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
- GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
- GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
- } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
- GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
- }
-
- if (globalInfo.HasExt(InstanceExt::Surface)) {
- GET_INSTANCE_PROC(DestroySurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
- }
-
-#if defined(VK_USE_PLATFORM_FUCHSIA)
- if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
- GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
- }
-#endif // defined(VK_USE_PLATFORM_FUCHSIA)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
- GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
- GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
- }
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
- if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
- GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
- }
- if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
- GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
- }
-#endif // defined(DAWN_USE_X11)
- return {};
- }
-
-#define GET_DEVICE_PROC(name) \
- do { \
- name = reinterpret_cast<decltype(name)>(GetDeviceProcAddr(device, "vk" #name)); \
- if (name == nullptr) { \
- return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
- } \
- } while (0)
-
- MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
- const VulkanDeviceInfo& deviceInfo) {
- GET_DEVICE_PROC(AllocateCommandBuffers);
- GET_DEVICE_PROC(AllocateDescriptorSets);
- GET_DEVICE_PROC(AllocateMemory);
- GET_DEVICE_PROC(BeginCommandBuffer);
- GET_DEVICE_PROC(BindBufferMemory);
- GET_DEVICE_PROC(BindImageMemory);
- GET_DEVICE_PROC(CmdBeginQuery);
- GET_DEVICE_PROC(CmdBeginRenderPass);
- GET_DEVICE_PROC(CmdBindDescriptorSets);
- GET_DEVICE_PROC(CmdBindIndexBuffer);
- GET_DEVICE_PROC(CmdBindPipeline);
- GET_DEVICE_PROC(CmdBindVertexBuffers);
- GET_DEVICE_PROC(CmdBlitImage);
- GET_DEVICE_PROC(CmdClearAttachments);
- GET_DEVICE_PROC(CmdClearColorImage);
- GET_DEVICE_PROC(CmdClearDepthStencilImage);
- GET_DEVICE_PROC(CmdCopyBuffer);
- GET_DEVICE_PROC(CmdCopyBufferToImage);
- GET_DEVICE_PROC(CmdCopyImage);
- GET_DEVICE_PROC(CmdCopyImageToBuffer);
- GET_DEVICE_PROC(CmdCopyQueryPoolResults);
- GET_DEVICE_PROC(CmdDispatch);
- GET_DEVICE_PROC(CmdDispatchIndirect);
- GET_DEVICE_PROC(CmdDraw);
- GET_DEVICE_PROC(CmdDrawIndexed);
- GET_DEVICE_PROC(CmdDrawIndexedIndirect);
- GET_DEVICE_PROC(CmdDrawIndirect);
- GET_DEVICE_PROC(CmdEndQuery);
- GET_DEVICE_PROC(CmdEndRenderPass);
- GET_DEVICE_PROC(CmdExecuteCommands);
- GET_DEVICE_PROC(CmdFillBuffer);
- GET_DEVICE_PROC(CmdNextSubpass);
- GET_DEVICE_PROC(CmdPipelineBarrier);
- GET_DEVICE_PROC(CmdPushConstants);
- GET_DEVICE_PROC(CmdResetEvent);
- GET_DEVICE_PROC(CmdResetQueryPool);
- GET_DEVICE_PROC(CmdResolveImage);
- GET_DEVICE_PROC(CmdSetBlendConstants);
- GET_DEVICE_PROC(CmdSetDepthBias);
- GET_DEVICE_PROC(CmdSetDepthBounds);
- GET_DEVICE_PROC(CmdSetEvent);
- GET_DEVICE_PROC(CmdSetLineWidth);
- GET_DEVICE_PROC(CmdSetScissor);
- GET_DEVICE_PROC(CmdSetStencilCompareMask);
- GET_DEVICE_PROC(CmdSetStencilReference);
- GET_DEVICE_PROC(CmdSetStencilWriteMask);
- GET_DEVICE_PROC(CmdSetViewport);
- GET_DEVICE_PROC(CmdUpdateBuffer);
- GET_DEVICE_PROC(CmdWaitEvents);
- GET_DEVICE_PROC(CmdWriteTimestamp);
- GET_DEVICE_PROC(CreateBuffer);
- GET_DEVICE_PROC(CreateBufferView);
- GET_DEVICE_PROC(CreateCommandPool);
- GET_DEVICE_PROC(CreateComputePipelines);
- GET_DEVICE_PROC(CreateDescriptorPool);
- GET_DEVICE_PROC(CreateDescriptorSetLayout);
- GET_DEVICE_PROC(CreateEvent);
- GET_DEVICE_PROC(CreateFence);
- GET_DEVICE_PROC(CreateFramebuffer);
- GET_DEVICE_PROC(CreateGraphicsPipelines);
- GET_DEVICE_PROC(CreateImage);
- GET_DEVICE_PROC(CreateImageView);
- GET_DEVICE_PROC(CreatePipelineCache);
- GET_DEVICE_PROC(CreatePipelineLayout);
- GET_DEVICE_PROC(CreateQueryPool);
- GET_DEVICE_PROC(CreateRenderPass);
- GET_DEVICE_PROC(CreateSampler);
- GET_DEVICE_PROC(CreateSemaphore);
- GET_DEVICE_PROC(CreateShaderModule);
- GET_DEVICE_PROC(DestroyBuffer);
- GET_DEVICE_PROC(DestroyBufferView);
- GET_DEVICE_PROC(DestroyCommandPool);
- GET_DEVICE_PROC(DestroyDescriptorPool);
- GET_DEVICE_PROC(DestroyDescriptorSetLayout);
- GET_DEVICE_PROC(DestroyEvent);
- GET_DEVICE_PROC(DestroyFence);
- GET_DEVICE_PROC(DestroyFramebuffer);
- GET_DEVICE_PROC(DestroyImage);
- GET_DEVICE_PROC(DestroyImageView);
- GET_DEVICE_PROC(DestroyPipeline);
- GET_DEVICE_PROC(DestroyPipelineCache);
- GET_DEVICE_PROC(DestroyPipelineLayout);
- GET_DEVICE_PROC(DestroyQueryPool);
- GET_DEVICE_PROC(DestroyRenderPass);
- GET_DEVICE_PROC(DestroySampler);
- GET_DEVICE_PROC(DestroySemaphore);
- GET_DEVICE_PROC(DestroyShaderModule);
- GET_DEVICE_PROC(DeviceWaitIdle);
- GET_DEVICE_PROC(EndCommandBuffer);
- GET_DEVICE_PROC(FlushMappedMemoryRanges);
- GET_DEVICE_PROC(FreeCommandBuffers);
- GET_DEVICE_PROC(FreeDescriptorSets);
- GET_DEVICE_PROC(FreeMemory);
- GET_DEVICE_PROC(GetBufferMemoryRequirements);
- GET_DEVICE_PROC(GetDeviceMemoryCommitment);
- GET_DEVICE_PROC(GetDeviceQueue);
- GET_DEVICE_PROC(GetEventStatus);
- GET_DEVICE_PROC(GetFenceStatus);
- GET_DEVICE_PROC(GetImageMemoryRequirements);
- GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
- GET_DEVICE_PROC(GetImageSubresourceLayout);
- GET_DEVICE_PROC(GetPipelineCacheData);
- GET_DEVICE_PROC(GetQueryPoolResults);
- GET_DEVICE_PROC(GetRenderAreaGranularity);
- GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
- GET_DEVICE_PROC(MapMemory);
- GET_DEVICE_PROC(MergePipelineCaches);
- GET_DEVICE_PROC(QueueBindSparse);
- GET_DEVICE_PROC(QueueSubmit);
- GET_DEVICE_PROC(QueueWaitIdle);
- GET_DEVICE_PROC(ResetCommandBuffer);
- GET_DEVICE_PROC(ResetCommandPool);
- GET_DEVICE_PROC(ResetDescriptorPool);
- GET_DEVICE_PROC(ResetEvent);
- GET_DEVICE_PROC(ResetFences);
- GET_DEVICE_PROC(SetEvent);
- GET_DEVICE_PROC(UnmapMemory);
- GET_DEVICE_PROC(UpdateDescriptorSets);
- GET_DEVICE_PROC(WaitForFences);
-
- if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
- GET_DEVICE_PROC(GetMemoryFdKHR);
- GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
- GET_DEVICE_PROC(ImportSemaphoreFdKHR);
- GET_DEVICE_PROC(GetSemaphoreFdKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
- GET_DEVICE_PROC(CreateSwapchainKHR);
- GET_DEVICE_PROC(DestroySwapchainKHR);
- GET_DEVICE_PROC(GetSwapchainImagesKHR);
- GET_DEVICE_PROC(AcquireNextImageKHR);
- GET_DEVICE_PROC(QueuePresentKHR);
- }
-
- if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
- GET_DEVICE_PROC(GetBufferMemoryRequirements2);
- GET_DEVICE_PROC(GetImageMemoryRequirements2);
- GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
- }
-
-#if VK_USE_PLATFORM_FUCHSIA
- if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
- GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
- GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
- }
-
- if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
- GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
- GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
- }
-#endif
-
- return {};
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
deleted file mode 100644
index eb20ee67be1..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
-#define DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
-
-#include "common/vulkan_platform.h"
-
-#include "dawn_native/Error.h"
-
-class DynamicLib;
-
-namespace dawn_native { namespace vulkan {
-
- struct VulkanGlobalInfo;
- struct VulkanDeviceInfo;
-
- // Stores the Vulkan entry points. Also loads them from the dynamic library
- // and the vkGet*ProcAddress entry points.
- struct VulkanFunctions {
- MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
- MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
- MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
-
- // ---------- Global procs
-
- // Initial proc from which we can get all the others
- PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
-
- PFN_vkCreateInstance CreateInstance = nullptr;
- PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
- PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
- // DestroyInstance isn't technically a global proc but we want to be able to use it
- // before querying the instance procs in case we need to error out during initialization.
- PFN_vkDestroyInstance DestroyInstance = nullptr;
-
- // Core Vulkan 1.1
- PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
-
- // ---------- Instance procs
-
- // Core Vulkan 1.0
- PFN_vkCreateDevice CreateDevice = nullptr;
- PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
- PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
- PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
- PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
- PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
- PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties =
- nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
- PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties =
- nullptr;
- PFN_vkGetPhysicalDeviceSparseImageFormatProperties
- GetPhysicalDeviceSparseImageFormatProperties = nullptr;
- // Not technically an instance proc but we want to be able to use it as soon as the
- // device is created.
- PFN_vkDestroyDevice DestroyDevice = nullptr;
-
- // VK_EXT_debug_utils
- PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
- PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
- PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
- PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
- PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
- PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
- PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
- PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
- PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
- PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
- PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
-
- // VK_KHR_surface
- PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR =
- nullptr;
- PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
- PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
- nullptr;
-
- // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
- // present.
-
- // VK_KHR_external_memory_capabilities
- PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
- nullptr;
-
- // VK_KHR_external_semaphore_capabilities
- PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
- GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
-
- // VK_KHR_get_physical_device_properties2
- PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
- PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
- nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
- nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
- PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
- GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
-
-#if defined(VK_USE_PLATFORM_FUCHSIA)
- // FUCHSIA_image_pipe_surface
- PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
-#endif // defined(VK_USE_PLATFORM_FUCHSIA)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- // EXT_metal_surface
- PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
-#endif // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- // KHR_win32_surface
- PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
- GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
-#endif // defined(DAWN_PLATFORM_WINDOWS)
-
-#if defined(DAWN_USE_X11)
- // KHR_xlib_surface
- PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
- GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
-
- // KHR_xcb_surface
- PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
- PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
- GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
-#endif // defined(DAWN_USE_X11)
-
- // ---------- Device procs
-
- // Core Vulkan 1.0
- PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
- PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
- PFN_vkAllocateMemory AllocateMemory = nullptr;
- PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
- PFN_vkBindBufferMemory BindBufferMemory = nullptr;
- PFN_vkBindImageMemory BindImageMemory = nullptr;
- PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
- PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
- PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
- PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
- PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
- PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
- PFN_vkCmdBlitImage CmdBlitImage = nullptr;
- PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
- PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
- PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
- PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
- PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
- PFN_vkCmdCopyImage CmdCopyImage = nullptr;
- PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
- PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
- PFN_vkCmdDispatch CmdDispatch = nullptr;
- PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
- PFN_vkCmdDraw CmdDraw = nullptr;
- PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
- PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
- PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
- PFN_vkCmdEndQuery CmdEndQuery = nullptr;
- PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
- PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
- PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
- PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
- PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
- PFN_vkCmdPushConstants CmdPushConstants = nullptr;
- PFN_vkCmdResetEvent CmdResetEvent = nullptr;
- PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
- PFN_vkCmdResolveImage CmdResolveImage = nullptr;
- PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
- PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
- PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
- PFN_vkCmdSetEvent CmdSetEvent = nullptr;
- PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
- PFN_vkCmdSetScissor CmdSetScissor = nullptr;
- PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
- PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
- PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
- PFN_vkCmdSetViewport CmdSetViewport = nullptr;
- PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
- PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
- PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
- PFN_vkCreateBuffer CreateBuffer = nullptr;
- PFN_vkCreateBufferView CreateBufferView = nullptr;
- PFN_vkCreateCommandPool CreateCommandPool = nullptr;
- PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
- PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
- PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
- PFN_vkCreateEvent CreateEvent = nullptr;
- PFN_vkCreateFence CreateFence = nullptr;
- PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
- PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
- PFN_vkCreateImage CreateImage = nullptr;
- PFN_vkCreateImageView CreateImageView = nullptr;
- PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
- PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
- PFN_vkCreateQueryPool CreateQueryPool = nullptr;
- PFN_vkCreateRenderPass CreateRenderPass = nullptr;
- PFN_vkCreateSampler CreateSampler = nullptr;
- PFN_vkCreateSemaphore CreateSemaphore = nullptr;
- PFN_vkCreateShaderModule CreateShaderModule = nullptr;
- PFN_vkDestroyBuffer DestroyBuffer = nullptr;
- PFN_vkDestroyBufferView DestroyBufferView = nullptr;
- PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
- PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
- PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
- PFN_vkDestroyEvent DestroyEvent = nullptr;
- PFN_vkDestroyFence DestroyFence = nullptr;
- PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
- PFN_vkDestroyImage DestroyImage = nullptr;
- PFN_vkDestroyImageView DestroyImageView = nullptr;
- PFN_vkDestroyPipeline DestroyPipeline = nullptr;
- PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
- PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
- PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
- PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
- PFN_vkDestroySampler DestroySampler = nullptr;
- PFN_vkDestroySemaphore DestroySemaphore = nullptr;
- PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
- PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
- PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
- PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
- PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
- PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
- PFN_vkFreeMemory FreeMemory = nullptr;
- PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
- PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
- PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
- PFN_vkGetEventStatus GetEventStatus = nullptr;
- PFN_vkGetFenceStatus GetFenceStatus = nullptr;
- PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
- PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
- PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
- PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
- PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
- PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
- PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
- PFN_vkMapMemory MapMemory = nullptr;
- PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
- PFN_vkQueueBindSparse QueueBindSparse = nullptr;
- PFN_vkQueueSubmit QueueSubmit = nullptr;
- PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
- PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
- PFN_vkResetCommandPool ResetCommandPool = nullptr;
- PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
- PFN_vkResetEvent ResetEvent = nullptr;
- PFN_vkResetFences ResetFences = nullptr;
- PFN_vkSetEvent SetEvent = nullptr;
- PFN_vkUnmapMemory UnmapMemory = nullptr;
- PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
- PFN_vkWaitForFences WaitForFences = nullptr;
-
- // VK_KHR_external_memory_fd
- PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
- PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
-
- // VK_KHR_external_semaphore_fd
- PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
- PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
-
- // VK_KHR_get_memory_requirements2
- PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
- PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
- PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
-
- // VK_KHR_swapchain
- PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
- PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
- PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
- PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
- PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
-
-#if VK_USE_PLATFORM_FUCHSIA
- // VK_FUCHSIA_external_memory
- PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
- PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
- nullptr;
-
- // VK_FUCHSIA_external_semaphore
- PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
- PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
-#endif
- };
-
- // Create a wrapper around VkResult in the dawn_native::vulkan namespace. This shadows the
- // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
- // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
- // about handling error cases.
- class VkResult {
- public:
- constexpr static VkResult WrapUnsafe(::VkResult value) {
- return VkResult(value);
- }
-
- constexpr operator ::VkResult() const {
- return mValue;
- }
-
- private:
- // Private. Use VkResult::WrapUnsafe instead.
- constexpr VkResult(::VkResult value) : mValue(value) {
- }
-
- ::VkResult mValue;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
deleted file mode 100644
index a053df18a47..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/UtilsVulkan.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-#include <cstring>
-
-namespace dawn_native { namespace vulkan {
-
- namespace {
- ResultOrError<InstanceExtSet> GatherInstanceExtensions(
- const char* layerName,
- const dawn_native::vulkan::VulkanFunctions& vkFunctions,
- const std::unordered_map<std::string, InstanceExt>& knownExts) {
- uint32_t count = 0;
- VkResult vkResult = VkResult::WrapUnsafe(
- vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
- if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
- }
-
- std::vector<VkExtensionProperties> extensions(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceExtensionProperties(
- layerName, &count, extensions.data()),
- "vkEnumerateInstanceExtensionProperties"));
-
- InstanceExtSet result;
- for (const VkExtensionProperties& extension : extensions) {
- auto it = knownExts.find(extension.extensionName);
- if (it != knownExts.end()) {
- result.set(it->second, true);
- }
- }
-
- return result;
- }
-
- } // namespace
-
- bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
- return extensions[ext];
- }
-
- bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
- return extensions[ext];
- }
-
- ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
- VulkanGlobalInfo info = {};
- // Gather info on available API version
- {
- info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
- if (vkFunctions.EnumerateInstanceVersion != nullptr) {
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
- "vkEnumerateInstanceVersion"));
- }
- }
-
- // Gather the info about the instance layers
- {
- uint32_t count = 0;
- VkResult result =
- VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
- // From the Vulkan spec result should be success if there are 0 layers,
- // incomplete otherwise. This means that both values represent a success.
- // This is the same for all Enumarte functions
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
- }
-
- std::vector<VkLayerProperties> layersProperties(count);
- DAWN_TRY(CheckVkSuccess(
- vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
- "vkEnumerateInstanceLayerProperties"));
-
- std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
- for (const VkLayerProperties& layer : layersProperties) {
- auto it = knownLayers.find(layer.layerName);
- if (it != knownLayers.end()) {
- info.layers.set(it->second, true);
- }
- }
- }
-
- // Gather the info about the instance extensions
- {
- std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
-
- DAWN_TRY_ASSIGN(info.extensions,
- GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
- MarkPromotedExtensions(&info.extensions, info.apiVersion);
- info.extensions = EnsureDependencies(info.extensions);
-
- for (VulkanLayer layer : IterateBitSet(info.layers)) {
- DAWN_TRY_ASSIGN(info.layerExtensions[layer],
- GatherInstanceExtensions(GetVulkanLayerInfo(layer).name,
- vkFunctions, knownExts));
- MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
- info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
- }
- }
-
- return std::move(info);
- }
-
- ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
- VkInstance instance,
- const VulkanFunctions& vkFunctions) {
- uint32_t count = 0;
- VkResult result =
- VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
- }
-
- std::vector<VkPhysicalDevice> physicalDevices(count);
- DAWN_TRY(CheckVkSuccess(
- vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
- "vkEnumeratePhysicalDevices"));
-
- return std::move(physicalDevices);
- }
-
- ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
- VulkanDeviceInfo info = {};
- VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
- const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
- const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
-
- // Query the device properties first to get the ICD's `apiVersion`
- vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
-
- // Gather info about device memory.
- {
- VkPhysicalDeviceMemoryProperties memory;
- vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
-
- info.memoryTypes.assign(memory.memoryTypes,
- memory.memoryTypes + memory.memoryTypeCount);
- info.memoryHeaps.assign(memory.memoryHeaps,
- memory.memoryHeaps + memory.memoryHeapCount);
- }
-
- // Gather info about device queue families
- {
- uint32_t count = 0;
- vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
-
- info.queueFamilies.resize(count);
- vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
- info.queueFamilies.data());
- }
-
- // Gather the info about the device layers
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(
- vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
- }
-
- info.layers.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
- physicalDevice, &count, info.layers.data()),
- "vkEnumerateDeviceLayerProperties"));
- }
-
- // Gather the info about the device extensions
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
- }
-
- std::vector<VkExtensionProperties> extensionsProperties;
- extensionsProperties.resize(count);
- DAWN_TRY(
- CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, extensionsProperties.data()),
- "vkEnumerateDeviceExtensionProperties"));
-
- std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
-
- for (const VkExtensionProperties& extension : extensionsProperties) {
- auto it = knownExts.find(extension.extensionName);
- if (it != knownExts.end()) {
- info.extensions.set(it->second, true);
- }
- }
-
- MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
- info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
- info.properties.apiVersion);
- }
-
- // Gather general and extension features and properties
- //
- // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
- // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
- // because these extensions (transitively) depend on it in `EnsureDependencies`
- VkPhysicalDeviceFeatures2 features2 = {};
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- PNextChainBuilder featuresChain(&features2);
-
- VkPhysicalDeviceProperties2 properties2 = {};
- properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- PNextChainBuilder propertiesChain(&properties2);
-
- if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
- featuresChain.Add(&info.shaderFloat16Int8Features,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
- }
-
- if (info.extensions[DeviceExt::_16BitStorage]) {
- featuresChain.Add(&info._16BitStorageFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
- }
-
- if (info.extensions[DeviceExt::SubgroupSizeControl]) {
- featuresChain.Add(&info.subgroupSizeControlFeatures,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
- propertiesChain.Add(
- &info.subgroupSizeControlProperties,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
- }
-
- if (info.extensions[DeviceExt::DriverProperties]) {
- propertiesChain.Add(&info.driverProperties,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
- }
-
- // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
- // that features no covered by VkPhysicalDevice{Features,Properties} can be queried.
- //
- // Note that info.properties has already been filled at the start of this function to get
- // `apiVersion`.
- ASSERT(info.properties.apiVersion != 0);
- if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
- vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
- vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
- info.features = features2.features;
- } else {
- ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
- vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
- }
-
- // TODO(cwallez@chromium.org): gather info about formats
-
- return std::move(info);
- }
-
- ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface) {
- VulkanSurfaceInfo info = {};
-
- VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
- const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
-
- // Get the surface capabilities
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
- physicalDevice, surface, &info.capabilities),
- "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
-
- // Query which queue families support presenting this surface
- {
- size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
- info.supportedQueueFamilies.resize(nQueueFamilies, false);
-
- for (uint32_t i = 0; i < nQueueFamilies; ++i) {
- VkBool32 supported = VK_FALSE;
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
- physicalDevice, i, surface, &supported),
- "vkGetPhysicalDeviceSurfaceSupportKHR"));
-
- info.supportedQueueFamilies[i] = (supported == VK_TRUE);
- }
- }
-
- // Gather supported formats
- {
- uint32_t count = 0;
- VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
- }
-
- info.formats.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, &count, info.formats.data()),
- "vkGetPhysicalDeviceSurfaceFormatsKHR"));
- }
-
- // Gather supported presents modes
- {
- uint32_t count = 0;
- VkResult result =
- VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, nullptr));
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
- }
-
- info.presentModes.resize(count);
- DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, info.presentModes.data()),
- "vkGetPhysicalDeviceSurfacePresentModesKHR"));
- }
-
- return std::move(info);
- }
-
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
deleted file mode 100644
index ff7a5bcaa7f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_VULKANINFO_H_
-#define DAWNNATIVE_VULKAN_VULKANINFO_H_
-
-#include "common/ityp_array.h"
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/vulkan/VulkanExtensions.h"
-
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- class Adapter;
- class Backend;
- struct VulkanFunctions;
-
- // Global information - gathered before the instance is created
- struct VulkanGlobalKnobs {
- VulkanLayerSet layers;
- ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
- layerExtensions;
-
- // During information gathering `extensions` only contains the instance's extensions but
- // during the instance creation logic it becomes the OR of the instance's extensions and
- // the selected layers' extensions.
- InstanceExtSet extensions;
- bool HasExt(InstanceExt ext) const;
- };
-
- struct VulkanGlobalInfo : VulkanGlobalKnobs {
- uint32_t apiVersion;
- };
-
- // Device information - gathered before the device is created.
- struct VulkanDeviceKnobs {
- VkPhysicalDeviceFeatures features;
- VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
- VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
-
- bool HasExt(DeviceExt ext) const;
- DeviceExtSet extensions;
- };
-
- struct VulkanDeviceInfo : VulkanDeviceKnobs {
- VkPhysicalDeviceProperties properties;
- VkPhysicalDeviceDriverProperties driverProperties;
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
-
- std::vector<VkQueueFamilyProperties> queueFamilies;
-
- std::vector<VkMemoryType> memoryTypes;
- std::vector<VkMemoryHeap> memoryHeaps;
-
- std::vector<VkLayerProperties> layers;
- // TODO(cwallez@chromium.org): layer instance extensions
- };
-
- struct VulkanSurfaceInfo {
- VkSurfaceCapabilitiesKHR capabilities;
- std::vector<VkSurfaceFormatKHR> formats;
- std::vector<VkPresentModeKHR> presentModes;
- std::vector<bool> supportedQueueFamilies;
- };
-
- ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
- ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
- VkInstance instance,
- const VulkanFunctions& vkFunctions);
- ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
- ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface);
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_VULKANINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h
deleted file mode 100644
index f0653f2c96e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
-#define DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/VulkanBackend.h"
-#include "dawn_native/vulkan/ExternalHandle.h"
-
-namespace dawn_native { namespace vulkan {
- class Device;
- struct VulkanDeviceInfo;
-}} // namespace dawn_native::vulkan
-
-namespace dawn_native { namespace vulkan { namespace external_memory {
-
- struct MemoryImportParams {
- VkDeviceSize allocationSize;
- uint32_t memoryTypeIndex;
- };
-
- class Service {
- public:
- explicit Service(Device* device);
- ~Service();
-
- static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
-
- // True if the device reports it supports importing external memory.
- bool SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags);
-
- // True if the device reports it supports creating VkImages from external memory.
- bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage);
-
- // Returns the parameters required for importing memory
- ResultOrError<MemoryImportParams> GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image);
-
- // Given an external handle pointing to memory, import it into a VkDeviceMemory
- ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image);
-
- // Create a VkImage for the given handle type
- ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo);
-
- private:
- Device* mDevice = nullptr;
-
- // True if early checks pass that determine if the service is supported
- bool mSupported = false;
- };
-
-}}} // namespace dawn_native::vulkan::external_memory
-
-#endif // DAWNNATIVE_VULKAN_EXTERNALMEMORY_SERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
deleted file mode 100644
index 90b63734fac..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_memory {
-
- namespace {
-
- // Some modifiers use multiple planes (for example, see the comment for
- // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h), but dma-buf import in Dawn only
- // supports single-plane formats.
- ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- uint64_t modifier) {
- VkDrmFormatModifierPropertiesListEXT formatModifierPropsList;
- formatModifierPropsList.sType =
- VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT;
- formatModifierPropsList.pNext = nullptr;
- formatModifierPropsList.drmFormatModifierCount = 0;
- formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
-
- VkFormatProperties2 formatProps;
- formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
- formatProps.pNext = &formatModifierPropsList;
-
- fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
-
- uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
- std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierProps(modifierCount);
- formatModifierPropsList.pDrmFormatModifierProperties = formatModifierProps.data();
-
- fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
- for (const auto& props : formatModifierProps) {
- if (props.drmFormatModifier == modifier) {
- uint32_t count = props.drmFormatModifierPlaneCount;
- return count;
- }
- }
- return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
- }
-
- } // anonymous namespace
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
- deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- return mSupported;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage) {
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
- if (descriptor->type != ExternalImageType::DmaBuf) {
- return false;
- }
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
-
- // Verify plane count for the modifier.
- VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
- uint32_t planeCount = 0;
- if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
- dmaBufDescriptor->drmModifier),
- &planeCount)) {
- return false;
- }
- if (planeCount == 0) {
- return false;
- }
- // TODO(hob): Support multi-plane formats like I915_FORMAT_MOD_Y_TILED_CCS.
- if (planeCount > 1) {
- return false;
- }
-
- // Verify that the format modifier of the external memory and the requested Vulkan format
- // are actually supported together in a dma-buf import.
- VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo;
- drmModifierInfo.sType =
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
- drmModifierInfo.pNext = nullptr;
- drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
- drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-
- VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo;
- externalImageFormatInfo.sType =
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
- externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- externalImageFormatInfo.pNext = &drmModifierInfo;
-
- VkPhysicalDeviceImageFormatInfo2 imageFormatInfo;
- imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
- imageFormatInfo.format = format;
- imageFormatInfo.type = VK_IMAGE_TYPE_2D;
- imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
- imageFormatInfo.usage = usage;
- imageFormatInfo.flags = 0;
- imageFormatInfo.pNext = &externalImageFormatInfo;
-
- VkExternalImageFormatProperties externalImageFormatProps;
- externalImageFormatProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
- externalImageFormatProps.pNext = nullptr;
-
- VkImageFormatProperties2 imageFormatProps;
- imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
- imageFormatProps.pNext = &externalImageFormatProps;
-
- VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- physicalDevice, &imageFormatInfo, &imageFormatProps));
- if (result != VK_SUCCESS) {
- return false;
- }
- VkExternalMemoryFeatureFlags featureFlags =
- externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
- return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->type != ExternalImageType::DmaBuf,
- "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
-
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
- VkDevice device = mDevice->GetVkDevice();
-
- // Get the valid memory types for the VkImage.
- VkMemoryRequirements memoryRequirements;
- mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
-
- VkMemoryFdPropertiesKHR fdProperties;
- fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
- fdProperties.pNext = nullptr;
-
- // Get the valid memory types that the external memory can be imported as.
- mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- dmaBufDescriptor->memoryFD, &fdProperties);
- // Choose the best memory type that satisfies both the image's constraint and the import's
- // constraint.
- memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
- int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
- memoryRequirements, MemoryKind::Opaque);
- DAWN_INVALID_IF(memoryTypeIndex == -1,
- "Unable to find an appropriate memory type for import.");
-
- MemoryImportParams params = {memoryRequirements.size,
- static_cast<uint32_t>(memoryTypeIndex)};
- return params;
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
-
- VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
- memoryDedicatedAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
- memoryDedicatedAllocateInfo.pNext = nullptr;
- memoryDedicatedAllocateInfo.image = image;
- memoryDedicatedAllocateInfo.buffer = VkBuffer{};
-
- VkImportMemoryFdInfoKHR importMemoryFdInfo;
- importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
- importMemoryFdInfo.pNext = &memoryDedicatedAllocateInfo;
- importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- importMemoryFdInfo.fd = handle;
-
- VkMemoryAllocateInfo memoryAllocateInfo;
- memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- memoryAllocateInfo.pNext = &importMemoryFdInfo;
- memoryAllocateInfo.allocationSize = importParams.allocationSize;
- memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
- }
-
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- DAWN_INVALID_IF(descriptor->type != ExternalImageType::DmaBuf,
- "ExternalImageDescriptor is not a dma-buf descriptor.");
-
- const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
- static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
- VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
- VkDevice device = mDevice->GetVkDevice();
-
- // Dawn currently doesn't support multi-plane formats, so we only need to create a single
- // VkSubresourceLayout here.
- VkSubresourceLayout planeLayout;
- planeLayout.offset = 0;
- planeLayout.size = 0; // VK_EXT_image_drm_format_modifier mandates size = 0.
- planeLayout.rowPitch = dmaBufDescriptor->stride;
- planeLayout.arrayPitch = 0; // Not an array texture
- planeLayout.depthPitch = 0; // Not a depth texture
-
- uint32_t planeCount;
- DAWN_TRY_ASSIGN(planeCount,
- GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
- dmaBufDescriptor->drmModifier));
- ASSERT(planeCount == 1);
-
- VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo;
- explicitCreateInfo.sType =
- VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
- explicitCreateInfo.pNext = NULL;
- explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
- explicitCreateInfo.drmFormatModifierPlaneCount = planeCount;
- explicitCreateInfo.pPlaneLayouts = &planeLayout;
-
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
- externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- externalMemoryImageCreateInfo.pNext = &explicitCreateInfo;
- externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
-
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- createInfo.pNext = &externalMemoryImageCreateInfo;
- createInfo.flags = 0;
- createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
-
- // Create a new VkImage with tiling equal to the DRM format modifier.
- VkImage image;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
- }
-
-}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp
deleted file mode 100644
index 1bb4f38dd21..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_memory {
-
- Service::Service(Device* device) : mDevice(device) {
- DAWN_UNUSED(mDevice);
- DAWN_UNUSED(mSupported);
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return false;
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- return false;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage) {
- return false;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
-
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
- }
-
-}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
deleted file mode 100644
index ad2c4c23d50..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_memory {
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
-
- VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
- externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
- externalFormatInfo.pNext = nullptr;
- externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
- VkPhysicalDeviceImageFormatInfo2 formatInfo;
- formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
- formatInfo.pNext = &externalFormatInfo;
- formatInfo.format = format;
- formatInfo.type = type;
- formatInfo.tiling = tiling;
- formatInfo.usage = usage;
- formatInfo.flags = flags;
-
- VkExternalImageFormatProperties externalFormatProperties;
- externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
- externalFormatProperties.pNext = nullptr;
-
- VkImageFormatProperties2 formatProperties;
- formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
- formatProperties.pNext = &externalFormatProperties;
-
- VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
-
- // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
- if (result != VK_SUCCESS) {
- return false;
- }
-
- // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
- VkFlags memoryFlags =
- externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
- return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage) {
- return mSupported;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
- "ExternalImageDescriptor is not an OpaqueFD descriptor.");
-
- const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
- static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
-
- MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
- opaqueFDDescriptor->memoryTypeIndex};
- return params;
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
-
- VkMemoryRequirements requirements;
- mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
- DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
- "Requested allocation size (%u) is smaller than the image requires (%u).",
- importParams.allocationSize, requirements.size);
-
- VkImportMemoryFdInfoKHR importMemoryFdInfo;
- importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
- importMemoryFdInfo.pNext = nullptr;
- importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
- importMemoryFdInfo.fd = handle;
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = &importMemoryFdInfo;
- allocateInfo.allocationSize = importParams.allocationSize;
- allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
- }
-
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
- externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- externalMemoryImageCreateInfo.pNext = nullptr;
- externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
-
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.pNext = &externalMemoryImageCreateInfo;
- createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
- VkImage image;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
- }
-
-}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
deleted file mode 100644
index 9eb5b702f8e..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/TextureVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_native/vulkan/external_memory/MemoryService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_memory {
-
- Service::Service(Device* device)
- : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
- return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
- }
-
- bool Service::SupportsImportMemory(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
- // Early out before we try using extension functions
- if (!mSupported) {
- return false;
- }
-
- VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
- externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
- externalFormatInfo.pNext = nullptr;
- externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-
- VkPhysicalDeviceImageFormatInfo2 formatInfo;
- formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
- formatInfo.pNext = &externalFormatInfo;
- formatInfo.format = format;
- formatInfo.type = type;
- formatInfo.tiling = tiling;
- formatInfo.usage = usage;
- formatInfo.flags = flags;
-
- VkExternalImageFormatProperties externalFormatProperties;
- externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
- externalFormatProperties.pNext = nullptr;
-
- VkImageFormatProperties2 formatProperties;
- formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
- formatProperties.pNext = &externalFormatProperties;
-
- VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
- ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
-
- // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
- if (result != VK_SUCCESS) {
- return false;
- }
-
- // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
- VkFlags memoryFlags =
- externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
- return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
- }
-
- bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
- VkFormat format,
- VkImageUsageFlags usage) {
- return mSupported;
- }
-
- ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
- const ExternalImageDescriptor* descriptor,
- VkImage image) {
- DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
- "ExternalImageDescriptor is not an OpaqueFD descriptor.");
-
- const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
- static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
-
- MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
- opaqueFDDescriptor->memoryTypeIndex};
- return params;
- }
-
- ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- const MemoryImportParams& importParams,
- VkImage image) {
- DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
-
- VkMemoryRequirements requirements;
- mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
- DAWN_INVALID_IF(
- requirements.size > importParams.allocationSize,
- "Requested allocation size (%u) is smaller than the required image size (%u).",
- importParams.allocationSize, requirements.size);
-
- VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
- importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
- importMemoryHandleInfo.pNext = nullptr;
- importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
- importMemoryHandleInfo.handle = handle;
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = &importMemoryHandleInfo;
- allocateInfo.allocationSize = importParams.allocationSize;
- allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &*allocatedMemory),
- "vkAllocateMemory"));
- return allocatedMemory;
- }
-
- ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
- const VkImageCreateInfo& baseCreateInfo) {
- VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
- externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
- externalMemoryImageCreateInfo.pNext = nullptr;
- externalMemoryImageCreateInfo.handleTypes =
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-
- VkImageCreateInfo createInfo = baseCreateInfo;
- createInfo.pNext = &externalMemoryImageCreateInfo;
- createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
- VkImage image;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
- "CreateImage"));
- return image;
- }
-
-}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h
deleted file mode 100644
index 91d9576b716..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
-#define DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
-
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/vulkan/ExternalHandle.h"
-#include "dawn_native/vulkan/VulkanFunctions.h"
-#include "dawn_native/vulkan/VulkanInfo.h"
-
-namespace dawn_native { namespace vulkan {
- class Device;
-}} // namespace dawn_native::vulkan
-
-namespace dawn_native { namespace vulkan { namespace external_semaphore {
-
- class Service {
- public:
- explicit Service(Device* device);
- ~Service();
-
- static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn);
-
- // True if the device reports it supports this feature
- bool Supported();
-
- // Given an external handle, import it into a VkSemaphore
- ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
-
- // Create a VkSemaphore that is exportable into an external handle later
- ResultOrError<VkSemaphore> CreateExportableSemaphore();
-
- // Export a VkSemaphore into an external handle
- ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
-
- private:
- Device* mDevice = nullptr;
-
- // True if early checks pass that determine if the service is supported
- bool mSupported = false;
- };
-
-}}} // namespace dawn_native::vulkan::external_semaphore
-
-#endif // DAWNNATIVE_VULKAN_EXTERNALSEMAPHORE_SERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
deleted file mode 100644
index b81f96c33e0..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_native/vulkan/external_semaphore/SemaphoreService.h"
-
-static constexpr VkExternalSemaphoreHandleTypeFlagBits kHandleType =
-#if defined(DAWN_USE_SYNC_FDS)
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
-#else
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-#endif // defined(DAWN_USE_SYNC_FDS)
-
-namespace dawn_native { namespace vulkan { namespace external_semaphore {
-
- Service::Service(Device* device)
- : mDevice(device),
- mSupported(CheckSupport(device->GetDeviceInfo(),
- ToBackend(device->GetAdapter())->GetPhysicalDevice(),
- device->fn)) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
- return false;
- }
-
- VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
- semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
- semaphoreInfo.pNext = nullptr;
- semaphoreInfo.handleType = kHandleType;
-
- VkExternalSemaphorePropertiesKHR semaphoreProperties;
- semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
- semaphoreProperties.pNext = nullptr;
-
- fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
- &semaphoreProperties);
-
- VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
-
- return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
- }
-
- bool Service::Supported() {
- return mSupported;
- }
-
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- VkSemaphoreCreateInfo info;
- info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- info.pNext = nullptr;
- info.flags = 0;
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
- "vkCreateSemaphore"));
-
- VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
- importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
- importSemaphoreFdInfo.pNext = nullptr;
- importSemaphoreFdInfo.semaphore = semaphore;
- importSemaphoreFdInfo.flags = 0;
- importSemaphoreFdInfo.handleType = kHandleType;
- importSemaphoreFdInfo.fd = handle;
-
- MaybeError status = CheckVkSuccess(
- mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
- "vkImportSemaphoreFdKHR");
-
- if (status.IsError()) {
- mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
- DAWN_TRY(std::move(status));
- }
-
- return semaphore;
- }
-
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
- exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
- exportSemaphoreInfo.pNext = nullptr;
- exportSemaphoreInfo.handleTypes = kHandleType;
-
- VkSemaphoreCreateInfo semaphoreCreateInfo;
- semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
- semaphoreCreateInfo.flags = 0;
-
- VkSemaphore signalSemaphore;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &*signalSemaphore),
- "vkCreateSemaphore"));
- return signalSemaphore;
- }
-
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
- semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
- semaphoreGetFdInfo.pNext = nullptr;
- semaphoreGetFdInfo.semaphore = semaphore;
- semaphoreGetFdInfo.handleType = kHandleType;
-
- int fd = -1;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
- "vkGetSemaphoreFdKHR"));
-
- ASSERT(fd >= 0);
- return fd;
- }
-
-}}} // namespace dawn_native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceNull.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
deleted file mode 100644
index 3053029825a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/external_semaphore/SemaphoreService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_semaphore {
-
- Service::Service(Device* device) : mDevice(device) {
- DAWN_UNUSED(mDevice);
- DAWN_UNUSED(mSupported);
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- return false;
- }
-
- bool Service::Supported() {
- return false;
- }
-
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
-
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
-
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
- }
-
-}}} // namespace dawn_native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
deleted file mode 100644
index 7a773f9c42f..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/AdapterVk.h"
-#include "dawn_native/vulkan/BackendVk.h"
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-#include "dawn_native/vulkan/external_semaphore/SemaphoreService.h"
-
-namespace dawn_native { namespace vulkan { namespace external_semaphore {
-
- Service::Service(Device* device)
- : mDevice(device),
- mSupported(CheckSupport(device->GetDeviceInfo(),
- ToBackend(device->GetAdapter())->GetPhysicalDevice(),
- device->fn)) {
- }
-
- Service::~Service() = default;
-
- // static
- bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
- VkPhysicalDevice physicalDevice,
- const VulkanFunctions& fn) {
- if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
- return false;
- }
-
- VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
- semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
- semaphoreInfo.pNext = nullptr;
- semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- VkExternalSemaphorePropertiesKHR semaphoreProperties;
- semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
- semaphoreProperties.pNext = nullptr;
-
- fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
- &semaphoreProperties);
-
- VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
-
- return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
- }
-
- bool Service::Supported() {
- return mSupported;
- }
-
- ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
- DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID,
- "Importing a semaphore with an invalid handle.");
-
- VkSemaphore semaphore = VK_NULL_HANDLE;
- VkSemaphoreCreateInfo info;
- info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- info.pNext = nullptr;
- info.flags = 0;
-
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
- "vkCreateSemaphore"));
-
- VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
- importSemaphoreHandleInfo.sType =
- VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
- importSemaphoreHandleInfo.pNext = nullptr;
- importSemaphoreHandleInfo.semaphore = semaphore;
- importSemaphoreHandleInfo.flags = 0;
- importSemaphoreHandleInfo.handleType =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
- importSemaphoreHandleInfo.handle = handle;
-
- MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
- mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
- "vkImportSemaphoreZirconHandleFUCHSIA");
-
- if (status.IsError()) {
- mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
- DAWN_TRY(std::move(status));
- }
-
- return semaphore;
- }
-
- ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
- VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
- exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
- exportSemaphoreInfo.pNext = nullptr;
- exportSemaphoreInfo.handleTypes =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- VkSemaphoreCreateInfo semaphoreCreateInfo;
- semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
- semaphoreCreateInfo.flags = 0;
-
- VkSemaphore signalSemaphore;
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &*signalSemaphore),
- "vkCreateSemaphore"));
- return signalSemaphore;
- }
-
- ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
- VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
- semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
- semaphoreGetHandleInfo.pNext = nullptr;
- semaphoreGetHandleInfo.semaphore = semaphore;
- semaphoreGetHandleInfo.handleType =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
- zx_handle_t handle = ZX_HANDLE_INVALID;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
- mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
- "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
-
- ASSERT(handle != ZX_HANDLE_INVALID);
- return handle;
- }
-
-}}} // namespace dawn_native::vulkan::external_semaphore
diff --git a/chromium/third_party/dawn/src/dawn_node/Module.cpp b/chromium/third_party/dawn/src/dawn_node/Module.cpp
deleted file mode 100644
index 96fce3f0736..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/Module.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn/dawn_proc.h"
-#include "src/dawn_node/binding/Flags.h"
-#include "src/dawn_node/binding/GPU.h"
-
-namespace {
- Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
- const auto& env = info.Env();
-
- std::tuple<std::vector<std::string>> args;
- auto res = wgpu::interop::FromJS(info, args);
- if (res != wgpu::interop::Success) {
- Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
- return env.Undefined();
- }
-
- wgpu::binding::Flags flags;
-
- // Parse out the key=value flags out of the input args array
- for (const auto& arg : std::get<0>(args)) {
- const size_t sep_index = arg.find("=");
- if (sep_index == std::string::npos) {
- Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
- .ThrowAsJavaScriptException();
- return env.Undefined();
- }
- flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
- }
-
- // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
- return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
- }
-
-} // namespace
-
-// Initialize() initializes the Dawn node module, registering all the WebGPU
-// types into the global object, and adding the 'create' function on the exported
-// object.
-Napi::Object Initialize(Napi::Env env, Napi::Object exports) {
- // Begin by setting the Dawn procedure function pointers.
- dawnProcSetProcs(&dawn_native::GetProcs());
-
- // Register all the interop types
- wgpu::interop::Initialize(env);
-
- // Export function that creates and returns the wgpu::interop::GPU interface
- exports.Set(Napi::String::New(env, "create"), Napi::Function::New<CreateGPU>(env));
-
- return exports;
-}
-
-NODE_API_MODULE(addon, Initialize)
diff --git a/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp b/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp
deleted file mode 100644
index 3c4aac1e2ed..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/utils/Debug.h"
-
-// To reduce the build dependencies for compiling the dawn.node targets, we do
-// not use cmake-js for building, but instead just depend on node_api_headers.
-// As the name suggests, node_api_headers contains just the *headers* of Napi,
-// and does not provide a library to link against.
-// Fortunately node_api_headers provides a list of Napi symbols exported by Node,
-// which we can use to produce weak-symbol stubs.
-
-#ifdef _WIN32
-# error "NapiSymbols.cpp is not used on Windows"
-#endif
-
-#define NAPI_SYMBOL(NAME) \
- __attribute__((weak)) void NAME() { \
- UNREACHABLE( \
- "#NAME is a weak stub, and should have been runtime replaced by the node " \
- "implementation"); \
- }
-
-extern "C" {
-// List of Napi symbols generated from the node_api_headers/symbols.js file
-#include "NapiSymbols.h"
-}
diff --git a/chromium/third_party/dawn/src/dawn_node/README.md b/chromium/third_party/dawn/src/dawn_node/README.md
deleted file mode 100644
index 2faf96c68b3..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/README.md
+++ /dev/null
@@ -1,122 +0,0 @@
-# Dawn bindings for NodeJS
-
-Note: This code is currently WIP. There are a number of [known issues](#known-issues).
-
-## Building
-
-## System requirements
-
-- [CMake 3.10](https://cmake.org/download/) or greater
-- [Go 1.13](https://golang.org/dl/) or greater
-
-## Install `depot_tools`
-
-Dawn uses the Chromium build system and dependency management so you need to [install depot_tools] and add it to the PATH.
-
-[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
-
-### Fetch dependencies
-
-First, the steps are similar to [`doc/building.md`](../../docs/building.md), but instead of the `Get the code` step, run:
-
-```sh
-# Clone the repo as "dawn"
-git clone https://dawn.googlesource.com/dawn dawn && cd dawn
-
-# Bootstrap the NodeJS binding gclient configuration
-cp scripts/standalone-with-node.gclient .gclient
-
-# Fetch external dependencies and toolchains with gclient
-gclient sync
-```
-
-### Build
-
-Currently, the node bindings can only be built with CMake:
-
-```sh
-mkdir <build-output-path>
-cd <build-output-path>
-cmake <dawn-root-path> -GNinja -DDAWN_BUILD_NODE_BINDINGS=1 -DDAWN_ENABLE_PIC=1
-ninja dawn.node
-```
-
-### Running WebGPU CTS
-
-1. [Build](#build) the `dawn.node` NodeJS module.
-2. Checkout the [WebGPU CTS repo](https://github.com/gpuweb/cts)
- - Run `npm install` from inside the CTS directory to install its dependencies
-
-```sh
-./src/dawn_node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> [WebGPU CTS query]
-```
-
-If this fails with the error message `TypeError: expander is not a function or its return value is not iterable`, try appending `--build=false` to the start of the `run-cts` command line flags.
-
-To test against SwiftShader instead of the default Vulkan device, prefix `./src/dawn_node/tools/run-cts` with `VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json` and append `--flag=dawn-backend=vulkan` to the start of run-cts command line flags. For example:
-
-```sh
-VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json ./src/dawn_node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> --flag=dawn-backend=vulkan [WebGPU CTS query]
-```
-
-The `--flag` parameter must be passed in multiple times, once for each flag begin set. Here are some common arguments:
-* `dawn-backend=<null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles>`
-* `dlldir=<path>` - used to add an extra DLL search path on Windows, primarily to load the right d3dcompiler_47.dll
-* `enable-dawn-features=<features>` - enable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn_native/Toggles.cpp), e.g. `dump_shaders`
-* `disable-dawn-features=<features>` - disable [Dawn toggles](https://dawn.googlesource.com/dawn/+/refs/heads/main/src/dawn_native/Toggles.cpp)
-
-For example, on Windows, to use the d3dcompiler_47.dll from a Chromium checkout, and to dump shader output, we could run the following using Git Bash:
-
-```sh
-./src/dawn_node/tools/run-cts --verbose --dawn-node=/c/src/dawn/build/Debug/dawn.node --cts=/c/src/gpuweb-cts --flag=dlldir="C:\src\chromium\src\out\Release" --flag=enable-dawn-features=dump_shaders 'webgpu:shader,execution,builtin,abs:integer_builtin_functions,abs_unsigned:storageClass="storage";storageMode="read_write";containerType="vector";isAtomic=false;baseType="u32";type="vec2%3Cu32%3E"'
-```
-
-Note that we pass `--verbose` above so that all test output, including the dumped shader, is written to stdout.
-
-## Debugging TypeScript with VSCode
-
-Open or create the `.vscode/launch.json` file, and add:
-
-```json
-{
- "version": "0.2.0",
- "configurations": [
- {
- "name": "Debug with node",
- "type": "node",
- "request": "launch",
- "outFiles": [ "./**/*.js" ],
- "args": [
- "-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
- "--", "dummy-arg",
- "--gpu-provider",
- "[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
- "[test-query]", // REPLACE: [test-query]
- ],
- "cwd": "[cts-root]" // REPLACE: [cts-root]
- }
- ]
-}
-```
-
-Replacing:
-
-- `[cts-root]` with the path to the CTS root directory. If you are editing the `.vscode/launch.json` from within the CTS workspace, then you may use `${workspaceFolder}`.
-- `[path-to-dawn.node]` this the path to the `dawn.node` module built by the [build step](#Build)
-- `test-query` with the test query string. Example: `webgpu:shader,execution,builtin,abs:*`
-
-
-## Known issues
-
-- Many WebGPU CTS tests are currently known to fail
-- Dawn uses special token values for some parameters / fields. These are currently passed straight through to dawn from the JavaScript. discussions: [1](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn_node/binding/Converter.cpp#167), [2](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn_node/binding/Converter.cpp#928), [3](https://dawn-review.googlesource.com/c/dawn/+/64909/4/src/dawn_node/binding/GPUTexture.cpp#42)
-- Backend validation is currently always set to 'full' to aid in debugging. This can be extremely slow. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn_node/binding/GPU.cpp#25)
-- Attempting to call `new T` in JavaScript, where `T` is an IDL interface type, should result in a TypeError "Illegal constructor". [discussion](https://dawn-review.googlesource.com/c/dawn/+/64902/9/src/dawn_node/interop/WebGPU.cpp.tmpl#293)
-- `GPUDevice` currently maintains a list of "lost promises". This should return the same promise. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64906/6/src/dawn_node/binding/GPUDevice.h#107)
-
-## Remaining work
-
-- Investigate CTS failures that are not expected to fail.
-- Generated includes live in `src/` for `dawn_node`, but outside for Dawn. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64903/9/src/dawn_node/interop/CMakeLists.txt#56)
-- Hook up to presubmit bots (CQ / Kokoro)
-- `binding::GPU` will require significant rework [once Dawn implements the device / adapter creation path properly](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn_node/binding/GPU.cpp).
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp
deleted file mode 100644
index 788abc2410c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/AsyncRunner.h"
-
-#include <cassert>
-#include <limits>
-
-namespace wgpu { namespace binding {
-
- AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
- }
-
- void AsyncRunner::Begin() {
- assert(count_ != std::numeric_limits<decltype(count_)>::max());
- if (count_++ == 0) {
- QueueTick();
- }
- }
-
- void AsyncRunner::End() {
- assert(count_ > 0);
- count_--;
- }
-
- void AsyncRunner::QueueTick() {
- // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
- // called.
- if (tick_queued_) {
- return;
- }
- tick_queued_ = true;
- env_.Global()
- .Get("setImmediate")
- .As<Napi::Function>()
- .Call({
- // TODO(crbug.com/dawn/1127): Create once, reuse.
- Napi::Function::New(env_,
- [this](const Napi::CallbackInfo&) {
- tick_queued_ = false;
- if (count_ > 0) {
- device_.Tick();
- QueueTick();
- }
- }),
- });
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h
deleted file mode 100644
index a86ee3a0b60..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_ASYNC_RUNNER_H_
-#define DAWN_NODE_BINDING_ASYNC_RUNNER_H_
-
-#include <stdint.h>
-#include <memory>
-
-#include "dawn/webgpu_cpp.h"
-#include "napi.h"
-
-namespace wgpu { namespace binding {
-
- // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
- // tasks in flight.
- class AsyncRunner {
- public:
- AsyncRunner(Napi::Env env, wgpu::Device device);
-
- // Begin() should be called when a new asynchronous task is started.
- // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
- // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
- // thread is idle. This will be repeatedly called until the number of executing asynchronous
- // tasks reaches 0 again.
- void Begin();
-
- // End() should be called once the asynchronous task has finished.
- // Every call to Begin() should eventually result in a call to End().
- void End();
-
- private:
- void QueueTick();
- Napi::Env env_;
- wgpu::Device const device_;
- uint64_t count_ = 0;
- bool tick_queued_ = false;
- };
-
- // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
- // AsyncRunner::End() on destruction.
- class AsyncTask {
- public:
- inline AsyncTask(AsyncTask&&) = default;
-
- // Constructor.
- // Calls AsyncRunner::Begin()
- inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
- runner_->Begin();
- };
-
- // Destructor.
- // Calls AsyncRunner::End()
- inline ~AsyncTask() {
- runner_->End();
- }
-
- private:
- AsyncTask(const AsyncTask&) = delete;
- AsyncTask& operator=(const AsyncTask&) = delete;
- std::shared_ptr<AsyncRunner> runner_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_ASYNC_RUNNER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt
deleted file mode 100644
index be0d508349c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2021 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-add_library(dawn_node_binding STATIC
- "AsyncRunner.cpp"
- "AsyncRunner.h"
- "Converter.cpp"
- "Converter.h"
- "Errors.cpp"
- "Errors.h"
- "Flags.cpp"
- "Flags.h"
- "GPU.cpp"
- "GPU.h"
- "GPUAdapter.cpp"
- "GPUAdapter.h"
- "GPUBindGroup.cpp"
- "GPUBindGroup.h"
- "GPUBindGroupLayout.cpp"
- "GPUBindGroupLayout.h"
- "GPUBuffer.cpp"
- "GPUBuffer.h"
- "GPUCommandBuffer.cpp"
- "GPUCommandBuffer.h"
- "GPUCommandEncoder.cpp"
- "GPUCommandEncoder.h"
- "GPUComputePassEncoder.cpp"
- "GPUComputePassEncoder.h"
- "GPUComputePipeline.cpp"
- "GPUComputePipeline.h"
- "GPUDevice.cpp"
- "GPUDevice.h"
- "GPUPipelineLayout.cpp"
- "GPUPipelineLayout.h"
- "GPUQuerySet.cpp"
- "GPUQuerySet.h"
- "GPUQueue.cpp"
- "GPUQueue.h"
- "GPURenderBundle.cpp"
- "GPURenderBundle.h"
- "GPURenderBundleEncoder.cpp"
- "GPURenderBundleEncoder.h"
- "GPURenderPassEncoder.cpp"
- "GPURenderPassEncoder.h"
- "GPURenderPipeline.cpp"
- "GPURenderPipeline.h"
- "GPUSampler.cpp"
- "GPUSampler.h"
- "GPUShaderModule.cpp"
- "GPUShaderModule.h"
- "GPUSupportedLimits.cpp"
- "GPUSupportedLimits.h"
- "GPUTexture.cpp"
- "GPUTexture.h"
- "GPUTextureView.cpp"
- "GPUTextureView.h"
-)
-
-target_include_directories(dawn_node_binding
- PRIVATE
- "${CMAKE_SOURCE_DIR}"
- "${NODE_API_HEADERS_DIR}/include"
- "${NODE_ADDON_API_DIR}"
- "${GEN_DIR}"
-)
-
-target_link_libraries(dawn_node_binding
- PRIVATE
- dawncpp
- dawn_node_interop
-)
-
-# dawn_node targets require C++17
-set_property(
- TARGET dawn_node_binding
- PROPERTY CXX_STANDARD 17
-)
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp b/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp
deleted file mode 100644
index d75e217a988..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp
+++ /dev/null
@@ -1,1153 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/Converter.h"
-
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUPipelineLayout.h"
-#include "src/dawn_node/binding/GPUSampler.h"
-#include "src/dawn_node/binding/GPUShaderModule.h"
-#include "src/dawn_node/binding/GPUTexture.h"
-#include "src/dawn_node/binding/GPUTextureView.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- Converter::~Converter() {
- for (auto& free : free_) {
- free();
- }
- }
-
- bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
- out = {};
- if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
- out.depthOrArrayLayers = dict->depthOrArrayLayers;
- out.width = dict->width;
- out.height = dict->height;
- return true;
- }
- if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
- switch (vec->size()) {
- default:
- case 3:
- out.depthOrArrayLayers = (*vec)[2];
- case 2: // fallthrough
- out.height = (*vec)[1];
- case 1: // fallthrough
- out.width = (*vec)[0];
- return true;
- case 0:
- break;
- }
- }
- Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
- out = {};
- out.x = in.x;
- out.y = in.y;
- out.z = in.z;
- return true;
- }
-
- bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
- out = {};
- if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
- out.r = dict->r;
- out.g = dict->g;
- out.b = dict->b;
- out.a = dict->a;
- return true;
- }
- if (auto* vec = std::get_if<std::vector<double>>(&in)) {
- switch (vec->size()) {
- default:
- case 4:
- out.a = (*vec)[3];
- case 3: // fallthrough
- out.b = (*vec)[2];
- case 2: // fallthrough
- out.g = (*vec)[1];
- case 1: // fallthrough
- out.r = (*vec)[0];
- return true;
- case 0:
- break;
- }
- }
- Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::Origin3D& out,
- const std::vector<interop::GPUIntegerCoordinate>& in) {
- out = {};
- switch (in.size()) {
- default:
- case 3:
- out.z = in[2];
- case 2: // fallthrough
- out.y = in[1];
- case 1: // fallthrough
- out.x = in[0];
- case 0:
- break;
- }
- return true;
- }
-
- bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
- out = wgpu::TextureAspect::All;
- switch (in) {
- case interop::GPUTextureAspect::kAll:
- out = wgpu::TextureAspect::All;
- return true;
- case interop::GPUTextureAspect::kStencilOnly:
- out = wgpu::TextureAspect::StencilOnly;
- return true;
- case interop::GPUTextureAspect::kDepthOnly:
- out = wgpu::TextureAspect::DepthOnly;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
- out = {};
- return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
- Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
- }
-
- bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
- out = {};
- out.buffer = *in.buffer.As<GPUBuffer>();
- return Convert(out.layout.offset, in.offset) &&
- Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
- Convert(out.layout.rowsPerImage, in.rowsPerImage);
- }
-
- bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
- out = {};
- if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
- std::visit(
- [&](auto&& v) {
- auto arr = v.ArrayBuffer();
- out.data = arr.Data();
- out.size = arr.ByteLength();
- },
- *view);
- return true;
- }
- if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
- out.data = arr->Data();
- out.size = arr->ByteLength();
- return true;
- }
- Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
- out = {};
- return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
- Convert(out.rowsPerImage, in.rowsPerImage);
- }
-
- bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
- out = wgpu::TextureFormat::Undefined;
- switch (in) {
- case interop::GPUTextureFormat::kR8Unorm:
- out = wgpu::TextureFormat::R8Unorm;
- return true;
- case interop::GPUTextureFormat::kR8Snorm:
- out = wgpu::TextureFormat::R8Snorm;
- return true;
- case interop::GPUTextureFormat::kR8Uint:
- out = wgpu::TextureFormat::R8Uint;
- return true;
- case interop::GPUTextureFormat::kR8Sint:
- out = wgpu::TextureFormat::R8Sint;
- return true;
- case interop::GPUTextureFormat::kR16Uint:
- out = wgpu::TextureFormat::R16Uint;
- return true;
- case interop::GPUTextureFormat::kR16Sint:
- out = wgpu::TextureFormat::R16Sint;
- return true;
- case interop::GPUTextureFormat::kR16Float:
- out = wgpu::TextureFormat::R16Float;
- return true;
- case interop::GPUTextureFormat::kRg8Unorm:
- out = wgpu::TextureFormat::RG8Unorm;
- return true;
- case interop::GPUTextureFormat::kRg8Snorm:
- out = wgpu::TextureFormat::RG8Snorm;
- return true;
- case interop::GPUTextureFormat::kRg8Uint:
- out = wgpu::TextureFormat::RG8Uint;
- return true;
- case interop::GPUTextureFormat::kRg8Sint:
- out = wgpu::TextureFormat::RG8Sint;
- return true;
- case interop::GPUTextureFormat::kR32Uint:
- out = wgpu::TextureFormat::R32Uint;
- return true;
- case interop::GPUTextureFormat::kR32Sint:
- out = wgpu::TextureFormat::R32Sint;
- return true;
- case interop::GPUTextureFormat::kR32Float:
- out = wgpu::TextureFormat::R32Float;
- return true;
- case interop::GPUTextureFormat::kRg16Uint:
- out = wgpu::TextureFormat::RG16Uint;
- return true;
- case interop::GPUTextureFormat::kRg16Sint:
- out = wgpu::TextureFormat::RG16Sint;
- return true;
- case interop::GPUTextureFormat::kRg16Float:
- out = wgpu::TextureFormat::RG16Float;
- return true;
- case interop::GPUTextureFormat::kRgba8Unorm:
- out = wgpu::TextureFormat::RGBA8Unorm;
- return true;
- case interop::GPUTextureFormat::kRgba8UnormSrgb:
- out = wgpu::TextureFormat::RGBA8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kRgba8Snorm:
- out = wgpu::TextureFormat::RGBA8Snorm;
- return true;
- case interop::GPUTextureFormat::kRgba8Uint:
- out = wgpu::TextureFormat::RGBA8Uint;
- return true;
- case interop::GPUTextureFormat::kRgba8Sint:
- out = wgpu::TextureFormat::RGBA8Sint;
- return true;
- case interop::GPUTextureFormat::kBgra8Unorm:
- out = wgpu::TextureFormat::BGRA8Unorm;
- return true;
- case interop::GPUTextureFormat::kBgra8UnormSrgb:
- out = wgpu::TextureFormat::BGRA8UnormSrgb;
- return true;
- case interop::GPUTextureFormat::kRgb9E5Ufloat:
- out = wgpu::TextureFormat::RGB9E5Ufloat;
- return true;
- case interop::GPUTextureFormat::kRgb10A2Unorm:
- out = wgpu::TextureFormat::RGB10A2Unorm;
- return true;
- case interop::GPUTextureFormat::kRg11B10Ufloat:
- out = wgpu::TextureFormat::RG11B10Ufloat;
- return true;
- case interop::GPUTextureFormat::kRg32Uint:
- out = wgpu::TextureFormat::RG32Uint;
- return true;
- case interop::GPUTextureFormat::kRg32Sint:
- out = wgpu::TextureFormat::RG32Sint;
- return true;
- case interop::GPUTextureFormat::kRg32Float:
- out = wgpu::TextureFormat::RG32Float;
- return true;
- case interop::GPUTextureFormat::kRgba16Uint:
- out = wgpu::TextureFormat::RGBA16Uint;
- return true;
- case interop::GPUTextureFormat::kRgba16Sint:
- out = wgpu::TextureFormat::RGBA16Sint;
- return true;
- case interop::GPUTextureFormat::kRgba16Float:
- out = wgpu::TextureFormat::RGBA16Float;
- return true;
- case interop::GPUTextureFormat::kRgba32Uint:
- out = wgpu::TextureFormat::RGBA32Uint;
- return true;
- case interop::GPUTextureFormat::kRgba32Sint:
- out = wgpu::TextureFormat::RGBA32Sint;
- return true;
- case interop::GPUTextureFormat::kRgba32Float:
- out = wgpu::TextureFormat::RGBA32Float;
- return true;
- case interop::GPUTextureFormat::kStencil8:
- out = wgpu::TextureFormat::Stencil8;
- return true;
- case interop::GPUTextureFormat::kDepth16Unorm:
- break; // TODO(crbug.com/dawn/1130): Unsupported.
- case interop::GPUTextureFormat::kDepth24Plus:
- out = wgpu::TextureFormat::Depth24Plus;
- return true;
- case interop::GPUTextureFormat::kDepth24PlusStencil8:
- out = wgpu::TextureFormat::Depth24PlusStencil8;
- return true;
- case interop::GPUTextureFormat::kDepth32Float:
- out = wgpu::TextureFormat::Depth32Float;
- return true;
- case interop::GPUTextureFormat::kBc1RgbaUnorm:
- out = wgpu::TextureFormat::BC1RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc2RgbaUnorm:
- out = wgpu::TextureFormat::BC2RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc3RgbaUnorm:
- out = wgpu::TextureFormat::BC3RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kBc4RUnorm:
- out = wgpu::TextureFormat::BC4RUnorm;
- return true;
- case interop::GPUTextureFormat::kBc4RSnorm:
- out = wgpu::TextureFormat::BC4RSnorm;
- return true;
- case interop::GPUTextureFormat::kBc5RgUnorm:
- out = wgpu::TextureFormat::BC5RGUnorm;
- return true;
- case interop::GPUTextureFormat::kBc5RgSnorm:
- out = wgpu::TextureFormat::BC5RGSnorm;
- return true;
- case interop::GPUTextureFormat::kBc6HRgbUfloat:
- out = wgpu::TextureFormat::BC6HRGBUfloat;
- return true;
- case interop::GPUTextureFormat::kBc6HRgbFloat:
- out = wgpu::TextureFormat::BC6HRGBFloat;
- return true;
- case interop::GPUTextureFormat::kBc7RgbaUnorm:
- out = wgpu::TextureFormat::BC7RGBAUnorm;
- return true;
- case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
- out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
- return true;
- case interop::GPUTextureFormat::kDepth24UnormStencil8:
- break; // TODO(crbug.com/dawn/1130): Unsupported.
- case interop::GPUTextureFormat::kDepth32FloatStencil8:
- break; // TODO(crbug.com/dawn/1130): Unsupported.
- }
- // TODO(crbug.com/dawn/1130): Add ASTC and ETC formats.
- Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
- out = static_cast<wgpu::TextureUsage>(in);
- return true;
- }
-
- bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
- out = static_cast<wgpu::ColorWriteMask>(in);
- return true;
- }
-
- bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
- out = static_cast<wgpu::BufferUsage>(in);
- return true;
- }
-
- bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
- out = static_cast<wgpu::MapMode>(in);
- return true;
- }
-
- bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
- out = static_cast<wgpu::ShaderStage>(in);
- return true;
- }
-
- bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
- out = wgpu::TextureDimension::e1D;
- switch (in) {
- case interop::GPUTextureDimension::k1D:
- out = wgpu::TextureDimension::e1D;
- return true;
- case interop::GPUTextureDimension::k2D:
- out = wgpu::TextureDimension::e2D;
- return true;
- case interop::GPUTextureDimension::k3D:
- out = wgpu::TextureDimension::e3D;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureViewDimension& out,
- const interop::GPUTextureViewDimension& in) {
- out = wgpu::TextureViewDimension::Undefined;
- switch (in) {
- case interop::GPUTextureViewDimension::k1D:
- out = wgpu::TextureViewDimension::e1D;
- return true;
- case interop::GPUTextureViewDimension::k2D:
- out = wgpu::TextureViewDimension::e2D;
- return true;
- case interop::GPUTextureViewDimension::k2DArray:
- out = wgpu::TextureViewDimension::e2DArray;
- return true;
- case interop::GPUTextureViewDimension::kCube:
- out = wgpu::TextureViewDimension::Cube;
- return true;
- case interop::GPUTextureViewDimension::kCubeArray:
- out = wgpu::TextureViewDimension::CubeArray;
- return true;
- case interop::GPUTextureViewDimension::k3D:
- out = wgpu::TextureViewDimension::e3D;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
- const interop::GPUProgrammableStage& in) {
- out = {};
- out.entryPoint = in.entryPoint.c_str();
- out.module = *in.module.As<GPUShaderModule>();
- return Convert(out.constants, out.constantCount, in.constants);
- }
-
- bool Converter::Convert(wgpu::ConstantEntry& out,
- const std::string& in_name,
- wgpu::interop::GPUPipelineConstantValue in_value) {
- out.key = in_name.c_str();
- out.value = in_value;
- return true;
- }
-
- bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
- out = {};
- return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
- Convert(out.srcFactor, in.srcFactor);
- }
-
- bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
- out = wgpu::BlendFactor::Zero;
- switch (in) {
- case interop::GPUBlendFactor::kZero:
- out = wgpu::BlendFactor::Zero;
- return true;
- case interop::GPUBlendFactor::kOne:
- out = wgpu::BlendFactor::One;
- return true;
- case interop::GPUBlendFactor::kSrc:
- out = wgpu::BlendFactor::Src;
- return true;
- case interop::GPUBlendFactor::kOneMinusSrc:
- out = wgpu::BlendFactor::OneMinusSrc;
- return true;
- case interop::GPUBlendFactor::kSrcAlpha:
- out = wgpu::BlendFactor::SrcAlpha;
- return true;
- case interop::GPUBlendFactor::kOneMinusSrcAlpha:
- out = wgpu::BlendFactor::OneMinusSrcAlpha;
- return true;
- case interop::GPUBlendFactor::kDst:
- out = wgpu::BlendFactor::Dst;
- return true;
- case interop::GPUBlendFactor::kOneMinusDst:
- out = wgpu::BlendFactor::OneMinusDst;
- return true;
- case interop::GPUBlendFactor::kDstAlpha:
- out = wgpu::BlendFactor::DstAlpha;
- return true;
- case interop::GPUBlendFactor::kOneMinusDstAlpha:
- out = wgpu::BlendFactor::OneMinusDstAlpha;
- return true;
- case interop::GPUBlendFactor::kSrcAlphaSaturated:
- out = wgpu::BlendFactor::SrcAlphaSaturated;
- return true;
- case interop::GPUBlendFactor::kConstant:
- out = wgpu::BlendFactor::Constant;
- return true;
- case interop::GPUBlendFactor::kOneMinusConstant:
- out = wgpu::BlendFactor::OneMinusConstant;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
- out = wgpu::BlendOperation::Add;
- switch (in) {
- case interop::GPUBlendOperation::kAdd:
- out = wgpu::BlendOperation::Add;
- return true;
- case interop::GPUBlendOperation::kSubtract:
- out = wgpu::BlendOperation::Subtract;
- return true;
- case interop::GPUBlendOperation::kReverseSubtract:
- out = wgpu::BlendOperation::ReverseSubtract;
- return true;
- case interop::GPUBlendOperation::kMin:
- out = wgpu::BlendOperation::Min;
- return true;
- case interop::GPUBlendOperation::kMax:
- out = wgpu::BlendOperation::Max;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
- out = {};
- return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
- }
-
- bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
- out = {};
- return Convert(out.topology, in.topology) &&
- Convert(out.stripIndexFormat, in.stripIndexFormat) &&
- Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
- }
-
- bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
- out = {};
- return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
- Convert(out.writeMask, in.writeMask);
- }
-
- bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
- out = {};
- return Convert(out.format, in.format) &&
- Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
- Convert(out.depthCompare, in.depthCompare) &&
- Convert(out.stencilFront, in.stencilFront) &&
- Convert(out.stencilBack, in.stencilBack) &&
- Convert(out.stencilReadMask, in.stencilReadMask) &&
- Convert(out.stencilWriteMask, in.stencilWriteMask) &&
- Convert(out.depthBias, in.depthBias) &&
- Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
- Convert(out.depthBiasClamp, in.depthBiasClamp);
- }
-
- bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
- out = {};
- return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
- Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
- }
-
- bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
- out = {};
- return Convert(out.targets, out.targetCount, in.targets) && //
- Convert(out.module, in.module) && //
- Convert(out.entryPoint, in.entryPoint) && //
- Convert(out.constants, out.constantCount, in.constants);
- }
-
- bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
- out = wgpu::PrimitiveTopology::LineList;
- switch (in) {
- case interop::GPUPrimitiveTopology::kPointList:
- out = wgpu::PrimitiveTopology::PointList;
- return true;
- case interop::GPUPrimitiveTopology::kLineList:
- out = wgpu::PrimitiveTopology::LineList;
- return true;
- case interop::GPUPrimitiveTopology::kLineStrip:
- out = wgpu::PrimitiveTopology::LineStrip;
- return true;
- case interop::GPUPrimitiveTopology::kTriangleList:
- out = wgpu::PrimitiveTopology::TriangleList;
- return true;
- case interop::GPUPrimitiveTopology::kTriangleStrip:
- out = wgpu::PrimitiveTopology::TriangleStrip;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
- out = wgpu::FrontFace::CW;
- switch (in) {
- case interop::GPUFrontFace::kCw:
- out = wgpu::FrontFace::CW;
- return true;
- case interop::GPUFrontFace::kCcw:
- out = wgpu::FrontFace::CCW;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
- out = wgpu::CullMode::None;
- switch (in) {
- case interop::GPUCullMode::kNone:
- out = wgpu::CullMode::None;
- return true;
- case interop::GPUCullMode::kFront:
- out = wgpu::CullMode::Front;
- return true;
- case interop::GPUCullMode::kBack:
- out = wgpu::CullMode::Back;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
- out = wgpu::CompareFunction::Undefined;
- switch (in) {
- case interop::GPUCompareFunction::kNever:
- out = wgpu::CompareFunction::Never;
- return true;
- case interop::GPUCompareFunction::kLess:
- out = wgpu::CompareFunction::Less;
- return true;
- case interop::GPUCompareFunction::kLessEqual:
- out = wgpu::CompareFunction::LessEqual;
- return true;
- case interop::GPUCompareFunction::kGreater:
- out = wgpu::CompareFunction::Greater;
- return true;
- case interop::GPUCompareFunction::kGreaterEqual:
- out = wgpu::CompareFunction::GreaterEqual;
- return true;
- case interop::GPUCompareFunction::kEqual:
- out = wgpu::CompareFunction::Equal;
- return true;
- case interop::GPUCompareFunction::kNotEqual:
- out = wgpu::CompareFunction::NotEqual;
- return true;
- case interop::GPUCompareFunction::kAlways:
- out = wgpu::CompareFunction::Always;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
- out = wgpu::IndexFormat::Undefined;
- switch (in) {
- case interop::GPUIndexFormat::kUint16:
- out = wgpu::IndexFormat::Uint16;
- return true;
- case interop::GPUIndexFormat::kUint32:
- out = wgpu::IndexFormat::Uint32;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
- out = wgpu::StencilOperation::Zero;
- switch (in) {
- case interop::GPUStencilOperation::kKeep:
- out = wgpu::StencilOperation::Keep;
- return true;
- case interop::GPUStencilOperation::kZero:
- out = wgpu::StencilOperation::Zero;
- return true;
- case interop::GPUStencilOperation::kReplace:
- out = wgpu::StencilOperation::Replace;
- return true;
- case interop::GPUStencilOperation::kInvert:
- out = wgpu::StencilOperation::Invert;
- return true;
- case interop::GPUStencilOperation::kIncrementClamp:
- out = wgpu::StencilOperation::IncrementClamp;
- return true;
- case interop::GPUStencilOperation::kDecrementClamp:
- out = wgpu::StencilOperation::DecrementClamp;
- return true;
- case interop::GPUStencilOperation::kIncrementWrap:
- out = wgpu::StencilOperation::IncrementWrap;
- return true;
- case interop::GPUStencilOperation::kDecrementWrap:
- out = wgpu::StencilOperation::DecrementWrap;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
- return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
- Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
- }
-
- bool Converter::Convert(wgpu::VertexBufferLayout& out,
- const interop::GPUVertexBufferLayout& in) {
- out = {};
- return Convert(out.attributes, out.attributeCount, in.attributes) &&
- Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
- }
-
- bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
- out = {};
- return Convert(out.module, in.module) &&
- Convert(out.buffers, out.bufferCount, in.buffers) &&
- Convert(out.entryPoint, in.entryPoint) &&
- Convert(out.constants, out.constantCount, in.constants);
- }
-
- bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
- out = wgpu::VertexStepMode::Instance;
- switch (in) {
- case interop::GPUVertexStepMode::kInstance:
- out = wgpu::VertexStepMode::Instance;
- return true;
- case interop::GPUVertexStepMode::kVertex:
- out = wgpu::VertexStepMode::Vertex;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
- return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
- Convert(out.shaderLocation, in.shaderLocation);
- }
-
- bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
- out = wgpu::VertexFormat::Undefined;
- switch (in) {
- case interop::GPUVertexFormat::kUint8X2:
- out = wgpu::VertexFormat::Uint8x2;
- return true;
- case interop::GPUVertexFormat::kUint8X4:
- out = wgpu::VertexFormat::Uint8x4;
- return true;
- case interop::GPUVertexFormat::kSint8X2:
- out = wgpu::VertexFormat::Sint8x2;
- return true;
- case interop::GPUVertexFormat::kSint8X4:
- out = wgpu::VertexFormat::Sint8x4;
- return true;
- case interop::GPUVertexFormat::kUnorm8X2:
- out = wgpu::VertexFormat::Unorm8x2;
- return true;
- case interop::GPUVertexFormat::kUnorm8X4:
- out = wgpu::VertexFormat::Unorm8x4;
- return true;
- case interop::GPUVertexFormat::kSnorm8X2:
- out = wgpu::VertexFormat::Snorm8x2;
- return true;
- case interop::GPUVertexFormat::kSnorm8X4:
- out = wgpu::VertexFormat::Snorm8x4;
- return true;
- case interop::GPUVertexFormat::kUint16X2:
- out = wgpu::VertexFormat::Uint16x2;
- return true;
- case interop::GPUVertexFormat::kUint16X4:
- out = wgpu::VertexFormat::Uint16x4;
- return true;
- case interop::GPUVertexFormat::kSint16X2:
- out = wgpu::VertexFormat::Sint16x2;
- return true;
- case interop::GPUVertexFormat::kSint16X4:
- out = wgpu::VertexFormat::Sint16x4;
- return true;
- case interop::GPUVertexFormat::kUnorm16X2:
- out = wgpu::VertexFormat::Unorm16x2;
- return true;
- case interop::GPUVertexFormat::kUnorm16X4:
- out = wgpu::VertexFormat::Unorm16x4;
- return true;
- case interop::GPUVertexFormat::kSnorm16X2:
- out = wgpu::VertexFormat::Snorm16x2;
- return true;
- case interop::GPUVertexFormat::kSnorm16X4:
- out = wgpu::VertexFormat::Snorm16x4;
- return true;
- case interop::GPUVertexFormat::kFloat16X2:
- out = wgpu::VertexFormat::Float16x2;
- return true;
- case interop::GPUVertexFormat::kFloat16X4:
- out = wgpu::VertexFormat::Float16x4;
- return true;
- case interop::GPUVertexFormat::kFloat32:
- out = wgpu::VertexFormat::Float32;
- return true;
- case interop::GPUVertexFormat::kFloat32X2:
- out = wgpu::VertexFormat::Float32x2;
- return true;
- case interop::GPUVertexFormat::kFloat32X3:
- out = wgpu::VertexFormat::Float32x3;
- return true;
- case interop::GPUVertexFormat::kFloat32X4:
- out = wgpu::VertexFormat::Float32x4;
- return true;
- case interop::GPUVertexFormat::kUint32:
- out = wgpu::VertexFormat::Uint32;
- return true;
- case interop::GPUVertexFormat::kUint32X2:
- out = wgpu::VertexFormat::Uint32x2;
- return true;
- case interop::GPUVertexFormat::kUint32X3:
- out = wgpu::VertexFormat::Uint32x3;
- return true;
- case interop::GPUVertexFormat::kUint32X4:
- out = wgpu::VertexFormat::Uint32x4;
- return true;
- case interop::GPUVertexFormat::kSint32:
- out = wgpu::VertexFormat::Sint32;
- return true;
- case interop::GPUVertexFormat::kSint32X2:
- out = wgpu::VertexFormat::Sint32x2;
- return true;
- case interop::GPUVertexFormat::kSint32X3:
- out = wgpu::VertexFormat::Sint32x3;
- return true;
- case interop::GPUVertexFormat::kSint32X4:
- out = wgpu::VertexFormat::Sint32x4;
- return true;
- default:
- break;
- }
- Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
- const interop::GPURenderPassColorAttachment& in) {
- out = {};
- if (auto* op = std::get_if<interop::GPULoadOp>(&in.loadValue)) {
- if (!Convert(out.loadOp, *op)) {
- return false;
- }
- } else if (auto* color = std::get_if<interop::GPUColor>(&in.loadValue)) {
- out.loadOp = wgpu::LoadOp::Clear;
- if (!Convert(out.clearColor, *color)) {
- return false;
- }
- } else {
- Napi::Error::New(env, "invalid value for GPURenderPassColorAttachment.loadValue")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- return Convert(out.view, in.view) && Convert(out.resolveTarget, in.resolveTarget) &&
- Convert(out.storeOp, in.storeOp);
- }
-
- bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
- const interop::GPURenderPassDepthStencilAttachment& in) {
- out = {};
- if (auto* op = std::get_if<interop::GPULoadOp>(&in.depthLoadValue)) {
- if (!Convert(out.depthLoadOp, *op)) {
- return false;
- }
- } else if (auto* value = std::get_if<float>(&in.depthLoadValue)) {
- out.stencilLoadOp = wgpu::LoadOp::Clear;
- if (!Convert(out.clearDepth, *value)) {
- return false;
- }
- } else {
- Napi::Error::New(env,
- "invalid value for GPURenderPassDepthStencilAttachment.depthLoadValue")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- if (auto* op = std::get_if<interop::GPULoadOp>(&in.stencilLoadValue)) {
- if (!Convert(out.stencilLoadOp, *op)) {
- return false;
- }
- } else if (auto* value = std::get_if<interop::GPUStencilValue>(&in.stencilLoadValue)) {
- if (!Convert(out.clearStencil, *value)) {
- return false;
- }
- } else {
- Napi::Error::New(env,
- "invalid value for "
- "GPURenderPassDepthStencilAttachment.stencilLoadValue")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- return Convert(out.view, in.view) && Convert(out.depthStoreOp, in.depthStoreOp) &&
- Convert(out.depthReadOnly, in.depthReadOnly) &&
- Convert(out.stencilStoreOp, in.stencilStoreOp) &&
- Convert(out.stencilReadOnly, in.stencilReadOnly);
- }
-
- bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
- out = wgpu::LoadOp::Clear;
- switch (in) {
- case interop::GPULoadOp::kLoad:
- out = wgpu::LoadOp::Load;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
- out = wgpu::StoreOp::Store;
- switch (in) {
- case interop::GPUStoreOp::kStore:
- out = wgpu::StoreOp::Store;
- return true;
- case interop::GPUStoreOp::kDiscard:
- out = wgpu::StoreOp::Discard;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
- out = {};
- if (!Convert(out.binding, in.binding)) {
- return false;
- }
-
- if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
- return Convert(out.sampler, *res);
- }
- if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
- return Convert(out.textureView, *res);
- }
- if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
- auto buffer = res->buffer.As<GPUBuffer>();
- out.size = wgpu::kWholeSize;
- if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
- return false;
- }
- out.buffer = *buffer;
- return true;
- }
- if (auto* res =
- std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
- // TODO(crbug.com/dawn/1129): External textures
- UNIMPLEMENTED();
- }
- Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
- const interop::GPUBindGroupLayoutEntry& in) {
- // TODO(crbug.com/dawn/1129): External textures
- return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
- Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
- Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
- }
-
- bool Converter::Convert(wgpu::BufferBindingLayout& out,
- const interop::GPUBufferBindingLayout& in) {
- return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
- Convert(out.minBindingSize, in.minBindingSize);
- }
-
- bool Converter::Convert(wgpu::SamplerBindingLayout& out,
- const interop::GPUSamplerBindingLayout& in) {
- return Convert(out.type, in.type);
- }
-
- bool Converter::Convert(wgpu::TextureBindingLayout& out,
- const interop::GPUTextureBindingLayout& in) {
- return Convert(out.sampleType, in.sampleType) &&
- Convert(out.viewDimension, in.viewDimension) &&
- Convert(out.multisampled, in.multisampled);
- }
-
- bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
- const interop::GPUStorageTextureBindingLayout& in) {
- return Convert(out.access, in.access) && Convert(out.format, in.format) &&
- Convert(out.viewDimension, in.viewDimension);
- }
-
- bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
- out = wgpu::BufferBindingType::Undefined;
- switch (in) {
- case interop::GPUBufferBindingType::kUniform:
- out = wgpu::BufferBindingType::Uniform;
- return true;
- case interop::GPUBufferBindingType::kStorage:
- out = wgpu::BufferBindingType::Storage;
- return true;
- case interop::GPUBufferBindingType::kReadOnlyStorage:
- out = wgpu::BufferBindingType::ReadOnlyStorage;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUBufferBindingType")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
- out = wgpu::TextureSampleType::Undefined;
- switch (in) {
- case interop::GPUTextureSampleType::kFloat:
- out = wgpu::TextureSampleType::Float;
- return true;
- case interop::GPUTextureSampleType::kUnfilterableFloat:
- out = wgpu::TextureSampleType::UnfilterableFloat;
- return true;
- case interop::GPUTextureSampleType::kDepth:
- out = wgpu::TextureSampleType::Depth;
- return true;
- case interop::GPUTextureSampleType::kSint:
- out = wgpu::TextureSampleType::Sint;
- return true;
- case interop::GPUTextureSampleType::kUint:
- out = wgpu::TextureSampleType::Uint;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUTextureSampleType")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::SamplerBindingType& out,
- const interop::GPUSamplerBindingType& in) {
- out = wgpu::SamplerBindingType::Undefined;
- switch (in) {
- case interop::GPUSamplerBindingType::kFiltering:
- out = wgpu::SamplerBindingType::Filtering;
- return true;
- case interop::GPUSamplerBindingType::kNonFiltering:
- out = wgpu::SamplerBindingType::NonFiltering;
- return true;
- case interop::GPUSamplerBindingType::kComparison:
- out = wgpu::SamplerBindingType::Comparison;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::StorageTextureAccess& out,
- const interop::GPUStorageTextureAccess& in) {
- out = wgpu::StorageTextureAccess::Undefined;
- switch (in) {
- case interop::GPUStorageTextureAccess::kWriteOnly:
- out = wgpu::StorageTextureAccess::WriteOnly;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
- out = wgpu::QueryType::Occlusion;
- switch (in) {
- case interop::GPUQueryType::kOcclusion:
- out = wgpu::QueryType::Occlusion;
- return true;
- case interop::GPUQueryType::kPipelineStatistics:
- out = wgpu::QueryType::PipelineStatistics;
- return true;
- case interop::GPUQueryType::kTimestamp:
- out = wgpu::QueryType::Timestamp;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::PipelineStatisticName& out,
- const interop::GPUPipelineStatisticName& in) {
- out = wgpu::PipelineStatisticName::VertexShaderInvocations;
- switch (in) {
- case interop::GPUPipelineStatisticName::kVertexShaderInvocations:
- out = wgpu::PipelineStatisticName::VertexShaderInvocations;
- return true;
- case interop::GPUPipelineStatisticName::kClipperInvocations:
- out = wgpu::PipelineStatisticName::ClipperInvocations;
- return true;
- case interop::GPUPipelineStatisticName::kClipperPrimitivesOut:
- out = wgpu::PipelineStatisticName::ClipperPrimitivesOut;
- return true;
- case interop::GPUPipelineStatisticName::kFragmentShaderInvocations:
- out = wgpu::PipelineStatisticName::FragmentShaderInvocations;
- return true;
- case interop::GPUPipelineStatisticName::kComputeShaderInvocations:
- out = wgpu::PipelineStatisticName::ComputeShaderInvocations;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUPipelineStatisticName")
- .ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
- out = wgpu::AddressMode::Repeat;
- switch (in) {
- case interop::GPUAddressMode::kClampToEdge:
- out = wgpu::AddressMode::ClampToEdge;
- return true;
- case interop::GPUAddressMode::kRepeat:
- out = wgpu::AddressMode::Repeat;
- return true;
- case interop::GPUAddressMode::kMirrorRepeat:
- out = wgpu::AddressMode::MirrorRepeat;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
- out = wgpu::FilterMode::Nearest;
- switch (in) {
- case interop::GPUFilterMode::kNearest:
- out = wgpu::FilterMode::Nearest;
- return true;
- case interop::GPUFilterMode::kLinear:
- out = wgpu::FilterMode::Linear;
- return true;
- }
- Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
- return false;
- }
-
- bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
- const interop::GPUComputePipelineDescriptor& in) {
- return Convert(out.label, in.label) && //
- Convert(out.layout, in.layout) && //
- Convert(out.compute, in.compute);
- }
-
- bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
- const interop::GPURenderPipelineDescriptor& in) {
- wgpu::RenderPipelineDescriptor desc{};
- return Convert(out.label, in.label) && //
- Convert(out.layout, in.layout) && //
- Convert(out.vertex, in.vertex) && //
- Convert(out.primitive, in.primitive) && //
- Convert(out.depthStencil, in.depthStencil) && //
- Convert(out.multisample, in.multisample) && //
- Convert(out.fragment, in.fragment);
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Converter.h b/chromium/third_party/dawn/src/dawn_node/binding/Converter.h
deleted file mode 100644
index 7169cb11752..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Converter.h
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_CONVERTER_H_
-#define DAWN_NODE_BINDING_CONVERTER_H_
-
-#include <functional>
-#include <type_traits>
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/binding/Errors.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
- // binding implementation type.
- template <typename T>
- struct ImplOfTraits {};
-
- // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
- // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
-#define DECLARE_IMPL(NAME) \
- class NAME; \
- template <> \
- struct ImplOfTraits<interop::NAME> { \
- using type = binding::NAME; \
- }
-
- // Declare the interop interface to binding implementations
- DECLARE_IMPL(GPUBindGroup);
- DECLARE_IMPL(GPUBindGroupLayout);
- DECLARE_IMPL(GPUBuffer);
- DECLARE_IMPL(GPUPipelineLayout);
- DECLARE_IMPL(GPUQuerySet);
- DECLARE_IMPL(GPURenderBundle);
- DECLARE_IMPL(GPURenderPipeline);
- DECLARE_IMPL(GPUSampler);
- DECLARE_IMPL(GPUShaderModule);
- DECLARE_IMPL(GPUTexture);
- DECLARE_IMPL(GPUTextureView);
-#undef DECLARE_IMPL
-
- // Helper for obtaining the binding implementation type from the interop interface type
- template <typename T>
- using ImplOf = typename ImplOfTraits<T>::type;
-
- // Converter is a utility class for converting IDL generated interop types into Dawn types.
- // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
- // heap allocations for conversions of vector or optional types. These pointers are
- // automatically freed when the Converter is destructed.
- class Converter {
- public:
- Converter(Napi::Env e) : env(e) {
- }
- ~Converter();
-
- // Conversion function. Converts the interop type IN to the Dawn type OUT.
- // Returns true on success, false on failure.
- template <typename OUT, typename IN>
- [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
- return Convert(std::forward<OUT>(out), std::forward<IN>(in));
- }
-
- // Vector conversion function. Converts the vector of interop type IN to a pointer of
- // elements of Dawn type OUT, which is assigned to 'out_els'.
- // out_count is assigned the number of elements in 'in'.
- // Returns true on success, false on failure.
- // The pointer assigned to 'out_els' is valid until the Converter is destructed.
- template <typename OUT, typename IN>
- [[nodiscard]] inline bool operator()(OUT*& out_els,
- uint32_t& out_count,
- const std::vector<IN>& in) {
- return Convert(out_els, out_count, in);
- }
-
- // Returns the Env that this Converter was constructed with.
- inline Napi::Env Env() const {
- return env;
- }
-
- // BufferSource is the converted type of interop::BufferSource.
- struct BufferSource {
- void* data;
- size_t size;
- };
-
- private:
- // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
- [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
-
- [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
-
- [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
-
- [[nodiscard]] bool Convert(wgpu::Origin3D& out,
- const std::vector<interop::GPUIntegerCoordinate>& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
-
- [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
- const interop::GPUImageCopyTexture& in);
-
- [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
- const interop::GPUImageCopyBuffer& in);
-
- [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
-
- [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
- const interop::GPUImageDataLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
- const interop::GPUTextureUsageFlags& in);
-
- [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
- const interop::GPUColorWriteFlags& in);
-
- [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
-
- [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
-
- [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
- const interop::GPUTextureDimension& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
- const interop::GPUTextureViewDimension& in);
-
- [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
- const interop::GPUProgrammableStage& in);
-
- [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
- const std::string& in_name,
- wgpu::interop::GPUPipelineConstantValue in_value);
-
- [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
-
- [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
-
- [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
-
- [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
-
- [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
-
- [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
- const interop::GPUColorTargetState& in);
-
- [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
- const interop::GPUDepthStencilState& in);
-
- [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
- const interop::GPUMultisampleState& in);
-
- [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
-
- [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
- const interop::GPUPrimitiveTopology& in);
-
- [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
-
- [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
-
- [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
- const interop::GPUCompareFunction& in);
-
- [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
-
- [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
- const interop::GPUStencilOperation& in);
-
- [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
- const interop::GPUStencilFaceState& in);
-
- [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
-
- [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
- const interop::GPUVertexBufferLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
-
- [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
- const interop::GPUVertexAttribute& in);
-
- [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
-
- [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
- const interop::GPURenderPassColorAttachment& in);
-
- [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
- const interop::GPURenderPassDepthStencilAttachment& in);
-
- [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
-
- [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
-
- [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
-
- [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
- const interop::GPUBindGroupLayoutEntry& in);
-
- [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
- const interop::GPUBufferBindingLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
- const interop::GPUSamplerBindingLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
- const interop::GPUTextureBindingLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
- const interop::GPUStorageTextureBindingLayout& in);
-
- [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
- const interop::GPUBufferBindingType& in);
-
- [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
- const interop::GPUSamplerBindingType& in);
-
- [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
- const interop::GPUTextureSampleType& in);
-
- [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
- const interop::GPUStorageTextureAccess& in);
-
- [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
-
- [[nodiscard]] bool Convert(wgpu::PipelineStatisticName& out,
- const interop::GPUPipelineStatisticName& in);
-
- [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
-
- [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
-
- [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
- const interop::GPUComputePipelineDescriptor& in);
-
- [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
- const interop::GPURenderPipelineDescriptor& in);
-
- // std::string to C string
- inline bool Convert(const char*& out, const std::string& in) {
- out = in.c_str();
- return true;
- }
-
- // Pass-through (no conversion)
- template <typename T>
- inline bool Convert(T& out, const T& in) {
- out = in;
- return true;
- }
-
- // Integral number conversion, with dynamic limit checking
- template <typename OUT,
- typename IN,
- typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
- inline bool Convert(OUT& out, const IN& in) {
- out = static_cast<OUT>(in);
- if (static_cast<IN>(out) != in) {
- Napi::Error::New(env, "Integer value (" + std::to_string(in) +
- ") cannot be converted to the Dawn data type without "
- "truncation of the value")
- .ThrowAsJavaScriptException();
- return false;
- }
- return true;
- }
-
- template <typename OUT, typename... IN_TYPES>
- inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
- return std::visit([&](auto&& i) { return Convert(out, i); }, in);
- }
-
- // If the std::optional does not have a value, then Convert() simply returns true and 'out'
- // is not assigned a new value.
- template <typename OUT, typename IN>
- inline bool Convert(OUT& out, const std::optional<IN>& in) {
- if (in.has_value()) {
- return Convert(out, in.value());
- }
- return true;
- }
-
- // std::optional -> T*
- // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
- // whether 'in' has a value.
- template <typename OUT,
- typename IN,
- typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
- inline bool Convert(OUT*& out, const std::optional<IN>& in) {
- if (in.has_value()) {
- auto* el = Allocate<std::remove_const_t<OUT>>();
- if (!Convert(*el, in.value())) {
- return false;
- }
- out = el;
- } else {
- out = nullptr;
- }
- return true;
- }
-
- // interop::Interface -> Dawn object
- template <typename OUT, typename IN>
- inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
- using Impl = ImplOf<IN>;
- out = *in.template As<Impl>();
- if (!out) {
- LOG("Dawn object has been destroyed. This should not happen");
- return false;
- }
- return true;
- }
-
- // vector -> raw pointer + count
- template <typename OUT, typename IN>
- inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
- if (in.size() == 0) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
- for (size_t i = 0; i < in.size(); i++) {
- if (!Convert(els[i], in[i])) {
- return false;
- }
- }
- out_els = els;
- return Convert(out_count, in.size());
- }
-
- // unordered_map -> raw pointer + count
- template <typename OUT, typename IN_KEY, typename IN_VALUE>
- inline bool Convert(OUT*& out_els,
- uint32_t& out_count,
- const std::unordered_map<IN_KEY, IN_VALUE>& in) {
- if (in.size() == 0) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
- size_t i = 0;
- for (auto& it : in) {
- if (!Convert(els[i++], it.first, it.second)) {
- return false;
- }
- }
- out_els = els;
- return Convert(out_count, in.size());
- }
-
- // std::optional<T> -> raw pointer + count
- template <typename OUT, typename IN>
- inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
- if (!in.has_value()) {
- out_els = nullptr;
- out_count = 0;
- return true;
- }
- return Convert(out_els, out_count, in.value());
- }
-
- Napi::Env env;
-
- // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
- // the first element. The array is freed when the Converter is destructed.
- template <typename T>
- T* Allocate(size_t n = 1) {
- auto* ptr = new T[n]{};
- free_.emplace_back([ptr] { delete[] ptr; });
- return ptr;
- }
-
- std::vector<std::function<void()>> free_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_CONVERTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp b/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp
deleted file mode 100644
index 953a1a8bc6f..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/Errors.h"
-
-namespace wgpu { namespace binding {
-
- namespace {
- constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
- constexpr char kWrongDocumentError[] = "WrongDocumentError";
- constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
- constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
- constexpr char kNotFoundError[] = "NotFoundError";
- constexpr char kNotSupportedError[] = "NotSupportedError";
- constexpr char kInUseAttributeError[] = "InUseAttributeError";
- constexpr char kInvalidStateError[] = "InvalidStateError";
- constexpr char kSyntaxError[] = "SyntaxError";
- constexpr char kInvalidModificationError[] = "InvalidModificationError";
- constexpr char kNamespaceError[] = "NamespaceError";
- constexpr char kSecurityError[] = "SecurityError";
- constexpr char kNetworkError[] = "NetworkError";
- constexpr char kAbortError[] = "AbortError";
- constexpr char kURLMismatchError[] = "URLMismatchError";
- constexpr char kQuotaExceededError[] = "QuotaExceededError";
- constexpr char kTimeoutError[] = "TimeoutError";
- constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
- constexpr char kDataCloneError[] = "DataCloneError";
- constexpr char kEncodingError[] = "EncodingError";
- constexpr char kNotReadableError[] = "NotReadableError";
- constexpr char kUnknownError[] = "UnknownError";
- constexpr char kConstraintError[] = "ConstraintError";
- constexpr char kDataError[] = "DataError";
- constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
- constexpr char kReadOnlyError[] = "ReadOnlyError";
- constexpr char kVersionError[] = "VersionError";
- constexpr char kOperationError[] = "OperationError";
- constexpr char kNotAllowedError[] = "NotAllowedError";
-
- static Napi::Error New(Napi::Env env,
- std::string name,
- std::string message = {},
- unsigned short code = 0) {
- auto err = Napi::Error::New(env);
- err.Set("name", name);
- err.Set("message", message.empty() ? name : message);
- err.Set("code", static_cast<double>(code));
- return err;
- }
-
- } // namespace
-
- Napi::Error Errors::HierarchyRequestError(Napi::Env env) {
- return New(env, kHierarchyRequestError);
- }
-
- Napi::Error Errors::WrongDocumentError(Napi::Env env) {
- return New(env, kWrongDocumentError);
- }
-
- Napi::Error Errors::InvalidCharacterError(Napi::Env env) {
- return New(env, kInvalidCharacterError);
- }
-
- Napi::Error Errors::NoModificationAllowedError(Napi::Env env) {
- return New(env, kNoModificationAllowedError);
- }
-
- Napi::Error Errors::NotFoundError(Napi::Env env) {
- return New(env, kNotFoundError);
- }
-
- Napi::Error Errors::NotSupportedError(Napi::Env env) {
- return New(env, kNotSupportedError);
- }
-
- Napi::Error Errors::InUseAttributeError(Napi::Env env) {
- return New(env, kInUseAttributeError);
- }
-
- Napi::Error Errors::InvalidStateError(Napi::Env env) {
- return New(env, kInvalidStateError);
- }
-
- Napi::Error Errors::SyntaxError(Napi::Env env) {
- return New(env, kSyntaxError);
- }
-
- Napi::Error Errors::InvalidModificationError(Napi::Env env) {
- return New(env, kInvalidModificationError);
- }
-
- Napi::Error Errors::NamespaceError(Napi::Env env) {
- return New(env, kNamespaceError);
- }
-
- Napi::Error Errors::SecurityError(Napi::Env env) {
- return New(env, kSecurityError);
- }
-
- Napi::Error Errors::NetworkError(Napi::Env env) {
- return New(env, kNetworkError);
- }
-
- Napi::Error Errors::AbortError(Napi::Env env) {
- return New(env, kAbortError);
- }
-
- Napi::Error Errors::URLMismatchError(Napi::Env env) {
- return New(env, kURLMismatchError);
- }
-
- Napi::Error Errors::QuotaExceededError(Napi::Env env) {
- return New(env, kQuotaExceededError);
- }
-
- Napi::Error Errors::TimeoutError(Napi::Env env) {
- return New(env, kTimeoutError);
- }
-
- Napi::Error Errors::InvalidNodeTypeError(Napi::Env env) {
- return New(env, kInvalidNodeTypeError);
- }
-
- Napi::Error Errors::DataCloneError(Napi::Env env) {
- return New(env, kDataCloneError);
- }
-
- Napi::Error Errors::EncodingError(Napi::Env env) {
- return New(env, kEncodingError);
- }
-
- Napi::Error Errors::NotReadableError(Napi::Env env) {
- return New(env, kNotReadableError);
- }
-
- Napi::Error Errors::UnknownError(Napi::Env env) {
- return New(env, kUnknownError);
- }
-
- Napi::Error Errors::ConstraintError(Napi::Env env) {
- return New(env, kConstraintError);
- }
-
- Napi::Error Errors::DataError(Napi::Env env) {
- return New(env, kDataError);
- }
-
- Napi::Error Errors::TransactionInactiveError(Napi::Env env) {
- return New(env, kTransactionInactiveError);
- }
-
- Napi::Error Errors::ReadOnlyError(Napi::Env env) {
- return New(env, kReadOnlyError);
- }
-
- Napi::Error Errors::VersionError(Napi::Env env) {
- return New(env, kVersionError);
- }
-
- Napi::Error Errors::OperationError(Napi::Env env) {
- return New(env, kOperationError);
- }
-
- Napi::Error Errors::NotAllowedError(Napi::Env env) {
- return New(env, kNotAllowedError);
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Errors.h b/chromium/third_party/dawn/src/dawn_node/binding/Errors.h
deleted file mode 100644
index 0f1a40e6b8e..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Errors.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_ERRORS_H_
-#define DAWN_NODE_BINDING_ERRORS_H_
-
-#include "napi.h"
-
-namespace wgpu { namespace binding {
-
- // Errors contains static helper methods for creating DOMException error
- // messages as documented at:
- // https://heycam.github.io/webidl/#idl-DOMException-error-names
- class Errors {
- public:
- static Napi::Error HierarchyRequestError(Napi::Env);
- static Napi::Error WrongDocumentError(Napi::Env);
- static Napi::Error InvalidCharacterError(Napi::Env);
- static Napi::Error NoModificationAllowedError(Napi::Env);
- static Napi::Error NotFoundError(Napi::Env);
- static Napi::Error NotSupportedError(Napi::Env);
- static Napi::Error InUseAttributeError(Napi::Env);
- static Napi::Error InvalidStateError(Napi::Env);
- static Napi::Error SyntaxError(Napi::Env);
- static Napi::Error InvalidModificationError(Napi::Env);
- static Napi::Error NamespaceError(Napi::Env);
- static Napi::Error SecurityError(Napi::Env);
- static Napi::Error NetworkError(Napi::Env);
- static Napi::Error AbortError(Napi::Env);
- static Napi::Error URLMismatchError(Napi::Env);
- static Napi::Error QuotaExceededError(Napi::Env);
- static Napi::Error TimeoutError(Napi::Env);
- static Napi::Error InvalidNodeTypeError(Napi::Env);
- static Napi::Error DataCloneError(Napi::Env);
- static Napi::Error EncodingError(Napi::Env);
- static Napi::Error NotReadableError(Napi::Env);
- static Napi::Error UnknownError(Napi::Env);
- static Napi::Error ConstraintError(Napi::Env);
- static Napi::Error DataError(Napi::Env);
- static Napi::Error TransactionInactiveError(Napi::Env);
- static Napi::Error ReadOnlyError(Napi::Env);
- static Napi::Error VersionError(Napi::Env);
- static Napi::Error OperationError(Napi::Env);
- static Napi::Error NotAllowedError(Napi::Env);
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_ERRORS_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Flags.cpp b/chromium/third_party/dawn/src/dawn_node/binding/Flags.cpp
deleted file mode 100644
index 3602e92ce80..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Flags.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/Flags.h"
-
-namespace wgpu { namespace binding {
- void Flags::Set(const std::string& key, const std::string& value) {
- flags_[key] = value;
- }
-
- std::optional<std::string> Flags::Get(const std::string& key) const {
- auto iter = flags_.find(key);
- if (iter != flags_.end()) {
- return iter->second;
- }
- return {};
- }
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Flags.h b/chromium/third_party/dawn/src/dawn_node/binding/Flags.h
deleted file mode 100644
index 1ca4c3004b9..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/Flags.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_FLAGS_H_
-#define DAWN_NODE_BINDING_FLAGS_H_
-
-#include <optional>
-#include <string>
-#include <unordered_map>
-
-namespace wgpu { namespace binding {
- // Flags maintains a key-value mapping of input flags passed into the module's create()
- // function, used to configure dawn_node.
- class Flags {
- public:
- void Set(const std::string& key, const std::string& value);
- std::optional<std::string> Get(const std::string& key) const;
-
- private:
- std::unordered_map<std::string, std::string> flags_;
- };
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_FLAGS_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp
deleted file mode 100644
index 50eb020195f..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPU.h"
-
-#include "src/dawn_node/binding/GPUAdapter.h"
-
-#include <cstdlib>
-
-#if defined(_WIN32)
-# include <Windows.h>
-#endif
-
-namespace {
- std::string GetEnvVar(const char* varName) {
-#if defined(_WIN32)
- // Use _dupenv_s to avoid unsafe warnings about std::getenv
- char* value = nullptr;
- _dupenv_s(&value, nullptr, varName);
- if (value) {
- std::string result = value;
- free(value);
- return result;
- }
- return "";
-#else
- if (auto* val = std::getenv(varName)) {
- return val;
- }
- return "";
-#endif
- }
-
- void SetDllDir(const char* dir) {
- (void)dir;
-#if defined(_WIN32)
- ::SetDllDirectory(dir);
-#endif
- }
-
-} // namespace
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPU
- ////////////////////////////////////////////////////////////////////////////////
- GPU::GPU(Flags flags) : flags_(std::move(flags)) {
- // TODO: Disable in 'release'
- instance_.EnableBackendValidation(true);
- instance_.SetBackendValidationLevel(dawn_native::BackendValidationLevel::Full);
-
- // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
- if (auto dir = flags_.Get("dlldir")) {
- SetDllDir(dir->c_str());
- }
- instance_.DiscoverDefaultAdapters();
- }
-
- interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
- Napi::Env env,
- interop::GPURequestAdapterOptions options) {
- auto promise = interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(
- env, PROMISE_INFO);
-
- if (options.forceFallbackAdapter) {
- // Software adapters are not currently supported.
- promise.Resolve({});
- return promise;
- }
-
- auto adapters = instance_.GetAdapters();
- if (adapters.empty()) {
- promise.Resolve({});
- return promise;
- }
-
-#if defined(_WIN32)
- constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
-#elif defined(__linux__)
- constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
-#elif defined(__APPLE__)
- constexpr auto defaultBackendType = wgpu::BackendType::Metal;
-#else
-# error "Unsupported platform"
-#endif
-
- auto targetBackendType = defaultBackendType;
- std::string forceBackend;
-
- // Check for override from env var
- if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
- forceBackend = envVar;
- }
-
- // Check for override from flag
- if (auto f = flags_.Get("dawn-backend")) {
- forceBackend = *f;
- }
-
- std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
- [](char c) { return std::tolower(c); });
-
- if (!forceBackend.empty()) {
- if (forceBackend == "null") {
- targetBackendType = wgpu::BackendType::Null;
- } else if (forceBackend == "webgpu") {
- targetBackendType = wgpu::BackendType::WebGPU;
- } else if (forceBackend == "d3d11") {
- targetBackendType = wgpu::BackendType::D3D11;
- } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
- targetBackendType = wgpu::BackendType::D3D12;
- } else if (forceBackend == "metal") {
- targetBackendType = wgpu::BackendType::Metal;
- } else if (forceBackend == "vulkan" || forceBackend == "vk") {
- targetBackendType = wgpu::BackendType::Vulkan;
- } else if (forceBackend == "opengl" || forceBackend == "gl") {
- targetBackendType = wgpu::BackendType::OpenGL;
- } else if (forceBackend == "opengles" || forceBackend == "gles") {
- targetBackendType = wgpu::BackendType::OpenGLES;
- }
- }
-
- // Default to first adapter if we don't find a match
- size_t adapterIndex = 0;
- for (size_t i = 0; i < adapters.size(); ++i) {
- wgpu::AdapterProperties props;
- adapters[i].GetProperties(&props);
- if (props.backendType == targetBackendType) {
- adapterIndex = i;
- break;
- }
- }
-
- auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
- promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
- return promise;
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPU.h b/chromium/third_party/dawn/src/dawn_node/binding/GPU.h
deleted file mode 100644
index 8f1999df315..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPU.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPU_H_
-#define DAWN_NODE_BINDING_GPU_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/binding/Flags.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
- // GPU is an implementation of interop::GPU that wraps a dawn_native::Instance.
- class GPU final : public interop::GPU {
- public:
- GPU(Flags flags);
-
- // interop::GPU interface compliance
- interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
- Napi::Env env,
- interop::GPURequestAdapterOptions options) override;
-
- private:
- const Flags flags_;
- dawn_native::Instance instance_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPU_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp
deleted file mode 100644
index c9a6edb42e3..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUAdapter.h"
-
-#include <unordered_set>
-
-#include "src/dawn_node/binding/Flags.h"
-#include "src/dawn_node/binding/GPUDevice.h"
-#include "src/dawn_node/binding/GPUSupportedLimits.h"
-
-namespace {
- // TODO(amaiorano): Move to utility header
- std::vector<std::string> Split(const std::string& s, char delim) {
- if (s.empty())
- return {};
-
- std::vector<std::string> result;
- const size_t lastIndex = s.length() - 1;
- size_t startIndex = 0;
- size_t i = startIndex;
-
- while (i <= lastIndex) {
- if (s[i] == delim) {
- auto token = s.substr(startIndex, i - startIndex);
- if (!token.empty()) // Discard empty tokens
- result.push_back(token);
- startIndex = i + 1;
- } else if (i == lastIndex) {
- auto token = s.substr(startIndex, i - startIndex + 1);
- if (!token.empty()) // Discard empty tokens
- result.push_back(token);
- }
- ++i;
- }
- return result;
- }
-} // namespace
-
-namespace wgpu { namespace binding {
-
- namespace {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::binding::<anon>::Features
- // Implements interop::GPUSupportedFeatures
- ////////////////////////////////////////////////////////////////////////////////
- class Features : public interop::GPUSupportedFeatures {
- public:
- Features(WGPUDeviceProperties properties) {
- if (properties.depthClamping) {
- enabled_.emplace(interop::GPUFeatureName::kDepthClamping);
- }
- if (properties.pipelineStatisticsQuery) {
- enabled_.emplace(interop::GPUFeatureName::kPipelineStatisticsQuery);
- }
- if (properties.textureCompressionBC) {
- enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
- }
- if (properties.timestampQuery) {
- enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
- }
-
- // TODO(crbug.com/dawn/1130)
- // interop::GPUFeatureName::kDepth24UnormStencil8:
- // interop::GPUFeatureName::kDepth32FloatStencil8:
- }
-
- bool has(interop::GPUFeatureName feature) {
- return enabled_.count(feature) != 0;
- }
-
- // interop::GPUSupportedFeatures compliance
- bool has(Napi::Env, std::string name) override {
- interop::GPUFeatureName feature;
- if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
- return has(feature);
- }
- return false;
- }
- std::vector<std::string> keys(Napi::Env) override {
- std::vector<std::string> out;
- out.reserve(enabled_.size());
- for (auto feature : enabled_) {
- out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
- }
- return out;
- }
-
- private:
- std::unordered_set<interop::GPUFeatureName> enabled_;
- };
-
- } // namespace
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUAdapter
- // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
- ////////////////////////////////////////////////////////////////////////////////
- GPUAdapter::GPUAdapter(dawn_native::Adapter a, const Flags& flags)
- : adapter_(a), flags_(flags) {
- }
-
- std::string GPUAdapter::getName(Napi::Env) {
- return "dawn-adapter";
- }
-
- interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
- return interop::GPUSupportedFeatures::Create<Features>(env,
- adapter_.GetAdapterProperties());
- }
-
- interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
- WGPUSupportedLimits limits{};
- if (!adapter_.GetLimits(&limits)) {
- Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
- }
-
- wgpu::SupportedLimits wgpuLimits{};
-
-#define COPY_LIMIT(LIMIT) wgpuLimits.limits.LIMIT = limits.limits.LIMIT
- COPY_LIMIT(maxTextureDimension1D);
- COPY_LIMIT(maxTextureDimension2D);
- COPY_LIMIT(maxTextureDimension3D);
- COPY_LIMIT(maxTextureArrayLayers);
- COPY_LIMIT(maxBindGroups);
- COPY_LIMIT(maxDynamicUniformBuffersPerPipelineLayout);
- COPY_LIMIT(maxDynamicStorageBuffersPerPipelineLayout);
- COPY_LIMIT(maxSampledTexturesPerShaderStage);
- COPY_LIMIT(maxSamplersPerShaderStage);
- COPY_LIMIT(maxStorageBuffersPerShaderStage);
- COPY_LIMIT(maxStorageTexturesPerShaderStage);
- COPY_LIMIT(maxUniformBuffersPerShaderStage);
- COPY_LIMIT(maxUniformBufferBindingSize);
- COPY_LIMIT(maxStorageBufferBindingSize);
- COPY_LIMIT(minUniformBufferOffsetAlignment);
- COPY_LIMIT(minStorageBufferOffsetAlignment);
- COPY_LIMIT(maxVertexBuffers);
- COPY_LIMIT(maxVertexAttributes);
- COPY_LIMIT(maxVertexBufferArrayStride);
- COPY_LIMIT(maxInterStageShaderComponents);
- COPY_LIMIT(maxComputeWorkgroupStorageSize);
- COPY_LIMIT(maxComputeInvocationsPerWorkgroup);
- COPY_LIMIT(maxComputeWorkgroupSizeX);
- COPY_LIMIT(maxComputeWorkgroupSizeY);
- COPY_LIMIT(maxComputeWorkgroupSizeZ);
- COPY_LIMIT(maxComputeWorkgroupsPerDimension);
-#undef COPY_LIMIT
-
- return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
- }
-
- bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
- Napi::Env env,
- interop::GPUDeviceDescriptor descriptor) {
- dawn_native::DawnDeviceDescriptor desc{}; // TODO(crbug.com/dawn/1133): Fill in.
- interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
-
- // See src/dawn_native/Features.cpp for enum <-> string mappings.
- for (auto required : descriptor.requiredFeatures) {
- switch (required) {
- case interop::GPUFeatureName::kDepthClamping:
- desc.requiredFeatures.emplace_back("depth-clamping");
- continue;
- case interop::GPUFeatureName::kPipelineStatisticsQuery:
- desc.requiredFeatures.emplace_back("pipeline-statistics-query");
- continue;
- case interop::GPUFeatureName::kTextureCompressionBc:
- desc.requiredFeatures.emplace_back("texture-compression-bc");
- continue;
- case interop::GPUFeatureName::kTimestampQuery:
- desc.requiredFeatures.emplace_back("timestamp-query");
- continue;
- case interop::GPUFeatureName::kDepth24UnormStencil8:
- case interop::GPUFeatureName::kDepth32FloatStencil8:
- continue; // TODO(crbug.com/dawn/1130)
- }
- UNIMPLEMENTED("required: ", required);
- }
-
- // Propogate enabled/disabled dawn features
- // Note: DawnDeviceDescriptor::forceEnabledToggles and forceDisabledToggles are vectors of
- // 'const char*', so we make sure the parsed strings survive the CreateDevice() call by
- // storing them on the stack.
- std::vector<std::string> enabledToggles;
- std::vector<std::string> disabledToggles;
- if (auto values = flags_.Get("enable-dawn-features")) {
- enabledToggles = Split(*values, ',');
- for (auto& t : enabledToggles) {
- desc.forceEnabledToggles.emplace_back(t.c_str());
- }
- }
- if (auto values = flags_.Get("disable-dawn-features")) {
- disabledToggles = Split(*values, ',');
- for (auto& t : disabledToggles) {
- desc.forceDisabledToggles.emplace_back(t.c_str());
- }
- }
-
- auto wgpu_device = adapter_.CreateDevice(&desc);
- if (wgpu_device) {
- promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
- } else {
- Napi::Error::New(env, "failed to create device").ThrowAsJavaScriptException();
- }
- return promise;
- }
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h
deleted file mode 100644
index 6f837c86be0..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUADAPTER_H_
-#define DAWN_NODE_BINDING_GPUADAPTER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
- class Flags;
-
- // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn_native::Adapter.
- class GPUAdapter final : public interop::GPUAdapter {
- public:
- GPUAdapter(dawn_native::Adapter a, const Flags& flags);
-
- // interop::GPUAdapter interface compliance
- std::string getName(Napi::Env) override;
- interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
- interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
- bool getIsFallbackAdapter(Napi::Env) override;
- interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
- Napi::Env env,
- interop::GPUDeviceDescriptor descriptor) override;
-
- private:
- dawn_native::Adapter adapter_;
- const Flags& flags_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp
deleted file mode 100644
index e1567005808..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUBindGroup.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBindGroup
- ////////////////////////////////////////////////////////////////////////////////
- GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
- }
-
- std::optional<std::string> GPUBindGroup::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUBindGroup::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h
deleted file mode 100644
index e71fc8be5ce..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUBINDGROUP_H_
-#define DAWN_NODE_BINDING_GPUBINDGROUP_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
- class GPUBindGroup final : public interop::GPUBindGroup {
- public:
- GPUBindGroup(wgpu::BindGroup group);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::BindGroup &() const {
- return group_;
- }
-
- // interop::GPUBindGroup interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::BindGroup group_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUBINDGROUP_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp
deleted file mode 100644
index ddaeaba6f11..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUBindGroupLayout.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBindGroupLayout
- ////////////////////////////////////////////////////////////////////////////////
- GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
- : layout_(std::move(layout)) {
- }
-
- std::optional<std::string> GPUBindGroupLayout::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUBindGroupLayout::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h
deleted file mode 100644
index 34874bf1d91..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
-#define DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
- // wgpu::BindGroupLayout.
- class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
- public:
- GPUBindGroupLayout(wgpu::BindGroupLayout layout);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::BindGroupLayout &() const {
- return layout_;
- }
-
- // interop::GPUBindGroupLayout interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::BindGroupLayout layout_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp
deleted file mode 100644
index 2fca5d51446..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUBuffer.h"
-
-#include <memory>
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/Errors.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUBuffer
- // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
- // robustly passing, pull out validation and see what / if breaks.
- ////////////////////////////////////////////////////////////////////////////////
- GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
- wgpu::BufferDescriptor desc,
- wgpu::Device device,
- std::shared_ptr<AsyncRunner> async)
- : buffer_(std::move(buffer)),
- desc_(desc),
- device_(std::move(device)),
- async_(std::move(async)) {
- if (desc.mappedAtCreation) {
- state_ = State::MappedAtCreation;
- }
- }
-
- interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
- interop::GPUMapModeFlags mode,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- wgpu::MapMode md{};
- Converter conv(env);
- if (!conv(md, mode)) {
- interop::Promise<void> promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- if (state_ != State::Unmapped) {
- interop::Promise<void> promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- device_.InjectError(wgpu::ErrorType::Validation,
- "mapAsync called on buffer that is not in the unmapped state");
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- interop::Promise<void> promise;
- AsyncTask task;
- State& state;
- };
- auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_, state_};
- auto promise = ctx->promise;
-
- uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
-
- state_ = State::MappingPending;
-
- buffer_.MapAsync(
- md, offset, s,
- [](WGPUBufferMapAsyncStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- c->state = State::Unmapped;
- switch (status) {
- case WGPUBufferMapAsyncStatus_Force32:
- UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
- break;
- case WGPUBufferMapAsyncStatus_Success:
- c->promise.Resolve();
- c->state = State::Mapped;
- break;
- case WGPUBufferMapAsyncStatus_Error:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
- case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
- c->promise.Reject(Errors::AbortError(c->env));
- break;
- case WGPUBufferMapAsyncStatus_Unknown:
- case WGPUBufferMapAsyncStatus_DeviceLost:
- // TODO: The spec is a bit vague around what the promise should do
- // here.
- c->promise.Reject(Errors::UnknownError(c->env));
- break;
- }
- },
- ctx);
-
- return promise;
- }
-
- interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
-
- uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
-
- uint64_t start = offset;
- uint64_t end = offset + s;
- for (auto& mapping : mapped_) {
- if (mapping.Intersects(start, end)) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
- }
-
- auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
- ? buffer_.GetMappedRange(offset, s)
- : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
- if (!ptr) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
- auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
- // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
- mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
- return array_buffer;
- }
-
- void GPUBuffer::unmap(Napi::Env env) {
- if (state_ == State::Destroyed) {
- device_.InjectError(wgpu::ErrorType::Validation,
- "unmap() called on a destroyed buffer");
- return;
- }
-
- for (auto& mapping : mapped_) {
- mapping.buffer.Value().Detach();
- }
- mapped_.clear();
- buffer_.Unmap();
- state_ = State::Unmapped;
- }
-
- void GPUBuffer::destroy(Napi::Env) {
- buffer_.Destroy();
- state_ = State::Destroyed;
- }
-
- std::optional<std::string> GPUBuffer::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h
deleted file mode 100644
index c3d8b031988..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUBUFFER_H_
-#define DAWN_NODE_BINDING_GPUBUFFER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/binding/AsyncRunner.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
- class GPUBuffer final : public interop::GPUBuffer {
- public:
- GPUBuffer(wgpu::Buffer buffer,
- wgpu::BufferDescriptor desc,
- wgpu::Device device,
- std::shared_ptr<AsyncRunner> async);
-
- // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
- const wgpu::BufferDescriptor& Desc() const {
- return desc_;
- }
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Buffer &() const {
- return buffer_;
- }
-
- // interop::GPUBuffer interface compliance
- interop::Promise<void> mapAsync(Napi::Env env,
- interop::GPUMapModeFlags mode,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- interop::ArrayBuffer getMappedRange(Napi::Env env,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void unmap(Napi::Env) override;
- void destroy(Napi::Env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- struct Mapping {
- uint64_t start;
- uint64_t end;
- inline bool Intersects(uint64_t s, uint64_t e) const {
- return s < end && e > start;
- }
- Napi::Reference<interop::ArrayBuffer> buffer;
- };
-
- // https://www.w3.org/TR/webgpu/#buffer-interface
- enum class State {
- Unmapped,
- Mapped,
- MappedAtCreation,
- MappingPending,
- Destroyed,
- };
-
- wgpu::Buffer buffer_;
- wgpu::BufferDescriptor const desc_;
- wgpu::Device const device_;
- std::shared_ptr<AsyncRunner> async_;
- State state_ = State::Unmapped;
- std::vector<Mapping> mapped_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp
deleted file mode 100644
index 0ff503fc54c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUCommandBuffer.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUCommandBuffer
- ////////////////////////////////////////////////////////////////////////////////
-
- GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
- }
-
- interop::Promise<double> GPUCommandBuffer::getExecutionTime(Napi::Env) {
- UNIMPLEMENTED();
- };
-
- std::optional<std::string> GPUCommandBuffer::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUCommandBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h
deleted file mode 100644
index b6fc3ba511e..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
-#define DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
- // wgpu::CommandBuffer.
- class GPUCommandBuffer final : public interop::GPUCommandBuffer {
- public:
- GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::CommandBuffer &() const {
- return cmd_buf_;
- }
-
- // interop::GPUCommandBuffer interface compliance
- interop::Promise<double> getExecutionTime(Napi::Env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::CommandBuffer cmd_buf_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp
deleted file mode 100644
index cf3925c2680..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUCommandEncoder.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPU.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUCommandBuffer.h"
-#include "src/dawn_node/binding/GPUComputePassEncoder.h"
-#include "src/dawn_node/binding/GPUQuerySet.h"
-#include "src/dawn_node/binding/GPURenderPassEncoder.h"
-#include "src/dawn_node/binding/GPUTexture.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUCommandEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
- }
-
- interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
- Napi::Env env,
- interop::GPURenderPassDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::RenderPassDescriptor desc{};
- if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
- !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
- !conv(desc.label, descriptor.label) ||
- !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
- return {};
- }
- return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
- env, enc_.BeginRenderPass(&desc));
- }
-
- interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
- Napi::Env env,
- interop::GPUComputePassDescriptor descriptor) {
- wgpu::ComputePassDescriptor desc{};
- return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
- env, enc_.BeginComputePass(&desc));
- }
-
- void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> source,
- interop::GPUSize64 sourceOffset,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset,
- interop::GPUSize64 size) {
- Converter conv(env);
-
- wgpu::Buffer src{};
- wgpu::Buffer dst{};
- if (!conv(src, source) || //
- !conv(dst, destination)) {
- return;
- }
-
- enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
- }
-
- void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
- interop::GPUImageCopyBuffer source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyBuffer src{};
- wgpu::ImageCopyTexture dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyBufferToTexture(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyBuffer destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyTexture src{};
- wgpu::ImageCopyBuffer dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyTextureToBuffer(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) {
- Converter conv(env);
-
- wgpu::ImageCopyTexture src{};
- wgpu::ImageCopyTexture dst{};
- wgpu::Extent3D size{};
- if (!conv(src, source) || //
- !conv(dst, destination) || //
- !conv(size, copySize)) {
- return;
- }
-
- enc_.CopyTextureToTexture(&src, &dst, &size);
- }
-
- void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPUCommandEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPUCommandEncoder::writeTimestamp(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- if (!conv(q, querySet)) {
- return;
- }
-
- enc_.WriteTimestamp(q, queryIndex);
- }
-
- void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 firstQuery,
- interop::GPUSize32 queryCount,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- uint32_t f = 0;
- uint32_t c = 0;
- wgpu::Buffer b{};
- uint64_t o = 0;
-
- if (!conv(q, querySet) || //
- !conv(f, firstQuery) || //
- !conv(c, queryCount) || //
- !conv(b, destination) || //
- !conv(o, destinationOffset)) {
- return;
- }
-
- enc_.ResolveQuerySet(q, f, c, b, o);
- }
-
- interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
- Napi::Env env,
- interop::GPUCommandBufferDescriptor descriptor) {
- wgpu::CommandBufferDescriptor desc{};
- return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
- }
-
- std::optional<std::string> GPUCommandEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUCommandEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h
deleted file mode 100644
index f23e281234f..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
-#define DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
- // wgpu::CommandEncoder.
- class GPUCommandEncoder final : public interop::GPUCommandEncoder {
- public:
- GPUCommandEncoder(wgpu::CommandEncoder enc);
-
- // interop::GPUCommandEncoder interface compliance
- interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
- Napi::Env,
- interop::GPURenderPassDescriptor descriptor) override;
- interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
- Napi::Env,
- interop::GPUComputePassDescriptor descriptor) override;
- void copyBufferToBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> source,
- interop::GPUSize64 sourceOffset,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset,
- interop::GPUSize64 size) override;
- void copyBufferToTexture(Napi::Env,
- interop::GPUImageCopyBuffer source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) override;
- void copyTextureToBuffer(Napi::Env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyBuffer destination,
- interop::GPUExtent3D copySize) override;
- void copyTextureToTexture(Napi::Env,
- interop::GPUImageCopyTexture source,
- interop::GPUImageCopyTexture destination,
- interop::GPUExtent3D copySize) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void writeTimestamp(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void resolveQuerySet(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 firstQuery,
- interop::GPUSize32 queryCount,
- interop::Interface<interop::GPUBuffer> destination,
- interop::GPUSize64 destinationOffset) override;
- interop::Interface<interop::GPUCommandBuffer> finish(
- Napi::Env env,
- interop::GPUCommandBufferDescriptor descriptor) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::CommandEncoder enc_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp
deleted file mode 100644
index 6edc467ccf5..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUComputePassEncoder.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPUBindGroup.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUComputePipeline.h"
-#include "src/dawn_node/binding/GPUQuerySet.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUComputePassEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
- : enc_(std::move(enc)) {
- }
-
- void GPUComputePassEncoder::setPipeline(
- Napi::Env,
- interop::Interface<interop::GPUComputePipeline> pipeline) {
- enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
- }
-
- void GPUComputePassEncoder::dispatch(Napi::Env,
- interop::GPUSize32 x,
- interop::GPUSize32 y,
- interop::GPUSize32 z) {
- enc_.Dispatch(x, y, z);
- }
-
- void GPUComputePassEncoder::dispatchIndirect(
- Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
- }
-
- void GPUComputePassEncoder::beginPipelineStatisticsQuery(
- Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- UNIMPLEMENTED();
- }
-
- void GPUComputePassEncoder::endPipelineStatisticsQuery(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUComputePassEncoder::writeTimestamp(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- if (!conv(q, querySet)) {
- return;
- }
-
- enc_.WriteTimestamp(q, queryIndex);
- }
-
- void GPUComputePassEncoder::endPass(Napi::Env) {
- enc_.EndPass();
- }
-
- void GPUComputePassEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
- }
-
- void GPUComputePassEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
-
- void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- std::optional<std::string> GPUComputePassEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUComputePassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h
deleted file mode 100644
index 9c7064be99b..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
-#define DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
- // wgpu::ComputePassEncoder.
- class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
- public:
- GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ComputePassEncoder &() const {
- return enc_;
- }
-
- // interop::GPUComputePassEncoder interface compliance
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPUComputePipeline> pipeline) override;
- void dispatch(Napi::Env,
- interop::GPUSize32 x,
- interop::GPUSize32 y,
- interop::GPUSize32 z) override;
- void dispatchIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void beginPipelineStatisticsQuery(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void endPipelineStatisticsQuery(Napi::Env) override;
- void writeTimestamp(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void endPass(Napi::Env) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::ComputePassEncoder enc_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp
deleted file mode 100644
index 0eef82ec544..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUComputePipeline.h"
-
-#include "src/dawn_node/binding/GPUBindGroupLayout.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUComputePipeline
- ////////////////////////////////////////////////////////////////////////////////
- GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
- : pipeline_(std::move(pipeline)) {
- }
-
- interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
- Napi::Env env,
- uint32_t index) {
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, pipeline_.GetBindGroupLayout(index));
- }
-
- std::optional<std::string> GPUComputePipeline::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUComputePipeline::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h
deleted file mode 100644
index e9dde76a6a7..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUPIPELINE_H_
-#define DAWN_NODE_BINDING_GPUPIPELINE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
- // wgpu::ComputePipeline.
- class GPUComputePipeline final : public interop::GPUComputePipeline {
- public:
- GPUComputePipeline(wgpu::ComputePipeline pipeline);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ComputePipeline &() const {
- return pipeline_;
- }
-
- // interop::GPUComputePipeline interface compliance
- interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
- uint32_t index) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::ComputePipeline pipeline_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp
deleted file mode 100644
index 2f928e5dc1b..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUDevice.h"
-
-#include <memory>
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/Errors.h"
-#include "src/dawn_node/binding/GPUBindGroup.h"
-#include "src/dawn_node/binding/GPUBindGroupLayout.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUCommandBuffer.h"
-#include "src/dawn_node/binding/GPUCommandEncoder.h"
-#include "src/dawn_node/binding/GPUComputePipeline.h"
-#include "src/dawn_node/binding/GPUPipelineLayout.h"
-#include "src/dawn_node/binding/GPUQuerySet.h"
-#include "src/dawn_node/binding/GPUQueue.h"
-#include "src/dawn_node/binding/GPURenderBundleEncoder.h"
-#include "src/dawn_node/binding/GPURenderPipeline.h"
-#include "src/dawn_node/binding/GPUSampler.h"
-#include "src/dawn_node/binding/GPUShaderModule.h"
-#include "src/dawn_node/binding/GPUSupportedLimits.h"
-#include "src/dawn_node/binding/GPUTexture.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- namespace {
-
- class DeviceLostInfo : public interop::GPUDeviceLostInfo {
- public:
- DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
- : reason_(reason), message_(message) {
- }
- std::variant<interop::GPUDeviceLostReason> getReason(Napi::Env env) override {
- return reason_;
- }
- std::string getMessage(Napi::Env) override {
- return message_;
- }
-
- private:
- interop::GPUDeviceLostReason reason_;
- std::string message_;
- };
-
- class OOMError : public interop::GPUOutOfMemoryError {};
- class ValidationError : public interop::GPUValidationError {
- public:
- ValidationError(std::string message) : message_(std::move(message)) {
- }
-
- std::string getMessage(Napi::Env) override {
- return message_;
- };
-
- private:
- std::string message_;
- };
-
- } // namespace
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUDevice
- ////////////////////////////////////////////////////////////////////////////////
- GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
- : env_(env), device_(device), async_(std::make_shared<AsyncRunner>(env, device)) {
- device_.SetLoggingCallback(
- [](WGPULoggingType type, char const* message, void* userdata) {
- std::cout << type << ": " << message << std::endl;
- },
- nullptr);
- device_.SetUncapturedErrorCallback(
- [](WGPUErrorType type, char const* message, void* userdata) {
- std::cout << type << ": " << message << std::endl;
- },
- nullptr);
-
- device_.SetDeviceLostCallback(
- [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
- auto r = interop::GPUDeviceLostReason::kDestroyed;
- switch (reason) {
- case WGPUDeviceLostReason_Force32:
- UNREACHABLE("WGPUDeviceLostReason_Force32");
- break;
- case WGPUDeviceLostReason_Destroyed:
- case WGPUDeviceLostReason_Undefined:
- r = interop::GPUDeviceLostReason::kDestroyed;
- break;
- }
- auto* self = static_cast<GPUDevice*>(userdata);
- for (auto promise : self->lost_promises_) {
- promise.Resolve(
- interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
- }
- },
- this);
- }
-
- GPUDevice::~GPUDevice() {
- }
-
- interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
- class Features : public interop::GPUSupportedFeatures {
- public:
- bool has(Napi::Env, std::string feature) override {
- UNIMPLEMENTED();
- }
- std::vector<std::string> keys(Napi::Env) override {
- UNIMPLEMENTED();
- }
- };
- return interop::GPUSupportedFeatures::Create<Features>(env);
- }
-
- interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
- wgpu::SupportedLimits limits{};
- if (!device_.GetLimits(&limits)) {
- Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
- }
- return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
- }
-
- interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
- // TODO(crbug.com/dawn/1144): Should probably return the same Queue JS object.
- return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
- }
-
- void GPUDevice::destroy(Napi::Env env) {
- for (auto promise : lost_promises_) {
- promise.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
- env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
- }
- lost_promises_.clear();
- device_.Release();
- }
-
- interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
- Napi::Env env,
- interop::GPUBufferDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::BufferDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
- !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
- return {};
- }
- return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
- device_, async_);
- }
-
- interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
- Napi::Env env,
- interop::GPUTextureDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::TextureDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) || //
- !conv(desc.size, descriptor.size) || //
- !conv(desc.dimension, descriptor.dimension) || //
- !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
- !conv(desc.sampleCount, descriptor.sampleCount) || //
- !conv(desc.format, descriptor.format)) {
- return {};
- }
- return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
- }
-
- interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
- Napi::Env env,
- interop::GPUSamplerDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::SamplerDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || //
- !conv(desc.addressModeU, descriptor.addressModeU) || //
- !conv(desc.addressModeV, descriptor.addressModeV) || //
- !conv(desc.addressModeW, descriptor.addressModeW) || //
- !conv(desc.magFilter, descriptor.magFilter) || //
- !conv(desc.minFilter, descriptor.minFilter) || //
- !conv(desc.mipmapFilter, descriptor.mipmapFilter) || //
- !conv(desc.lodMinClamp, descriptor.lodMinClamp) || //
- !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) || //
- !conv(desc.compare, descriptor.compare) || //
- !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
- return {};
- }
- return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
- }
-
- interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
- Napi::Env,
- interop::GPUExternalTextureDescriptor descriptor) {
- UNIMPLEMENTED();
- }
-
- interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
- Napi::Env env,
- interop::GPUBindGroupLayoutDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::BindGroupLayoutDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.entries, desc.entryCount, descriptor.entries)) {
- return {};
- }
-
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, device_.CreateBindGroupLayout(&desc));
- }
-
- interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
- Napi::Env env,
- interop::GPUPipelineLayoutDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::PipelineLayoutDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
- return {};
- }
-
- return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
- env, device_.CreatePipelineLayout(&desc));
- }
-
- interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
- Napi::Env env,
- interop::GPUBindGroupDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::BindGroupDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
- !conv(desc.entries, desc.entryCount, descriptor.entries)) {
- return {};
- }
-
- return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
- }
-
- interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
- Napi::Env env,
- interop::GPUShaderModuleDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
- wgpu::ShaderModuleDescriptor sm_desc{};
- if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
- return {};
- }
- sm_desc.nextInChain = &wgsl_desc;
-
- return interop::GPUShaderModule::Create<GPUShaderModule>(
- env, device_.CreateShaderModule(&sm_desc), async_);
- }
-
- interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
- Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::ComputePipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- return {};
- }
-
- return interop::GPUComputePipeline::Create<GPUComputePipeline>(
- env, device_.CreateComputePipeline(&desc));
- }
-
- interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
- Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::RenderPipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- return {};
- }
-
- return interop::GPURenderPipeline::Create<GPURenderPipeline>(
- env, device_.CreateRenderPipeline(&desc));
- }
-
- interop::Promise<interop::Interface<interop::GPUComputePipeline>>
- GPUDevice::createComputePipelineAsync(Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
-
- Converter conv(env);
-
- wgpu::ComputePipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- Promise promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- device_.CreateComputePipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
- char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
- c->env, pipeline));
- break;
- default:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- }
- },
- ctx);
-
- return promise;
- }
-
- interop::Promise<interop::Interface<interop::GPURenderPipeline>>
- GPUDevice::createRenderPipelineAsync(Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) {
- using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
-
- Converter conv(env);
-
- wgpu::RenderPipelineDescriptor desc{};
- if (!conv(desc, descriptor)) {
- Promise promise(env, PROMISE_INFO);
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- device_.CreateRenderPipelineAsync(
- &desc,
- [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
- char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- switch (status) {
- case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
- c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
- c->env, pipeline));
- break;
- default:
- c->promise.Reject(Errors::OperationError(c->env));
- break;
- }
- },
- ctx);
-
- return promise;
- }
-
- interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
- Napi::Env env,
- interop::GPUCommandEncoderDescriptor descriptor) {
- wgpu::CommandEncoderDescriptor desc{};
- return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
- env, device_.CreateCommandEncoder(&desc));
- }
-
- interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
- Napi::Env env,
- interop::GPURenderBundleEncoderDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::RenderBundleEncoderDescriptor desc{};
- if (!conv(desc.label, descriptor.label) ||
- !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
- !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
- !conv(desc.sampleCount, descriptor.sampleCount)) {
- return {};
- }
-
- return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
- env, device_.CreateRenderBundleEncoder(&desc));
- }
-
- interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
- Napi::Env env,
- interop::GPUQuerySetDescriptor descriptor) {
- Converter conv(env);
-
- wgpu::QuerySetDescriptor desc{};
- if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
- !conv(desc.count, descriptor.count) ||
- !conv(desc.pipelineStatistics, desc.pipelineStatisticsCount,
- descriptor.pipelineStatistics)) {
- return {};
- }
-
- return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
- }
-
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
- Napi::Env env) {
- auto promise =
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>(env, PROMISE_INFO);
- lost_promises_.emplace_back(promise);
- return promise;
- }
-
- void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
- wgpu::ErrorFilter f = wgpu::ErrorFilter::None;
- switch (filter) {
- case interop::GPUErrorFilter::kOutOfMemory:
- f = wgpu::ErrorFilter::OutOfMemory;
- break;
- case interop::GPUErrorFilter::kValidation:
- f = wgpu::ErrorFilter::Validation;
- break;
- default:
- Napi::Error::New(env, "unhandled GPUErrorFilter value")
- .ThrowAsJavaScriptException();
- return;
- }
- device_.PushErrorScope(f);
- }
-
- interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
- using Promise = interop::Promise<std::optional<interop::GPUError>>;
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto* ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- bool ok = device_.PopErrorScope(
- [](WGPUErrorType type, char const* message, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- auto env = c->env;
- switch (type) {
- case WGPUErrorType::WGPUErrorType_NoError:
- c->promise.Resolve({});
- break;
- case WGPUErrorType::WGPUErrorType_OutOfMemory:
- c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
- break;
- case WGPUErrorType::WGPUErrorType_Unknown:
- case WGPUErrorType::WGPUErrorType_DeviceLost:
- case WGPUErrorType::WGPUErrorType_Validation:
- c->promise.Resolve(
- interop::GPUValidationError::Create<ValidationError>(env, message));
- break;
- default:
- c->promise.Reject("unhandled error type");
- break;
- }
- },
- ctx);
-
- if (ok) {
- return promise;
- }
-
- delete ctx;
- promise.Reject(Errors::OperationError(env));
- return promise;
- }
-
- std::optional<std::string> GPUDevice::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- };
-
- void GPUDevice::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- };
-
- interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUDevice::setOnuncapturederror(Napi::Env,
- interop::Interface<interop::EventHandler> value) {
- UNIMPLEMENTED();
- }
-
- void GPUDevice::addEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
- UNIMPLEMENTED();
- }
-
- void GPUDevice::removeEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
- UNIMPLEMENTED();
- }
-
- bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h
deleted file mode 100644
index a3c5dce31df..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUDEVICE_H_
-#define DAWN_NODE_BINDING_GPUDEVICE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "napi.h"
-#include "src/dawn_node/binding/AsyncRunner.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
- // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
- class GPUDevice final : public interop::GPUDevice {
- public:
- GPUDevice(Napi::Env env, wgpu::Device device);
- ~GPUDevice();
-
- // interop::GPUDevice interface compliance
- interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
- interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
- interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
- void destroy(Napi::Env) override;
- interop::Interface<interop::GPUBuffer> createBuffer(
- Napi::Env env,
- interop::GPUBufferDescriptor descriptor) override;
- interop::Interface<interop::GPUTexture> createTexture(
- Napi::Env,
- interop::GPUTextureDescriptor descriptor) override;
- interop::Interface<interop::GPUSampler> createSampler(
- Napi::Env,
- interop::GPUSamplerDescriptor descriptor) override;
- interop::Interface<interop::GPUExternalTexture> importExternalTexture(
- Napi::Env,
- interop::GPUExternalTextureDescriptor descriptor) override;
- interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
- Napi::Env,
- interop::GPUBindGroupLayoutDescriptor descriptor) override;
- interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
- Napi::Env,
- interop::GPUPipelineLayoutDescriptor descriptor) override;
- interop::Interface<interop::GPUBindGroup> createBindGroup(
- Napi::Env,
- interop::GPUBindGroupDescriptor descriptor) override;
- interop::Interface<interop::GPUShaderModule> createShaderModule(
- Napi::Env,
- interop::GPUShaderModuleDescriptor descriptor) override;
- interop::Interface<interop::GPUComputePipeline> createComputePipeline(
- Napi::Env,
- interop::GPUComputePipelineDescriptor descriptor) override;
- interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
- Napi::Env,
- interop::GPURenderPipelineDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPUComputePipeline>>
- createComputePipelineAsync(Napi::Env env,
- interop::GPUComputePipelineDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
- Napi::Env env,
- interop::GPURenderPipelineDescriptor descriptor) override;
- interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
- Napi::Env env,
- interop::GPUCommandEncoderDescriptor descriptor) override;
- interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
- Napi::Env,
- interop::GPURenderBundleEncoderDescriptor descriptor) override;
- interop::Interface<interop::GPUQuerySet> createQuerySet(
- Napi::Env,
- interop::GPUQuerySetDescriptor descriptor) override;
- interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
- Napi::Env env) override;
- void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
- interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
- interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
- void setOnuncapturederror(Napi::Env,
- interop::Interface<interop::EventHandler> value) override;
- void addEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
- void removeEventListener(
- Napi::Env,
- std::string type,
- std::optional<interop::Interface<interop::EventListener>> callback,
- std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
- bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
-
- private:
- void QueueTick();
-
- Napi::Env env_;
- wgpu::Device device_;
- std::shared_ptr<AsyncRunner> async_;
- std::vector<interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>>
- lost_promises_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUDEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp
deleted file mode 100644
index 861df210d4e..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUPipelineLayout.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUPipelineLayout
- ////////////////////////////////////////////////////////////////////////////////
- GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
- }
-
- std::optional<std::string> GPUPipelineLayout::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUPipelineLayout::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h
deleted file mode 100644
index a1d0b87bcd4..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
-#define DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
- // wgpu::PipelineLayout.
- class GPUPipelineLayout final : public interop::GPUPipelineLayout {
- public:
- GPUPipelineLayout(wgpu::PipelineLayout layout);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::PipelineLayout &() const {
- return layout_;
- }
-
- // interop::GPUPipelineLayout interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::PipelineLayout layout_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp
deleted file mode 100644
index e56564a94c0..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUQuerySet.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUQuerySet
- ////////////////////////////////////////////////////////////////////////////////
- GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
- }
-
- void GPUQuerySet::destroy(Napi::Env) {
- query_set_.Destroy();
- }
-
- std::optional<std::string> GPUQuerySet::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUQuerySet::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h
deleted file mode 100644
index 8669e16ac28..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUQUERYSET_H_
-#define DAWN_NODE_BINDING_GPUQUERYSET_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
- class GPUQuerySet final : public interop::GPUQuerySet {
- public:
- GPUQuerySet(wgpu::QuerySet query_set);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::QuerySet &() const {
- return query_set_;
- }
-
- // interop::GPUQuerySet interface compliance
- void destroy(Napi::Env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::QuerySet query_set_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUQUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp
deleted file mode 100644
index e1c0413d706..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUQueue.h"
-
-#include <memory>
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUCommandBuffer.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUQueue
- ////////////////////////////////////////////////////////////////////////////////
- GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
- : queue_(std::move(queue)), async_(std::move(async)) {
- }
-
- void GPUQueue::submit(
- Napi::Env env,
- std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
- std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
- for (size_t i = 0; i < commandBuffers.size(); i++) {
- bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
- }
- Converter conv(env);
- uint32_t bufs_size;
- if (!conv(bufs_size, bufs.size())) {
- return;
- }
- queue_.Submit(bufs_size, bufs.data());
- }
-
- interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
- struct Context {
- Napi::Env env;
- interop::Promise<void> promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- queue_.OnSubmittedWorkDone(
- 0,
- [](WGPUQueueWorkDoneStatus status, void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
- if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
- Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
- .ThrowAsJavaScriptException();
- }
- c->promise.Resolve();
- },
- ctx);
-
- return promise;
- }
-
- void GPUQueue::writeBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 bufferOffset,
- interop::BufferSource data,
- interop::GPUSize64 dataOffset,
- std::optional<interop::GPUSize64> size) {
- wgpu::Buffer buf = *buffer.As<GPUBuffer>();
- Converter::BufferSource src{};
- Converter conv(env);
- if (!conv(src, data)) {
- return;
- }
-
- // TODO(crbug.com/dawn/1132): Bounds check
- if (src.data) {
- src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
- }
- src.size -= dataOffset;
- if (size.has_value()) {
- src.size = size.value();
- }
-
- queue_.WriteBuffer(buf, bufferOffset, src.data, src.size);
- }
-
- void GPUQueue::writeTexture(Napi::Env env,
- interop::GPUImageCopyTexture destination,
- interop::BufferSource data,
- interop::GPUImageDataLayout dataLayout,
- interop::GPUExtent3D size) {
- wgpu::ImageCopyTexture dst{};
- Converter::BufferSource src{};
- wgpu::TextureDataLayout layout{};
- wgpu::Extent3D sz{};
- Converter conv(env);
- if (!conv(dst, destination) || //
- !conv(src, data) || //
- !conv(layout, dataLayout) || //
- !conv(sz, size)) {
- return;
- }
-
- queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
- }
-
- void GPUQueue::copyExternalImageToTexture(Napi::Env,
- interop::GPUImageCopyExternalImage source,
- interop::GPUImageCopyTextureTagged destination,
- interop::GPUExtent3D copySize) {
- UNIMPLEMENTED();
- }
-
- std::optional<std::string> GPUQueue::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUQueue::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h
deleted file mode 100644
index 69952c6dd42..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUQUEUE_H_
-#define DAWN_NODE_BINDING_GPUQUEUE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/binding/AsyncRunner.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
- class GPUQueue final : public interop::GPUQueue {
- public:
- GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
-
- // interop::GPUQueue interface compliance
- void submit(
- Napi::Env,
- std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
- interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
- void writeBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 bufferOffset,
- interop::BufferSource data,
- interop::GPUSize64 dataOffset,
- std::optional<interop::GPUSize64> size) override;
- void writeTexture(Napi::Env,
- interop::GPUImageCopyTexture destination,
- interop::BufferSource data,
- interop::GPUImageDataLayout dataLayout,
- interop::GPUExtent3D size) override;
- void copyExternalImageToTexture(Napi::Env,
- interop::GPUImageCopyExternalImage source,
- interop::GPUImageCopyTextureTagged destination,
- interop::GPUExtent3D copySize) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::Queue queue_;
- std::shared_ptr<AsyncRunner> async_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUQUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp
deleted file mode 100644
index 2f42ac72d3c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPURenderBundle.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPURenderBundle.h"
-#include "src/dawn_node/binding/GPURenderPipeline.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderBundle
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
- }
-
- std::optional<std::string> GPURenderBundle::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderBundle::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h
deleted file mode 100644
index 9f824f239ea..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
-#define DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
- // wgpu::RenderBundle.
- class GPURenderBundle final : public interop::GPURenderBundle {
- public:
- GPURenderBundle(wgpu::RenderBundle bundle);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderBundle &() const {
- return bundle_;
- }
-
- // interop::GPURenderBundle interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::RenderBundle bundle_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp
deleted file mode 100644
index 123741d6c4d..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPURenderBundleEncoder.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPUBindGroup.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPURenderBundle.h"
-#include "src/dawn_node/binding/GPURenderPipeline.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderBundleEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
- : enc_(std::move(enc)) {
- }
-
- interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
- Napi::Env env,
- interop::GPURenderBundleDescriptor descriptor) {
- wgpu::RenderBundleDescriptor desc{};
-
- return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
- }
-
- void GPURenderBundleEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
- }
-
- void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
-
- void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPURenderBundleEncoder::setPipeline(
- Napi::Env env,
- interop::Interface<interop::GPURenderPipeline> pipeline) {
- Converter conv(env);
-
- wgpu::RenderPipeline p{};
- if (!conv(p, pipeline)) {
- return;
- }
-
- enc_.SetPipeline(p);
- }
-
- void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- wgpu::IndexFormat f{};
- uint64_t o = 0;
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || //
- !conv(f, indexFormat) || //
- !conv(o, offset) || //
- !conv(s, size)) {
- return;
- }
-
- enc_.SetIndexBuffer(b, f, o, s);
- }
-
- void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || !conv(s, size)) {
- return;
- }
- enc_.SetVertexBuffer(slot, b, offset, s);
- }
-
- void GPURenderBundleEncoder::draw(Napi::Env env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) {
- enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
- }
-
- void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) {
- enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
- }
-
- void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint32_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndirect(b, o);
- }
-
- void GPURenderBundleEncoder::drawIndexedIndirect(
- Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint32_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndexedIndirect(b, o);
- }
-
- std::optional<std::string> GPURenderBundleEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderBundleEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h
deleted file mode 100644
index 3d11e330e4c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
-#define DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
- // wgpu::RenderBundleEncoder.
- class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
- public:
- GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
-
- // interop::GPURenderBundleEncoder interface compliance
- interop::Interface<interop::GPURenderBundle> finish(
- Napi::Env,
- interop::GPURenderBundleDescriptor descriptor) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPURenderPipeline> pipeline) override;
- void setIndexBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void setVertexBuffer(Napi::Env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void draw(Napi::Env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndexed(Napi::Env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void drawIndexedIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::RenderBundleEncoder enc_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp
deleted file mode 100644
index 5dce4f23894..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPURenderPassEncoder.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/GPUBindGroup.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/binding/GPUQuerySet.h"
-#include "src/dawn_node/binding/GPURenderBundle.h"
-#include "src/dawn_node/binding/GPURenderPipeline.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderPassEncoder
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
- }
-
- void GPURenderPassEncoder::setViewport(Napi::Env,
- float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
- enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
- }
-
- void GPURenderPassEncoder::setScissorRect(Napi::Env,
- interop::GPUIntegerCoordinate x,
- interop::GPUIntegerCoordinate y,
- interop::GPUIntegerCoordinate width,
- interop::GPUIntegerCoordinate height) {
- enc_.SetScissorRect(x, y, width, height);
- }
-
- void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
- Converter conv(env);
-
- wgpu::Color c{};
- if (!conv(c, color)) {
- return;
- }
-
- enc_.SetBlendConstant(&c);
- }
-
- void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
- enc_.SetStencilReference(reference);
- }
-
- void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
- enc_.BeginOcclusionQuery(queryIndex);
- }
-
- void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
- enc_.EndOcclusionQuery();
- }
-
- void GPURenderPassEncoder::beginPipelineStatisticsQuery(
- Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPassEncoder::endPipelineStatisticsQuery(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPassEncoder::writeTimestamp(Napi::Env env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) {
- Converter conv(env);
-
- wgpu::QuerySet q{};
- if (!conv(q, querySet)) {
- return;
- }
-
- enc_.WriteTimestamp(q, queryIndex);
- }
-
- void GPURenderPassEncoder::executeBundles(
- Napi::Env env,
- std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
- Converter conv(env);
-
- wgpu::RenderBundle* bundles = nullptr;
- uint32_t bundleCount = 0;
- if (!conv(bundles, bundleCount, bundles_in)) {
- return;
- }
-
- enc_.ExecuteBundles(bundleCount, bundles);
- }
-
- void GPURenderPassEncoder::endPass(Napi::Env) {
- enc_.EndPass();
- }
-
- void GPURenderPassEncoder::setBindGroup(
- Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- uint32_t* offsets = nullptr;
- uint32_t num_offsets = 0;
- if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, num_offsets, offsets);
- }
-
- void GPURenderPassEncoder::setBindGroup(Napi::Env env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) {
- Converter conv(env);
-
- wgpu::BindGroup bg{};
- if (!conv(bg, bindGroup)) {
- return;
- }
-
- enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
- dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
- }
-
- void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
- enc_.PushDebugGroup(groupLabel.c_str());
- }
-
- void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
- enc_.PopDebugGroup();
- }
-
- void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
- enc_.InsertDebugMarker(markerLabel.c_str());
- }
-
- void GPURenderPassEncoder::setPipeline(
- Napi::Env env,
- interop::Interface<interop::GPURenderPipeline> pipeline) {
- Converter conv(env);
- wgpu::RenderPipeline rp{};
- if (!conv(rp, pipeline)) {
- return;
- }
- enc_.SetPipeline(rp);
- }
-
- void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- wgpu::IndexFormat f;
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || //
- !conv(f, indexFormat) || //
- !conv(s, size)) {
- return;
- }
- enc_.SetIndexBuffer(b, f, offset, s);
- }
-
- void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint64_t s = wgpu::kWholeSize;
- if (!conv(b, buffer) || !conv(s, size)) {
- return;
- }
- enc_.SetVertexBuffer(slot, b, offset, s);
- }
-
- void GPURenderPassEncoder::draw(Napi::Env env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) {
- enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
- }
-
- void GPURenderPassEncoder::drawIndexed(Napi::Env env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) {
- enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
- }
-
- void GPURenderPassEncoder::drawIndirect(Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint32_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndirect(b, o);
- }
-
- void GPURenderPassEncoder::drawIndexedIndirect(
- Napi::Env env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) {
- Converter conv(env);
-
- wgpu::Buffer b{};
- uint32_t o = 0;
-
- if (!conv(b, indirectBuffer) || //
- !conv(o, indirectOffset)) {
- return;
- }
- enc_.DrawIndexedIndirect(b, o);
- }
-
- std::optional<std::string> GPURenderPassEncoder::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h
deleted file mode 100644
index 866aaab822c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
-#define DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
- // wgpu::RenderPassEncoder.
- class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
- public:
- GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderPassEncoder &() const {
- return enc_;
- }
-
- // interop::GPURenderPassEncoder interface compliance
- void setViewport(Napi::Env,
- float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) override;
- void setScissorRect(Napi::Env,
- interop::GPUIntegerCoordinate x,
- interop::GPUIntegerCoordinate y,
- interop::GPUIntegerCoordinate width,
- interop::GPUIntegerCoordinate height) override;
- void setBlendConstant(Napi::Env, interop::GPUColor color) override;
- void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
- void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
- void endOcclusionQuery(Napi::Env) override;
- void beginPipelineStatisticsQuery(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void endPipelineStatisticsQuery(Napi::Env) override;
- void writeTimestamp(Napi::Env,
- interop::Interface<interop::GPUQuerySet> querySet,
- interop::GPUSize32 queryIndex) override;
- void executeBundles(
- Napi::Env,
- std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
- void endPass(Napi::Env) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
- void setBindGroup(Napi::Env,
- interop::GPUIndex32 index,
- interop::Interface<interop::GPUBindGroup> bindGroup,
- interop::Uint32Array dynamicOffsetsData,
- interop::GPUSize64 dynamicOffsetsDataStart,
- interop::GPUSize32 dynamicOffsetsDataLength) override;
- void pushDebugGroup(Napi::Env, std::string groupLabel) override;
- void popDebugGroup(Napi::Env) override;
- void insertDebugMarker(Napi::Env, std::string markerLabel) override;
- void setPipeline(Napi::Env,
- interop::Interface<interop::GPURenderPipeline> pipeline) override;
- void setIndexBuffer(Napi::Env,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUIndexFormat indexFormat,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void setVertexBuffer(Napi::Env,
- interop::GPUIndex32 slot,
- interop::Interface<interop::GPUBuffer> buffer,
- interop::GPUSize64 offset,
- std::optional<interop::GPUSize64> size) override;
- void draw(Napi::Env,
- interop::GPUSize32 vertexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndexed(Napi::Env,
- interop::GPUSize32 indexCount,
- interop::GPUSize32 instanceCount,
- interop::GPUSize32 firstIndex,
- interop::GPUSignedOffset32 baseVertex,
- interop::GPUSize32 firstInstance) override;
- void drawIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- void drawIndexedIndirect(Napi::Env,
- interop::Interface<interop::GPUBuffer> indirectBuffer,
- interop::GPUSize64 indirectOffset) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::RenderPassEncoder enc_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp
deleted file mode 100644
index 3f363fc2a46..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPURenderPipeline.h"
-
-#include "src/dawn_node/binding/GPUBindGroupLayout.h"
-#include "src/dawn_node/binding/GPUBuffer.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPURenderPipeline
- ////////////////////////////////////////////////////////////////////////////////
- GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
- : pipeline_(std::move(pipeline)) {
- }
-
- interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
- Napi::Env env,
- uint32_t index) {
- return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
- env, pipeline_.GetBindGroupLayout(index));
- }
-
- std::optional<std::string> GPURenderPipeline::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPURenderPipeline::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h
deleted file mode 100644
index fc15fc6acbd..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
-#define DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
- // wgpu::RenderPipeline.
- class GPURenderPipeline final : public interop::GPURenderPipeline {
- public:
- GPURenderPipeline(wgpu::RenderPipeline pipeline);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::RenderPipeline &() const {
- return pipeline_;
- }
-
- // interop::GPURenderPipeline interface compliance
- interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
- uint32_t index) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::RenderPipeline pipeline_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp
deleted file mode 100644
index 6de3aa49c0c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUSampler.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUSampler
- ////////////////////////////////////////////////////////////////////////////////
- GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
- }
-
- std::optional<std::string> GPUSampler::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUSampler::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h
deleted file mode 100644
index 24e77d2b5e7..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUSAMPLER_H_
-#define DAWN_NODE_BINDING_GPUSAMPLER_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
- // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
- class GPUSampler final : public interop::GPUSampler {
- public:
- GPUSampler(wgpu::Sampler sampler);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Sampler &() const {
- return sampler_;
- }
-
- // interop::GPUSampler interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::Sampler sampler_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUSAMPLER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp
deleted file mode 100644
index 52efabd3eac..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUShaderModule.h"
-
-#include <memory>
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUShaderModule
- ////////////////////////////////////////////////////////////////////////////////
- GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
- : shader_(std::move(shader)), async_(std::move(async)) {
- }
-
- interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
- GPUShaderModule::compilationInfo(Napi::Env env) {
- struct GPUCompilationMessage : public interop::GPUCompilationMessage {
- WGPUCompilationMessage message;
-
- GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {
- }
- std::string getMessage(Napi::Env) override {
- return message.message;
- }
- interop::GPUCompilationMessageType getType(Napi::Env) override {
- switch (message.type) {
- case WGPUCompilationMessageType_Error:
- return interop::GPUCompilationMessageType::kError;
- case WGPUCompilationMessageType_Warning:
- return interop::GPUCompilationMessageType::kWarning;
- case WGPUCompilationMessageType_Info:
- return interop::GPUCompilationMessageType::kInfo;
- default:
- UNIMPLEMENTED();
- }
- }
- uint64_t getLineNum(Napi::Env) override {
- return message.lineNum;
- }
- uint64_t getLinePos(Napi::Env) override {
- return message.linePos;
- }
- uint64_t getOffset(Napi::Env) override {
- return message.offset;
- }
- uint64_t getLength(Napi::Env) override {
- return message.length;
- }
- };
-
- using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
-
- struct GPUCompilationInfo : public interop::GPUCompilationInfo {
- std::vector<Napi::ObjectReference> messages;
-
- GPUCompilationInfo(Napi::Env env, Messages msgs) {
- messages.reserve(msgs.size());
- for (auto& msg : msgs) {
- messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
- }
- }
- Messages getMessages(Napi::Env) override {
- Messages out;
- out.reserve(messages.size());
- for (auto& msg : messages) {
- out.emplace_back(msg.Value());
- }
- return out;
- }
- };
-
- using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
-
- struct Context {
- Napi::Env env;
- Promise promise;
- AsyncTask task;
- };
- auto ctx = new Context{env, Promise(env, PROMISE_INFO), async_};
- auto promise = ctx->promise;
-
- shader_.GetCompilationInfo(
- [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
- void* userdata) {
- auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
- Messages messages(compilationInfo->messageCount);
- for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
- auto& msg = compilationInfo->messages[i];
- messages[i] =
- interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
- }
-
- c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
- c->env, c->env, std::move(messages)));
- },
- ctx);
-
- return promise;
- }
-
- std::optional<std::string> GPUShaderModule::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUShaderModule::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h
deleted file mode 100644
index 2fcd140120c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUSHADERMODULE_H_
-#define DAWN_NODE_BINDING_GPUSHADERMODULE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/binding/AsyncRunner.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
- // wgpu::ShaderModule.
- class GPUShaderModule final : public interop::GPUShaderModule {
- public:
- GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::ShaderModule &() const {
- return shader_;
- }
-
- // interop::GPUShaderModule interface compliance
- interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
- Napi::Env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::ShaderModule shader_;
- std::shared_ptr<AsyncRunner> async_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUSHADERMODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp
deleted file mode 100644
index 4a8399bc737..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUSupportedLimits.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUSupportedLimits
- ////////////////////////////////////////////////////////////////////////////////
-
- GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits)
- : limits_(std::move(limits)) {
- }
-
- uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
- return limits_.limits.maxTextureDimension1D;
- }
-
- uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
- return limits_.limits.maxTextureDimension2D;
- }
-
- uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
- return limits_.limits.maxTextureDimension3D;
- }
-
- uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
- return limits_.limits.maxTextureArrayLayers;
- }
-
- uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
- return limits_.limits.maxBindGroups;
- }
-
- uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
- return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
- }
-
- uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
- return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
- }
-
- uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
- return limits_.limits.maxSampledTexturesPerShaderStage;
- }
-
- uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
- return limits_.limits.maxSamplersPerShaderStage;
- }
-
- uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
- return limits_.limits.maxStorageBuffersPerShaderStage;
- }
-
- uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
- return limits_.limits.maxStorageTexturesPerShaderStage;
- }
-
- uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
- return limits_.limits.maxUniformBuffersPerShaderStage;
- }
-
- uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
- return limits_.limits.maxUniformBufferBindingSize;
- }
-
- uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
- return limits_.limits.maxStorageBufferBindingSize;
- }
-
- uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
- return limits_.limits.minUniformBufferOffsetAlignment;
- }
-
- uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
- return limits_.limits.minStorageBufferOffsetAlignment;
- }
-
- uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
- return limits_.limits.maxVertexBuffers;
- }
-
- uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
- return limits_.limits.maxVertexAttributes;
- }
-
- uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
- return limits_.limits.maxVertexBufferArrayStride;
- }
-
- uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
- return limits_.limits.maxInterStageShaderComponents;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupStorageSize;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
- return limits_.limits.maxComputeInvocationsPerWorkgroup;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeX;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeY;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupSizeZ;
- }
-
- uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
- return limits_.limits.maxComputeWorkgroupsPerDimension;
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h
deleted file mode 100644
index faed8139492..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
-#define DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
- class GPUSupportedLimits final : public interop::GPUSupportedLimits {
- public:
- GPUSupportedLimits(wgpu::SupportedLimits);
-
- // interop::GPUSupportedLimits interface compliance
- uint32_t getMaxTextureDimension1D(Napi::Env) override;
- uint32_t getMaxTextureDimension2D(Napi::Env) override;
- uint32_t getMaxTextureDimension3D(Napi::Env) override;
- uint32_t getMaxTextureArrayLayers(Napi::Env) override;
- uint32_t getMaxBindGroups(Napi::Env) override;
- uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
- uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
- uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
- uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
- uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
- uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
- uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
- uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
- uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
- uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
- uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
- uint32_t getMaxVertexBuffers(Napi::Env) override;
- uint32_t getMaxVertexAttributes(Napi::Env) override;
- uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
- uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
- uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
- uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
-
- private:
- wgpu::SupportedLimits limits_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp
deleted file mode 100644
index 284cd8a91f9..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUTexture.h"
-
-#include "src/dawn_node/binding/Converter.h"
-#include "src/dawn_node/binding/Errors.h"
-#include "src/dawn_node/binding/GPUTextureView.h"
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUTexture
- ////////////////////////////////////////////////////////////////////////////////
- GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
- }
-
- interop::Interface<interop::GPUTextureView> GPUTexture::createView(
- Napi::Env env,
- interop::GPUTextureViewDescriptor descriptor) {
- if (!texture_) {
- Errors::OperationError(env).ThrowAsJavaScriptException();
- return {};
- }
-
- wgpu::TextureViewDescriptor desc{};
- Converter conv(env);
- if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) || //
- !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
- !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) || //
- !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) || //
- !conv(desc.format, descriptor.format) || //
- !conv(desc.dimension, descriptor.dimension) || //
- !conv(desc.aspect, descriptor.aspect)) {
- return {};
- }
- return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
- }
-
- void GPUTexture::destroy(Napi::Env) {
- texture_.Destroy();
- }
-
- std::optional<std::string> GPUTexture::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUTexture::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- }
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h
deleted file mode 100644
index f5a2a473928..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUTEXTURE_H_
-#define DAWN_NODE_BINDING_GPUTEXTURE_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
- class GPUTexture final : public interop::GPUTexture {
- public:
- GPUTexture(wgpu::Texture texture);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::Texture &() const {
- return texture_;
- }
-
- // interop::GPUTexture interface compliance
- interop::Interface<interop::GPUTextureView> createView(
- Napi::Env,
- interop::GPUTextureViewDescriptor descriptor) override;
- void destroy(Napi::Env) override;
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::Texture texture_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUTEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp
deleted file mode 100644
index f03bc3f266e..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/binding/GPUTextureView.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu { namespace binding {
-
- ////////////////////////////////////////////////////////////////////////////////
- // wgpu::bindings::GPUTextureView
- ////////////////////////////////////////////////////////////////////////////////
- GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
- }
-
- std::optional<std::string> GPUTextureView::getLabel(Napi::Env) {
- UNIMPLEMENTED();
- }
-
- void GPUTextureView::setLabel(Napi::Env, std::optional<std::string> value) {
- UNIMPLEMENTED();
- };
-
-}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h
deleted file mode 100644
index 8590735093d..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
-#define DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-#include "napi.h"
-#include "src/dawn_node/interop/WebGPU.h"
-
-namespace wgpu { namespace binding {
-
- // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
- // wgpu::TextureView.
- class GPUTextureView final : public interop::GPUTextureView {
- public:
- GPUTextureView(wgpu::TextureView view);
-
- // Implicit cast operator to Dawn GPU object
- inline operator const wgpu::TextureView &() const {
- return view_;
- }
-
- // interop::GPUTextureView interface compliance
- std::optional<std::string> getLabel(Napi::Env) override;
- void setLabel(Napi::Env, std::optional<std::string> value) override;
-
- private:
- wgpu::TextureView view_;
- };
-
-}} // namespace wgpu::binding
-
-#endif // DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt
deleted file mode 100644
index 0b84c0ab971..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2021 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Paths to generated files
-set(INTEROP_GEN_DIR "${GEN_DIR}/src/dawn_node/interop")
-set(INTEROP_WEBGPU_H "${INTEROP_GEN_DIR}/WebGPU.h")
-set(INTEROP_WEBGPU_CPP "${INTEROP_GEN_DIR}/WebGPU.cpp")
-
-idlgen(
- TEMPLATE
- "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.h.tmpl"
- IDLS
- "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
- "${WEBGPU_IDL_PATH}"
- DEPENDS
- "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
- OUTPUT
- "${INTEROP_WEBGPU_H}"
-)
-
-idlgen(
- TEMPLATE
- "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.cpp.tmpl"
- IDLS
- "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
- "${WEBGPU_IDL_PATH}"
- DEPENDS
- "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
- OUTPUT
- "${INTEROP_WEBGPU_CPP}"
-)
-
-add_library(dawn_node_interop STATIC
- "Core.cpp"
- "Core.h"
- "${INTEROP_WEBGPU_H}"
- "${INTEROP_WEBGPU_CPP}"
-)
-
-target_include_directories(dawn_node_interop
- PRIVATE
- "${CMAKE_SOURCE_DIR}"
- "${NODE_API_HEADERS_DIR}/include"
- "${NODE_ADDON_API_DIR}"
- "${GEN_DIR}"
-)
-
-target_link_libraries(dawn_node_interop
- PRIVATE
- dawncpp
-)
-
-# dawn_node targets require C++17
-set_property(
- TARGET dawn_node_interop
- PROPERTY CXX_STANDARD 17
-)
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp b/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp
deleted file mode 100644
index 8ee22cf90fb..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "src/dawn_node/interop/Core.h"
-
-namespace wgpu { namespace interop {
-
- Result Success;
-
- Result Error(std::string msg) {
- return {msg};
- }
-
- Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
- if (value.IsBoolean()) {
- out = value.ToBoolean();
- return Success;
- }
- return Error("value is not a boolean");
- }
- Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
- if (value.IsString()) {
- out = value.ToString();
- return Success;
- }
- return Error("value is not a string");
- }
- Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Uint32Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().Int64Value();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
- if (value.IsNumber()) {
- // Note that the JS Number type only stores doubles, so the max integer
- // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
- // with 1 implicit bit). This is why there's no UInt64Value() function.
- out = static_cast<uint64_t>(value.ToNumber().Int64Value());
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().FloatValue();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
- return Napi::Value::From(env, value);
- }
-
- Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
- if (value.IsNumber()) {
- out = value.ToNumber().DoubleValue();
- return Success;
- }
- return Error("value is not a number");
- }
- Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
- return Napi::Value::From(env, value);
- }
-
-}} // namespace wgpu::interop
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Core.h b/chromium/third_party/dawn/src/dawn_node/interop/Core.h
deleted file mode 100644
index d4d0ec60a06..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/Core.h
+++ /dev/null
@@ -1,692 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file provides core interop helpers used by the code generated by the
-// templates.
-
-#ifndef DAWN_NODE_INTEROP_CORE_WEBGPU_H_
-#define DAWN_NODE_INTEROP_CORE_WEBGPU_H_
-
-#include <cstdint>
-#include <optional>
-#include <string>
-#include <type_traits>
-#include <unordered_map>
-#include <variant>
-#include <vector>
-
-#include "napi.h"
-
-#include "src/dawn_node/utils/Debug.h"
-
-#define ENABLE_INTEROP_LOGGING 0 // Enable for verbose interop logging
-
-#if ENABLE_INTEROP_LOGGING
-# define INTEROP_LOG(...) LOG(__VA_ARGS__)
-#else
-# define INTEROP_LOG(...)
-#endif
-
-// A helper macro for constructing a PromiseInfo with the current file, function and line.
-// See PromiseInfo
-#define PROMISE_INFO \
- ::wgpu::interop::PromiseInfo { \
- __FILE__, __FUNCTION__, __LINE__ \
- }
-
-namespace wgpu { namespace interop {
-
- ////////////////////////////////////////////////////////////////////////////////
- // Primitive JavaScript types
- ////////////////////////////////////////////////////////////////////////////////
- using Object = Napi::Object;
- using ArrayBuffer = Napi::ArrayBuffer;
- using Int8Array = Napi::TypedArrayOf<int8_t>;
- using Int16Array = Napi::TypedArrayOf<int16_t>;
- using Int32Array = Napi::TypedArrayOf<int32_t>;
- using Uint8Array = Napi::TypedArrayOf<uint8_t>;
- using Uint16Array = Napi::TypedArrayOf<uint16_t>;
- using Uint32Array = Napi::TypedArrayOf<uint32_t>;
- using Float32Array = Napi::TypedArrayOf<float>;
- using Float64Array = Napi::TypedArrayOf<double>;
- using DataView = Napi::TypedArray;
-
- template <typename T>
- using FrozenArray = std::vector<T>;
-
- ////////////////////////////////////////////////////////////////////////////////
- // Result
- ////////////////////////////////////////////////////////////////////////////////
-
- // Result is used to hold an success / error state by functions that perform JS <-> C++
- // conversion
- struct [[nodiscard]] Result {
- // Returns true if the operation succeeded, false if there was an error
- inline operator bool() const {
- return error.empty();
- }
-
- // If Result is an error, then a new Error is returned with the
- // stringified values append to the error message.
- // If Result is a success, then a success Result is returned.
- template <typename... VALUES>
- Result Append(VALUES && ... values) {
- if (*this) {
- return *this;
- }
- std::stringstream ss;
- ss << error << "\n";
- utils::Write(ss, std::forward<VALUES>(values)...);
- return {ss.str()};
- }
-
- // The error message, if the operation failed.
- std::string error;
- };
-
- // A successful result
- extern Result Success;
-
- // Returns a Result with the given error message
- Result Error(std::string msg);
-
- ////////////////////////////////////////////////////////////////////////////////
- // Interface<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Interface<T> is a templated wrapper around a JavaScript object, which
- // implements the template-generated interface type T. Interfaces are returned
- // by either calling T::Bind() or T::Create().
- template <typename T>
- class Interface {
- public:
- // Constructs an Interface with no JS object.
- inline Interface() {
- }
-
- // Constructs an Interface wrapping the given JS object.
- // The JS object must have been created with a call to T::Bind().
- explicit inline Interface(Napi::Object o) : object(o) {
- }
-
- // Implicit conversion operators to Napi objects.
- inline operator napi_value() const {
- return object;
- }
- inline operator const Napi::Value &() const {
- return object;
- }
- inline operator const Napi::Object &() const {
- return object;
- }
-
- // Member and dereference operators
- inline T* operator->() const {
- return T::Unwrap(object);
- }
- inline T* operator*() const {
- return T::Unwrap(object);
- }
-
- // As<IMPL>() returns the unwrapped object cast to the implementation type.
- // The interface implementation *must* be of the template type IMPL.
- template <typename IMPL>
- inline IMPL* As() const {
- return static_cast<IMPL*>(T::Unwrap(object));
- }
-
- private:
- Napi::Object object;
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Promise<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Info holds details about where the promise was constructed.
- // Used for printing debug messages when a promise is finalized without being resolved
- // or rejected.
- // Use the PROMISE_INFO macro to populate this structure.
- struct PromiseInfo {
- const char* file = nullptr;
- const char* function = nullptr;
- int line = 0;
- };
-
- namespace detail {
- // Base class for Promise<T> specializations.
- class PromiseBase {
- public:
- // Implicit conversion operators to Napi promises.
- inline operator napi_value() const {
- return state->deferred.Promise();
- }
- inline operator Napi::Value() const {
- return state->deferred.Promise();
- }
- inline operator Napi::Promise() const {
- return state->deferred.Promise();
- }
-
- // Reject() rejects the promise with the given failure value.
- void Reject(Napi::Value value) const {
- state->deferred.Reject(value);
- state->resolved_or_rejected = true;
- }
- void Reject(Napi::Error err) const {
- Reject(err.Value());
- }
- void Reject(std::string err) const {
- Reject(Napi::Error::New(state->deferred.Env(), err));
- }
-
- protected:
- void Resolve(Napi::Value value) const {
- state->deferred.Resolve(value);
- state->resolved_or_rejected = true;
- }
-
- struct State {
- Napi::Promise::Deferred deferred;
- PromiseInfo info;
- bool resolved_or_rejected = false;
- };
-
- PromiseBase(Napi::Env env, const PromiseInfo& info)
- : state(new State{Napi::Promise::Deferred::New(env), info}) {
- state->deferred.Promise().AddFinalizer(
- [](Napi::Env, State* state) {
- // TODO(https://github.com/gpuweb/cts/issues/784):
- // Devices are never destroyed, so we always end up
- // leaking the Device.lost promise. Enable this once
- // fixed.
- if ((false)) {
- if (!state->resolved_or_rejected) {
- ::wgpu::utils::Fatal("Promise not resolved or rejected",
- state->info.file, state->info.line,
- state->info.function);
- }
- }
- delete state;
- },
- state);
- }
-
- State* const state;
- };
- } // namespace detail
-
- // Promise<T> is a templated wrapper around a JavaScript promise, which can
- // resolve to the template type T.
- template <typename T>
- class Promise : public detail::PromiseBase {
- public:
- // Constructor
- Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
- }
-
- // Resolve() fulfills the promise with the given value.
- void Resolve(T&& value) const {
- PromiseBase::Resolve(ToJS(state->deferred.Env(), std::forward<T>(value)));
- }
- };
-
- // Specialization for Promises that resolve with no value
- template <>
- class Promise<void> : public detail::PromiseBase {
- public:
- // Constructor
- Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
- }
-
- // Resolve() fulfills the promise.
- void Resolve() const {
- PromiseBase::Resolve(state->deferred.Env().Undefined());
- }
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Converter<T>
- ////////////////////////////////////////////////////////////////////////////////
-
- // Converter<T> is specialized for each type T which can be converted from C++
- // to JavaScript, or JavaScript to C++.
- // Each specialization of Converter<T> is expected to have two static methods
- // with the signatures:
- //
- // // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
- // static Result FromJS(Napi::Env, Napi::Value in, T& out);
- //
- // // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
- // // this value.
- // static Napi::Value ToJS(Napi::Env, T in);
- template <typename T>
- class Converter {};
-
- template <>
- class Converter<Napi::Object> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
- if (value.IsObject()) {
- out = value.ToObject();
- return Success;
- }
- return Error("value is not an object");
- }
- static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
- return value;
- }
- };
-
- template <>
- class Converter<ArrayBuffer> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
- if (value.IsArrayBuffer()) {
- out = value.As<ArrayBuffer>();
- return Success;
- }
- return Error("value is not a ArrayBuffer");
- };
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
- }
- };
-
- template <>
- class Converter<Napi::TypedArray> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
- if (value.IsTypedArray()) {
- out = value.As<Napi::TypedArray>();
- return Success;
- }
- return Error("value is not a TypedArray");
- };
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
- }
- };
-
- template <typename T>
- class Converter<Napi::TypedArrayOf<T>> {
- public:
- // clang-format off
- // The Napi element type of T
- static constexpr napi_typedarray_type element_type =
- std::is_same<T, int8_t>::value ? napi_int8_array
- : std::is_same<T, uint8_t>::value ? napi_uint8_array
- : std::is_same<T, int16_t>::value ? napi_int16_array
- : std::is_same<T, uint16_t>::value ? napi_uint16_array
- : std::is_same<T, int32_t>::value ? napi_int32_array
- : std::is_same<T, uint32_t>::value ? napi_uint32_array
- : std::is_same<T, float>::value ? napi_float32_array
- : std::is_same<T, double>::value ? napi_float64_array
- : std::is_same<T, int64_t>::value ? napi_bigint64_array
- : std::is_same<T, uint64_t>::value ? napi_biguint64_array
- : static_cast<napi_typedarray_type>(-1);
- // clang-format on
- static_assert(static_cast<int>(element_type) >= 0,
- "unsupported T type for Napi::TypedArrayOf<T>");
- static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
- if (value.IsTypedArray()) {
- auto arr = value.As<Napi::TypedArrayOf<T>>();
- if (arr.TypedArrayType() == element_type) {
- out = arr;
- return Success;
- }
- return Error("value is not a TypedArray of the correct element type");
- }
- return Error("value is not a TypedArray");
- };
- static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
- return value;
- }
- };
-
- template <>
- class Converter<std::string> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, std::string&);
- static Napi::Value ToJS(Napi::Env, std::string);
- };
-
- template <>
- class Converter<bool> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, bool&);
- static Napi::Value ToJS(Napi::Env, bool);
- };
-
- template <>
- class Converter<int8_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int8_t&);
- static Napi::Value ToJS(Napi::Env, int8_t);
- };
-
- template <>
- class Converter<uint8_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
- static Napi::Value ToJS(Napi::Env, uint8_t);
- };
-
- template <>
- class Converter<int16_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int16_t&);
- static Napi::Value ToJS(Napi::Env, int16_t);
- };
-
- template <>
- class Converter<uint16_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
- static Napi::Value ToJS(Napi::Env, uint16_t);
- };
-
- template <>
- class Converter<int32_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int32_t&);
- static Napi::Value ToJS(Napi::Env, int32_t);
- };
-
- template <>
- class Converter<uint32_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
- static Napi::Value ToJS(Napi::Env, uint32_t);
- };
-
- template <>
- class Converter<int64_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, int64_t&);
- static Napi::Value ToJS(Napi::Env, int64_t);
- };
-
- template <>
- class Converter<uint64_t> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
- static Napi::Value ToJS(Napi::Env, uint64_t);
- };
-
- template <>
- class Converter<float> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, float&);
- static Napi::Value ToJS(Napi::Env, float);
- };
-
- template <>
- class Converter<double> {
- public:
- static Result FromJS(Napi::Env, Napi::Value, double&);
- static Napi::Value ToJS(Napi::Env, double);
- };
-
- template <typename T>
- class Converter<Interface<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
- if (!value.IsObject()) {
- return Error("value is not object");
- }
- auto obj = value.As<Napi::Object>();
- if (!T::Unwrap(obj)) {
- return Error("object is not of the correct interface type");
- }
- out = Interface<T>(obj);
- return Success;
- }
- static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
- return {env, value};
- }
- };
-
- template <typename T>
- class Converter<std::optional<T>> {
- public:
- static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
- if (value.IsNull() || value.IsUndefined()) {
- out.reset();
- return Success;
- }
- T v{};
- auto res = Converter<T>::FromJS(env, value, v);
- if (!res) {
- return res;
- }
- out = std::move(v);
- return Success;
- }
- static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
- if (value.has_value()) {
- return Converter<T>::ToJS(env, value.value());
- }
- return env.Null();
- }
- };
-
- template <typename T>
- class Converter<std::vector<T>> {
- public:
- static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
- if (!value.IsArray()) {
- return Error("value is not an array");
- }
- auto arr = value.As<Napi::Array>();
- std::vector<T> vec(arr.Length());
- for (size_t i = 0; i < vec.size(); i++) {
- auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
- if (!res) {
- return res.Append("for array element ", i);
- }
- }
- out = std::move(vec);
- return Success;
- }
- static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
- auto arr = Napi::Array::New(env, vec.size());
- for (size_t i = 0; i < vec.size(); i++) {
- arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
- }
- return arr;
- }
- };
-
- template <typename K, typename V>
- class Converter<std::unordered_map<K, V>> {
- public:
- static inline Result FromJS(Napi::Env env,
- Napi::Value value,
- std::unordered_map<K, V>& out) {
- if (!value.IsObject()) {
- return Error("value is not an object");
- }
- auto obj = value.ToObject();
- auto keys = obj.GetPropertyNames();
- std::unordered_map<K, V> map(keys.Length());
- for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
- K key{};
- V value{};
- auto key_res = Converter<K>::FromJS(env, keys[i], key);
- if (!key_res) {
- return key_res.Append("for object key");
- }
- auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
- if (!value_res) {
- return value_res.Append("for object value of key: ", key);
- }
- map[key] = value;
- }
- out = std::move(map);
- return Success;
- }
- static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
- auto obj = Napi::Object::New(env);
- for (auto it : value) {
- obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
- }
- return obj;
- }
- };
-
- template <typename... TYPES>
- class Converter<std::variant<TYPES...>> {
- template <typename TY>
- static inline Result TryFromJS(Napi::Env env,
- Napi::Value value,
- std::variant<TYPES...>& out) {
- TY v{};
- auto res = Converter<TY>::FromJS(env, value, v);
- if (!res) {
- return Error("no possible types matched");
- }
- out = std::move(v);
- return Success;
- }
-
- template <typename T0, typename T1, typename... TN>
- static inline Result TryFromJS(Napi::Env env,
- Napi::Value value,
- std::variant<TYPES...>& out) {
- if (TryFromJS<T0>(env, value, out)) {
- return Success;
- }
- return TryFromJS<T1, TN...>(env, value, out);
- }
-
- public:
- static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
- return TryFromJS<TYPES...>(env, value, out);
- }
- static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
- return std::visit(
- [&](auto&& v) {
- using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
- return Converter<T>::ToJS(env, v);
- },
- value);
- }
- };
-
- template <typename T>
- class Converter<Promise<T>> {
- public:
- static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
- UNIMPLEMENTED();
- }
- static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
- return promise;
- }
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // Helpers
- ////////////////////////////////////////////////////////////////////////////////
-
- // FromJS() is a helper function which delegates to
- // Converter<T>::FromJS()
- template <typename T>
- inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
- return Converter<T>::FromJS(env, value, out);
- }
-
- // FromJSOptional() is similar to FromJS(), but if 'value' is either null
- // or undefined then 'out' is left unassigned.
- template <typename T>
- inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
- if (value.IsNull() || value.IsUndefined()) {
- return Success;
- }
- return Converter<T>::FromJS(env, value, out);
- }
-
- // ToJS() is a helper function which delegates to Converter<T>::ToJS()
- template <typename T>
- inline Napi::Value ToJS(Napi::Env env, T&& value) {
- return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
- env, std::forward<T>(value));
- }
-
- // DefaultedParameter can be used in the tuple parameter types passed to
- // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
- // that have a default value. If the argument is omitted in the call, then
- // DefaultedParameter::default_value will be assigned to
- // DefaultedParameter::value.
- template <typename T>
- struct DefaultedParameter {
- T value; // The argument value assigned by FromJS()
- T default_value; // The default value if no argument supplied
-
- // Implicit conversion operator. Returns value.
- inline operator const T&() const {
- return value;
- }
- };
-
- // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
- template <typename T>
- struct IsDefaultedParameter {
- static constexpr bool value = false;
- };
- template <typename T>
- struct IsDefaultedParameter<DefaultedParameter<T>> {
- static constexpr bool value = true;
- };
-
- // FromJS() is a helper function for bulk converting the arguments of 'info'.
- // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
- // Parameters may be of the templated DefaultedParameter type, in which case
- // the parameter will default to the default-value if omitted.
- template <typename PARAM_TYPES, int BASE_INDEX = 0>
- inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
- if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
- using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
- auto& value = info[BASE_INDEX];
- auto& out = std::get<BASE_INDEX>(args);
- if constexpr (IsDefaultedParameter<T>::value) {
- // Parameter has a default value.
- // Check whether the argument was provided.
- if (value.IsNull() || value.IsUndefined()) {
- // Use default value for this parameter
- out.value = out.default_value;
- } else {
- // Argument was provided
- auto res = FromJS(info.Env(), value, out.value);
- if (!res) {
- return res;
- }
- }
- } else {
- // Parameter does not have a default value.
- auto res = FromJS(info.Env(), value, out);
- if (!res) {
- return res;
- }
- }
- // Convert the rest of the arguments
- return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
- } else {
- return Success;
- }
- }
-
-}} // namespace wgpu::interop
-
-#endif // DAWN_NODE_INTEROP_CORE_WEBGPU_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl
deleted file mode 100644
index d55dd3f8947..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl
+++ /dev/null
@@ -1,393 +0,0 @@
-{{/*
- Copyright 2021 The Dawn Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/}}
-
-{{- /*
---------------------------------------------------------------------------------
-Template file for use with src/dawn_node/tools/cmd/idlgen/main.go to generate
-the WebGPU.cpp source file.
-
-See:
-* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
- types used by this template
-* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
- used by this template
-* https://golang.org/pkg/text/template/ for documentation on the template syntax
---------------------------------------------------------------------------------
-*/ -}}
-
-{{- Include "WebGPUCommon.tmpl" -}}
-
-#include "src/dawn_node/interop/WebGPU.h"
-
-#include <unordered_map>
-
-#include "src/dawn_node/utils/Debug.h"
-
-namespace wgpu {
-namespace interop {
-
-namespace {
-
-{{template "Wrappers" $}}
-
-} // namespace
-
-{{ range $ := .Declarations}}
-{{- if IsDictionary $}}{{template "Dictionary" $}}
-{{- else if IsInterface $}}{{template "Interface" $}}
-{{- else if IsEnum $}}{{template "Enum" $}}
-{{- end}}
-{{- end}}
-
-
-void Initialize(Napi::Env env) {
- auto* wrapper = Wrappers::Init(env);
- auto global = env.Global();
-{{ range $ := .Declarations}}
-{{- if IsInterfaceOrNamespace $}}
- global.Set(Napi::String::New(env, "{{$.Name}}"), wrapper->{{$.Name}}_ctor.Value());
-{{- end}}
-{{- end}}
-}
-
-} // namespace interop
-} // namespace wgpu
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Wrappers emits the C++ 'Wrappers' class, which holds all the interface and
--- namespace interop wrapper classes.
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Wrappers"}}
-// Wrappers holds all the Napi class constructors, and Napi::ObjectWrap type
-// declarations, for each of the WebIDL interface and namespace types.
-class Wrappers {
- Wrappers(Napi::Env env) {
-{{- range $ := .Declarations}}
-{{- if IsInterfaceOrNamespace $}}
- {{$.Name}}_ctor = Napi::Persistent(W{{$.Name}}::Class(env));
-{{- end}}
-{{- end}}
- }
-
- static Wrappers* instance;
-
-public:
-{{- range $ := .Declarations}}
-{{- if IsInterfaceOrNamespace $}}{{template "Wrapper" $}}
-{{- end}}
-{{- end}}
-
- // Allocates and constructs the Wrappers instance
- static Wrappers* Init(Napi::Env env) {
- instance = new Wrappers(env);
- return instance;
- }
-
- // Destructs and frees the Wrappers instance
- static void Term(Napi::Env env) {
- delete instance;
- instance = nullptr;
- }
-
- static Wrappers* For(Napi::Env env) {
- // Currently Napi only actually supports a single Env, so there's no point
- // maintaining a map of Env to Wrapper. Note: This might not always be true.
- return instance;
- }
-
-{{ range $ := .Declarations}}
-{{- if IsInterfaceOrNamespace $}}
- Napi::FunctionReference {{$.Name}}_ctor;
-{{- end}}
-{{- end}}
-};
-
-Wrappers* Wrappers::instance = nullptr;
-{{- end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Wrapper emits the C++ wrapper class for the given ast.Interface or
--- ast.Namespace.
--- This wrapper class inherits from Napi::ObjectWrap, which binds the lifetime
--- of the JavaScript object to the lifetime of the wrapper class instance.
--- If the wrapper is for an interface, the wrapper object holds a unique_ptr to
--- the interface implementation, and delegates all exposed method calls on to
--- the implementation.
--- See: https://github.com/nodejs/node-addon-api/blob/main/doc/object_wrap.md
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Wrapper"}}
- struct W{{$.Name}} : public Napi::ObjectWrap<W{{$.Name}}> {
-{{- if IsInterface $}}
- std::unique_ptr<{{$.Name}}> impl;
-{{- end}}
- static Napi::Function Class(Napi::Env env) {
- return DefineClass(env, "{{$.Name}}", {
-{{ if $s := SetlikeOf $}}
- InstanceMethod("has", &W{{$.Name}}::has),
- InstanceMethod("keys", &W{{$.Name}}::keys),
-{{- end}}
-{{- range $m := MethodsOf $}}
- InstanceMethod("{{$m.Name}}", &W{{$.Name}}::{{$m.Name}}),
-{{- end}}
-{{- range $a := AttributesOf $}}
- InstanceAccessor("{{$a.Name}}", &W{{$.Name}}::get{{Title $a.Name}},
-{{- if $a.Readonly}} nullptr{{else}} &W{{$.Name}}::set{{Title $a.Name}}{{end -}}
- ),
-{{- end}}
-{{- range $c := ConstantsOf $}}
- StaticValue("{{$c.Name}}", ToJS(env, {{$.Name}}::{{$c.Name}}), napi_default_jsproperty),
-{{- end}}
- });
- }
-
- W{{$.Name}}(const Napi::CallbackInfo& info) : ObjectWrap(info) {}
-
-{{ if $s := SetlikeOf $}}
- Napi::Value has(const Napi::CallbackInfo& info) {
- std::tuple<{{template "Type" $s.Elem}}> args;
- auto res = FromJS(info, args);
- if (res) {
- return ToJS(info.Env(), impl->has(info.Env(), std::get<0>(args)));
- }
- Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
- return {};
- }
- Napi::Value keys(const Napi::CallbackInfo& info) {
- return ToJS(info.Env(), impl->keys(info.Env()));
- }
-{{- end}}
-{{- range $m := MethodsOf $}}
- Napi::Value {{$m.Name}}(const Napi::CallbackInfo& info) {
- std::string error;
-{{- range $overload_idx, $o := $m.Overloads}}
-{{- $overloaded := gt (len $m.Overloads) 1}}
- { {{if $overloaded}}// Overload {{$overload_idx}}{{end}}
- std::tuple<
-{{- range $i, $p := $o.Parameters}}
-{{- if $i}}, {{end}}
-{{- if $p.Init }}DefaultedParameter<{{template "Type" $p.Type}}>
-{{- else if $p.Optional}}std::optional<{{template "Type" $p.Type}}>
-{{- else }}{{template "Type" $p.Type}}
-{{- end}}
-{{- end}}> args;
-
-{{- range $i, $p := $o.Parameters}}
-{{- if $p.Init}}
- std::get<{{$i}} /* {{$p.Name}} */>(args).default_value = {{Eval "Literal" "Value" $p.Init "Type" $p.Type}};
-{{- end}}
-{{- end}}
-
- auto res = FromJS(info, args);
- if (res) {
- {{/* indent */}}INTEROP_LOG(
-{{- range $i, $p := $o.Parameters}}
-{{- if $i}}, ", {{$p.Name}}: "{{else}}"{{$p.Name}}: "{{end}}, std::get<{{$i}}>(args)
-{{- end}});
- {{/* indent */}}
-{{- if not (IsUndefinedType $o.Type) }}auto result = {{end -}}
- impl->{{$o.Name}}(info.Env(){{range $i, $_ := $o.Parameters}}, std::get<{{$i}}>(args){{end}});
- {{/* indent */ -}}
-{{- if IsUndefinedType $o.Type}}return info.Env().Null();
-{{- else }}return ToJS(info.Env(), result);
-{{- end }}
- }
- error = {{if $overloaded}}"\noverload {{$overload_idx}} failed to match:\n" + {{end}}res.error;
- }
-{{- end}}
- Napi::Error::New(info.Env(), "no overload matched for {{$m.Name}}:\n" + error).ThrowAsJavaScriptException();
- return {};
- }
-{{- end}}
-
-{{- range $a := AttributesOf $}}
- Napi::Value get{{Title $a.Name}}(const Napi::CallbackInfo& info) {
- return ToJS(info.Env(), impl->get{{Title $a.Name}}(info.Env()));
- }
-{{- if not $a.Readonly}}
- void set{{Title $a.Name}}(const Napi::CallbackInfo& info, const Napi::Value& value) {
- {{template "Type" $a.Type}} v{};
- auto res = FromJS(info.Env(), value, v);
- if (res) {
- impl->set{{Title $a.Name}}(info.Env(), std::move(v));
- } else {
- res = res.Append("invalid value to {{$a.Name}}");
- Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
- }
- }
-{{- end }}
-{{- end}}
- };
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Dictionary emits the C++ method implementations and associated functions of
--- the interop type that defines the given ast.Dictionary
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Dictionary"}}
-Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
- auto object = value.ToObject();
- Result res;
-{{- template "DictionaryMembersFromJS" $}};
- return Success;
-}
-
-Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
- auto object = Napi::Object::New(env);
-{{- template "DictionaryMembersToJS" $}}
- return object;
-}
-
-std::ostream& operator<<(std::ostream& o, const {{$.Name}}& dict) {
- o << "{{$.Name}} {";
-{{- range $i, $m := $.Members}}
- o << {{if $i}}", "{{else}}" "{{end}} << "{{$m.Name}}: ";
- utils::Write(o, dict.{{$m.Name}});
-{{- end }}
- o << "}" << std::endl;
- return o;
-}
-{{ end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- DictionaryMembersFromJS emits the C++ logic to convert each of the
--- dictionary ast.Member fields from JavaScript to C++. Each call to ToJS() is
--- emitted as a separate statement, and requires a 'Result res' local to be
--- declared
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "DictionaryMembersFromJS"}}
-{{- if $.Inherits}}{{template "DictionaryMembersFromJS" (Lookup $.Inherits)}}{{end}}
-{{- range $i, $m := $.Members}}
- {{/* indent */}}
-{{- if $m.Init }}res = interop::FromJSOptional(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
-{{- else }}res = interop::FromJS(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
-{{- end }}
- if (!res) {
- return res.Append("while converting member '{{$m.Name}}'");
- }
-{{- end}}
-{{- end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- DictionaryMembersToJS emits the C++ logic to convert each of the
--- dictionary ast.Member fields to JavaScript from C++. Each call to ToJS() is
--- emitted as a separate statement
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "DictionaryMembersToJS"}}
-{{- if $.Inherits}}{{template "DictionaryMembersToJS" (Lookup $.Inherits)}}{{end}}
-{{- range $m := $.Members}}
- object.Set(Napi::String::New(env, "{{$m.Name}}"), interop::ToJS(env, value.{{$m.Name}}));
-{{- end}}
-{{- end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Interface emits the C++ method implementations that define the given
--- ast.Interface.
--- Note: Most of the actual binding logic lives in the interface wrapper class.
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Interface"}}
-{{$.Name}}::{{$.Name}}() = default;
-
-{{$.Name}}* {{$.Name}}::Unwrap(Napi::Object object) {
- auto* wrappers = Wrappers::For(object.Env());
- if (!object.InstanceOf(wrappers->{{$.Name}}_ctor.Value())) {
- return nullptr;
- }
- return Wrappers::W{{$.Name}}::Unwrap(object)->impl.get();
-}
-
-Interface<{{$.Name}}> {{$.Name}}::Bind(Napi::Env env, std::unique_ptr<{{$.Name}}>&& impl) {
- auto* wrappers = Wrappers::For(env);
- auto object = wrappers->{{$.Name}}_ctor.New({});
- auto* wrapper = Wrappers::W{{$.Name}}::Unwrap(object);
- wrapper->impl = std::move(impl);
- return Interface<{{$.Name}}>(object);
-}
-
-{{$.Name}}::~{{$.Name}}() = default;
-{{ end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Enum emits the C++ associated functions of the interop type that defines the
--- given ast.Enum
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Enum"}}
-bool Converter<{{$.Name}}>::FromString(std::string str, {{$.Name}}& out) {
-{{- range $e := $.Values}}
- if (str == {{$e.Value}}) {
- out = {{$.Name}}::{{EnumEntryName $e.Value}};
- return true;
- }
-{{- end}}
- return false;
-}
-
-const char* Converter<{{$.Name}}>::ToString({{$.Name}} value) {
- switch (value) {
-{{- range $e := $.Values}}
- case {{$.Name}}::{{EnumEntryName $e.Value}}:
- return {{$e.Value}};
-{{- end}}
- }
- return nullptr;
-}
-
-Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
- std::string str = value.ToString();
- if (FromString(str, out)) {
- return Success;
- }
- return Error(str + " is not a valid enum value of {{$.Name}}");
-}
-
-Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
- switch (value) {
-{{- range $e := $.Values}}
- case {{$.Name}}::{{EnumEntryName $e.Value}}:
- return Napi::String::New(env, {{$e.Value}});
-{{- end}}
- }
- return env.Undefined();
-}
-
-std::ostream& operator<<(std::ostream& o, {{$.Name}} value) {
- if (auto* s = Converter<{{$.Name}}>::ToString(value)) {
- return o << s;
- }
- return o << "undefined<{{$.Name}}>";
-}
-
-{{end}}
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl
deleted file mode 100644
index 5fbb0ae55c4..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl
+++ /dev/null
@@ -1,282 +0,0 @@
-{{/*
- Copyright 2021 The Dawn Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/}}
-
-{{- /*
---------------------------------------------------------------------------------
-Template file for use with src/dawn_node/tools/cmd/idlgen/main.go to generate
-the WebGPU.h header file.
-
-See:
-* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
- types used by this template
-* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
- used by this template
-* https://golang.org/pkg/text/template/ for documentation on the template syntax
---------------------------------------------------------------------------------
-*/ -}}
-
-{{- Include "WebGPUCommon.tmpl" -}}
-
-#ifndef DAWN_NODE_GEN_INTEROP_WEBGPU_H_
-#define DAWN_NODE_GEN_INTEROP_WEBGPU_H_
-
-#include "src/dawn_node/interop/Core.h"
-
-namespace wgpu {
-namespace interop {
-
-// Initialize() registers the WebGPU types with the Napi environment.
-void Initialize(Napi::Env env);
-
-{{ range $ := .Declarations}}
-{{- if IsDictionary $}}{{template "Dictionary" $}}
-{{- else if IsNamespace $}}{{template "Namespace" $}}
-{{- else if IsInterface $}}{{template "Interface" $}}
-{{- else if IsEnum $}}{{template "Enum" $}}
-{{- else if IsTypedef $}}{{template "Typedef" $}}
-{{- end}}
-{{- end}}
-
-} // namespace interop
-} // namespace wgpu
-
-#endif // DAWN_NODE_GEN_INTEROP_WEBGPU_H_
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Dictionary emits the C++ header declaration that defines the interop type for
--- the given ast.Dictionary
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Dictionary"}}
-// dictionary {{$.Name}}
-class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
-public:
-{{ range $m := $.Members}}
-{{- if IsConstructor $m}} {{$.Name}}();
-{{ else if IsMember $m}} {{template "DictionaryMember" $m}}
-{{ end}}
-{{- end -}}
-};
-
-template<>
-class Converter<{{$.Name}}> {
-public:
- static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
- static Napi::Value ToJS(Napi::Env, {{$.Name}});
-};
-
-std::ostream& operator<<(std::ostream& o, const {{$.Name}}& desc);
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Namespace emits the C++ header declaration that defines the interop type for
--- the given ast.Namespace
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Namespace"}}
-// namespace {{$.Name}}
-class {{$.Name}} {
-public:
- virtual ~{{$.Name}}();
- {{$.Name}}();
-{{- range $c := ConstantsOf $}}
-{{- template "Constant" $c}}
-{{- end}}
-};
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Interface emits the C++ header declaration that defines the interop type for
--- the given ast.Interface
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Interface"}}
-// interface {{$.Name}}
-class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
-public:
- static Interface<{{$.Name}}> Bind(Napi::Env, std::unique_ptr<{{$.Name}}>&&);
- static {{$.Name}}* Unwrap(Napi::Object);
-
- template<typename T, typename ... ARGS>
- static inline Interface<{{$.Name}}> Create(Napi::Env env, ARGS&& ... args) {
- return Bind(env, std::make_unique<T>(std::forward<ARGS>(args)...));
- }
-
- virtual ~{{$.Name}}();
- {{$.Name}}();
-{{- if $s := SetlikeOf $}}
-{{- template "InterfaceSetlike" $s}}
-{{- end}}
-{{- range $m := MethodsOf $}}
-{{- template "InterfaceMethod" $m}}
-{{- end}}
-{{- range $a := AttributesOf $}}
-{{- template "InterfaceAttribute" $a}}
-{{- end}}
-{{- range $c := ConstantsOf $}}
-{{- template "Constant" $c}}
-{{- end}}
-};
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Typedef emits the C++ header declaration that defines the interop type for
--- the given ast.Interface
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Typedef"}}
-using {{$.Name}} = {{template "Type" $.Type}};
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Enum emits the C++ header declaration that defines the interop type for
--- the given ast.Enum
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Enum"}}
-enum class {{$.Name}} {
-{{- range $ := $.Values}}
- {{EnumEntryName $.Value}},
-{{- end}}
-};
-
-template<>
-class Converter<{{$.Name}}> {
-public:
- static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
- static Napi::Value ToJS(Napi::Env, {{$.Name}});
- static bool FromString(std::string, {{$.Name}}&);
- static const char* ToString({{$.Name}});
-};
-
-std::ostream& operator<<(std::ostream& o, {{$.Name}});
-{{end}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- DictionaryMember emits the C++ declaration for a single dictionary ast.Member
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "DictionaryMember"}}
-{{- if $.Attribute}}{{template "AttributeType" $}} {{$.Name}}
-{{- if $.Init}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}}{{end}};
-{{- else }}{{template "Type" $.Type}} {{$.Name}}({{template "Parameters" $.Parameters}});
-{{- end }}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- InterfaceSetlike emits the C++ methods for a setlike interface
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "InterfaceSetlike"}}
- virtual bool has(Napi::Env, {{template "Type" $.Elem}}) = 0;
- virtual std::vector<{{template "Type" $.Elem}}> keys(Napi::Env) = 0;
-{{- /* TODO(crbug.com/dawn/1143):
- entries, forEach, size, values
- read-write: add, clear, or delete
-*/}}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- InterfaceMethod emits the C++ declaration for a single interface ast.Member
--- method
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "InterfaceMethod"}}
-{{- range $o := $.Overloads}}
- virtual {{template "Type" $o.Type}} {{$.Name}}(Napi::Env{{template "ParametersWithLeadingComma" $o.Parameters}}) = 0;
-{{- end }}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- InterfaceAttribute emits the C++ declaration for a single interface
--- ast.Member attribute
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "InterfaceAttribute"}}
- virtual {{template "Type" $.Type}} get{{Title $.Name}}(Napi::Env) = 0;
-{{- if not $.Readonly}}
- virtual void set{{Title $.Name}}(Napi::Env, {{template "Type" $.Type}} value) = 0;
-{{- end }}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Constant emits the C++ declaration for a single ast.Member constant
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Constant"}}
- static constexpr {{template "Type" $.Type}} {{$.Name}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}};
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Parameters emits the C++ comma separated list of parameter declarations for
--- the given []ast.Parameter
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Parameters"}}
-{{- range $i, $param := $ }}
-{{- if $i }}, {{end}}
-{{- template "Parameter" $param}}
-{{- end }}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- ParametersWithLeadingComma emits the C++ comma separated list of parameter
--- declarations for the given []ast.Parameter, starting with a leading comma
--- for the first parameter
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "ParametersWithLeadingComma"}}
-{{- range $i, $param := $ }}, {{/* */}}
-{{- template "Parameter" $param}}
-{{- end }}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Parameter emits the C++ parameter type and name for the given ast.Parameter
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Parameter" -}}
-{{- if $.Init }}{{template "Type" $.Type}} {{$.Name}}
-{{- else if $.Optional}}std::optional<{{template "Type" $.Type}}> {{$.Name}}
-{{- else }}{{template "Type" $.Type}} {{$.Name}}
-{{- end }}
-{{- end}}
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl
deleted file mode 100644
index 86307735634..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl
+++ /dev/null
@@ -1,126 +0,0 @@
-{{/*
- Copyright 2021 The Dawn Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/}}
-
-{{- /*
---------------------------------------------------------------------------------
-Template file for use with src/dawn_node/tools/cmd/idlgen/main.go.
-This file provides common template definitions and is included by WebGPU.h.tmpl
-and WebGPU.cpp.tmpl.
-
-See:
-* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
- types used by this template
-* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
- used by this template
-* https://golang.org/pkg/text/template/ for documentation on the template syntax
---------------------------------------------------------------------------------
-*/ -}}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Type generates the C++ type for the given ast.Type
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Type" -}}
-{{- if IsUndefinedType $}}void
-{{- else if IsTypeName $}}
-{{- if eq $.Name "boolean" }}bool
-{{- else if eq $.Name "long" }}int32_t
-{{- else if eq $.Name "unsigned long" }}uint32_t
-{{- else if eq $.Name "long long" }}int64_t
-{{- else if eq $.Name "unsigned long long" }}uint64_t
-{{- else if eq $.Name "object" }}Object
-{{- else if eq $.Name "DOMString" }}std::string
-{{- else if eq $.Name "USVString" }}std::string
-{{- else if eq $.Name "ArrayBuffer" }}ArrayBuffer
-{{- else if IsInterface (Lookup $.Name) }}Interface<{{$.Name}}>
-{{- else }}{{$.Name}}
-{{- end }}
-{{- else if IsParametrizedType $}}{{$.Name}}<{{template "TypeList" $.Elems}}>
-{{- else if IsNullableType $}}std::optional<{{template "Type" $.Type}}>
-{{- else if IsUnionType $}}std::variant<{{template "VariantTypeList" $.Types}}>
-{{- else if IsSequenceType $}}std::vector<{{template "Type" $.Elem}}>
-{{- else if IsRecordType $}}std::unordered_map<{{template "Type" $.Key}}, {{template "Type" $.Elem}}>
-{{- else }} /* Unhandled Type {{printf "%T" $}} */
-{{- end -}}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- AttributeType generates the C++ type for the given ast.Member
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "AttributeType" -}}
-{{- if $.Required }}{{template "Type" $.Type}}
-{{- else if $.Init }}{{template "Type" $.Type}}
-{{- else }}std::optional<{{template "Type" $.Type}}>
-{{- end}}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- Literal generates a C++ literal value using the following arguments:
--- Value - the ast.Literal
--- Type - the ast.Type of the literal
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "Literal" -}}
-{{- if IsDefaultDictionaryLiteral $.Value}}{{template "Type" $.Type}}{}
-{{- else if IsTypeName $.Type }}
-{{- $ty := Lookup $.Type.Name}}
-{{- if IsEnum $ty }}{{$.Type.Name}}::{{EnumEntryName $.Value.Value}}
-{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
-{{- else }}/* Unhandled Type {{printf "ty: %v $.Type.Name: %T $.Value: %T" $ty $.Type.Name $.Value}} */
-{{- end }}
-{{- else if IsSequenceType $.Type }}{{template "Type" $.Type}}{} {{- /* TODO: Assumes the initialiser is empty */}}
-{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
-{{- else }} /* Unhandled Type {{printf "%T %T" $.Type $.Value}} */
-{{- end}}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- TypeList generates a C++ comma separated list of types from the given
--- []ast.Type
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "TypeList" -}}
-{{- range $i, $ty := $}}
-{{- if $i }}, {{end}}
-{{- template "Type" $ty}}
-{{- end}}
-{{- end }}
-
-
-{{- /*
---------------------------------------------------------------------------------
--- VariantTypeList generates a C++ comma separated list of types from the given
--- []ast.Type, skipping any 'undefined' types
---------------------------------------------------------------------------------
-*/ -}}
-{{- define "VariantTypeList" -}}
-{{- range $i, $ty := $}}
-{{- if not (IsUndefinedType $ty)}}
-{{- if $i }}, {{end}}
-{{- template "Type" $ty}}
-{{- end}}
-{{- end}}
-{{- end }}
-
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/go.mod b/chromium/third_party/dawn/src/dawn_node/tools/go.mod
deleted file mode 100644
index e39e222e92c..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/tools/go.mod
+++ /dev/null
@@ -1,9 +0,0 @@
-module dawn.googlesource.com/dawn/src/dawn_node/tools
-
-go 1.16
-
-require (
- github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
- github.com/mattn/go-colorable v0.1.9
- github.com/mattn/go-isatty v0.0.14 // indirect
-)
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go
deleted file mode 100644
index 5ea5499fcc7..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// idlgen is a tool used to generate code from WebIDL files and a golang
-// template file
-package main
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "text/template"
- "unicode"
-
- "github.com/ben-clayton/webidlparser/ast"
- "github.com/ben-clayton/webidlparser/parser"
-)
-
-func main() {
- if err := run(); err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
-
-func showUsage() {
- fmt.Println(`
-idlgen is a tool used to generate code from WebIDL files and a golang
-template file
-
-Usage:
- idlgen --template=<template-path> --output=<output-path> <idl-file> [<idl-file>...]`)
- os.Exit(1)
-}
-
-func run() error {
- var templatePath string
- var outputPath string
- flag.StringVar(&templatePath, "template", "", "the template file run with the parsed WebIDL files")
- flag.StringVar(&outputPath, "output", "", "the output file")
- flag.Parse()
-
- idlFiles := flag.Args()
-
- // Check all required arguments are provided
- if templatePath == "" || outputPath == "" || len(idlFiles) == 0 {
- showUsage()
- }
-
- // Open up the output file
- out := os.Stdout
- if outputPath != "" {
- file, err := os.Create(outputPath)
- if err != nil {
- return fmt.Errorf("failed to open output file '%v'", outputPath)
- }
- out = file
- defer file.Close()
- }
-
- // Read the template file
- tmpl, err := ioutil.ReadFile(templatePath)
- if err != nil {
- return fmt.Errorf("failed to open template file '%v'", templatePath)
- }
-
- // idl is the combination of the parsed idlFiles
- idl := &ast.File{}
-
- // Parse each of the WebIDL files and add the declarations to idl
- for _, path := range idlFiles {
- content, err := ioutil.ReadFile(path)
- if err != nil {
- return fmt.Errorf("failed to open file '%v'", path)
- }
- fileIDL := parser.Parse(string(content))
- if numErrs := len(fileIDL.Errors); numErrs != 0 {
- errs := make([]string, numErrs)
- for i, e := range fileIDL.Errors {
- errs[i] = e.Message
- }
- return fmt.Errorf("errors found while parsing %v:\n%v", path, strings.Join(errs, "\n"))
- }
- idl.Declarations = append(idl.Declarations, fileIDL.Declarations...)
- }
-
- // Initialize the generator
- g := generator{t: template.New(templatePath)}
- g.workingDir = filepath.Dir(templatePath)
- g.funcs = map[string]interface{}{
- // Functions exposed to the template
- "AttributesOf": attributesOf,
- "ConstantsOf": constantsOf,
- "EnumEntryName": enumEntryName,
- "Eval": g.eval,
- "Include": g.include,
- "IsBasicLiteral": is(ast.BasicLiteral{}),
- "IsConstructor": isConstructor,
- "IsDefaultDictionaryLiteral": is(ast.DefaultDictionaryLiteral{}),
- "IsDictionary": is(ast.Dictionary{}),
- "IsEnum": is(ast.Enum{}),
- "IsInterface": is(ast.Interface{}),
- "IsInterfaceOrNamespace": is(ast.Interface{}, ast.Namespace{}),
- "IsMember": is(ast.Member{}),
- "IsNamespace": is(ast.Namespace{}),
- "IsNullableType": is(ast.NullableType{}),
- "IsParametrizedType": is(ast.ParametrizedType{}),
- "IsRecordType": is(ast.RecordType{}),
- "IsSequenceType": is(ast.SequenceType{}),
- "IsTypedef": is(ast.Typedef{}),
- "IsTypeName": is(ast.TypeName{}),
- "IsUndefinedType": isUndefinedType,
- "IsUnionType": is(ast.UnionType{}),
- "Lookup": g.lookup,
- "MethodsOf": methodsOf,
- "SetlikeOf": setlikeOf,
- "Title": strings.Title,
- }
- t, err := g.t.
- Option("missingkey=invalid").
- Funcs(g.funcs).
- Parse(string(tmpl))
- if err != nil {
- return fmt.Errorf("failed to parse template file '%v': %w", templatePath, err)
- }
-
- // simplify the definitions in the WebIDL before passing this to the template
- idl, declarations := simplify(idl)
- g.declarations = declarations
-
- // Write the file header
- fmt.Fprintf(out, header, strings.Join(os.Args[1:], "\n// "))
-
- // Execute the template
- return t.Execute(out, idl)
-}
-
-// declarations is a map of WebIDL declaration name to its AST node.
-type declarations map[string]ast.Decl
-
-// nameOf returns the name of the AST node n.
-// Returns an empty string if the node is not named.
-func nameOf(n ast.Node) string {
- switch n := n.(type) {
- case *ast.Namespace:
- return n.Name
- case *ast.Interface:
- return n.Name
- case *ast.Dictionary:
- return n.Name
- case *ast.Enum:
- return n.Name
- case *ast.Typedef:
- return n.Name
- case *ast.Mixin:
- return n.Name
- case *ast.Includes:
- return ""
- default:
- panic(fmt.Errorf("unhandled AST declaration %T", n))
- }
-}
-
-// simplify processes the AST 'in', returning a new AST that:
-// * Has all partial interfaces merged into a single interface.
-// * Has all mixins flattened into their place of use.
-// * Has all the declarations ordered in dependency order (leaf first)
-// simplify also returns the map of declarations in the AST.
-func simplify(in *ast.File) (*ast.File, declarations) {
- s := simplifier{
- declarations: declarations{},
- registered: map[string]bool{},
- out: &ast.File{},
- }
-
- // Walk the IDL declarations to merge together partial interfaces and embed
- // mixins into their uses.
- {
- interfaces := map[string]*ast.Interface{}
- mixins := map[string]*ast.Mixin{}
- for _, d := range in.Declarations {
- switch d := d.(type) {
- case *ast.Interface:
- if i, ok := interfaces[d.Name]; ok {
- // Merge partial body into one interface
- i.Members = append(i.Members, d.Members...)
- } else {
- clone := *d
- d := &clone
- interfaces[d.Name] = d
- s.declarations[d.Name] = d
- }
- case *ast.Mixin:
- mixins[d.Name] = d
- s.declarations[d.Name] = d
- case *ast.Includes:
- // Merge mixin into interface
- i, ok := interfaces[d.Name]
- if !ok {
- panic(fmt.Errorf("%v includes %v, but %v is not an interface", d.Name, d.Source, d.Name))
- }
- m, ok := mixins[d.Source]
- if !ok {
- panic(fmt.Errorf("%v includes %v, but %v is not an mixin", d.Name, d.Source, d.Source))
- }
- // Merge mixin into the interface
- for _, member := range m.Members {
- if member, ok := member.(*ast.Member); ok {
- i.Members = append(i.Members, member)
- }
- }
- default:
- if name := nameOf(d); name != "" {
- s.declarations[nameOf(d)] = d
- }
- }
- }
- }
-
- // Now traverse the declarations in to produce the dependency-ordered
- // output `s.out`.
- for _, d := range in.Declarations {
- if name := nameOf(d); name != "" {
- s.visit(s.declarations[nameOf(d)])
- }
- }
-
- return s.out, s.declarations
-}
-
-// simplifier holds internal state for simplify()
-type simplifier struct {
- // all AST declarations
- declarations declarations
- // set of visited declarations
- registered map[string]bool
- // the dependency-ordered output
- out *ast.File
-}
-
-// visit traverses the AST declaration 'd' adding all dependent declarations to
-// s.out.
-func (s *simplifier) visit(d ast.Decl) {
- register := func(name string) bool {
- if s.registered[name] {
- return true
- }
- s.registered[name] = true
- return false
- }
- switch d := d.(type) {
- case *ast.Namespace:
- if register(d.Name) {
- return
- }
- for _, m := range d.Members {
- if m, ok := m.(*ast.Member); ok {
- s.visitType(m.Type)
- for _, p := range m.Parameters {
- s.visitType(p.Type)
- }
- }
- }
- case *ast.Interface:
- if register(d.Name) {
- return
- }
- if d, ok := s.declarations[d.Inherits]; ok {
- s.visit(d)
- }
- for _, m := range d.Members {
- if m, ok := m.(*ast.Member); ok {
- s.visitType(m.Type)
- for _, p := range m.Parameters {
- s.visitType(p.Type)
- }
- }
- }
- case *ast.Dictionary:
- if register(d.Name) {
- return
- }
- if d, ok := s.declarations[d.Inherits]; ok {
- s.visit(d)
- }
- for _, m := range d.Members {
- s.visitType(m.Type)
- for _, p := range m.Parameters {
- s.visitType(p.Type)
- }
- }
- case *ast.Typedef:
- if register(d.Name) {
- return
- }
- s.visitType(d.Type)
- case *ast.Mixin:
- if register(d.Name) {
- return
- }
- for _, m := range d.Members {
- if m, ok := m.(*ast.Member); ok {
- s.visitType(m.Type)
- for _, p := range m.Parameters {
- s.visitType(p.Type)
- }
- }
- }
- case *ast.Enum:
- if register(d.Name) {
- return
- }
- case *ast.Includes:
- if register(d.Name) {
- return
- }
- default:
- panic(fmt.Errorf("unhandled AST declaration %T", d))
- }
-
- s.out.Declarations = append(s.out.Declarations, d)
-}
-
-// visitType traverses the AST type 't' adding all dependent declarations to
-// s.out.
-func (s *simplifier) visitType(t ast.Type) {
- switch t := t.(type) {
- case *ast.TypeName:
- if d, ok := s.declarations[t.Name]; ok {
- s.visit(d)
- }
- case *ast.UnionType:
- for _, t := range t.Types {
- s.visitType(t)
- }
- case *ast.ParametrizedType:
- for _, t := range t.Elems {
- s.visitType(t)
- }
- case *ast.NullableType:
- s.visitType(t.Type)
- case *ast.SequenceType:
- s.visitType(t.Elem)
- case *ast.RecordType:
- s.visitType(t.Elem)
- default:
- panic(fmt.Errorf("unhandled AST type %T", t))
- }
-}
-
-// generator holds the template generator state
-type generator struct {
- // the root template
- t *template.Template
- // the working directory
- workingDir string
- // map of function name to function exposed to the template executor
- funcs map[string]interface{}
- // dependency-sorted declarations
- declarations declarations
-}
-
-// eval executes the sub-template with the given name and arguments, returning
-// the generated output
-// args can be a single argument:
-// arg[0]
-// or a list of name-value pairs:
-// (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
-func (g *generator) eval(template string, args ...interface{}) (string, error) {
- target := g.t.Lookup(template)
- if target == nil {
- return "", fmt.Errorf("template '%v' not found", template)
- }
- sb := strings.Builder{}
- var err error
- if len(args) == 1 {
- err = target.Execute(&sb, args[0])
- } else {
- m := newMap()
- if len(args)%2 != 0 {
- return "", fmt.Errorf("Eval expects a single argument or list name-value pairs")
- }
- for i := 0; i < len(args); i += 2 {
- name, ok := args[i].(string)
- if !ok {
- return "", fmt.Errorf("Eval argument %v is not a string", i)
- }
- m.Put(name, args[i+1])
- }
- err = target.Execute(&sb, m)
- }
- if err != nil {
- return "", fmt.Errorf("while evaluating '%v': %v", template, err)
- }
- return sb.String(), nil
-}
-
-// lookup returns the declaration with the given name, or nil if not found.
-func (g *generator) lookup(name string) ast.Decl {
- return g.declarations[name]
-}
-
-// include loads the template with the given path, importing the declarations
-// into the scope of the current template.
-func (g *generator) include(path string) (string, error) {
- t, err := g.t.
- Option("missingkey=invalid").
- Funcs(g.funcs).
- ParseFiles(filepath.Join(g.workingDir, path))
- if err != nil {
- return "", err
- }
- g.t.AddParseTree(path, t.Tree)
- return "", nil
-}
-
-// Map is a simple generic key-value map, which can be used in the template
-type Map map[interface{}]interface{}
-
-func newMap() Map { return Map{} }
-
-// Put adds the key-value pair into the map.
-// Put always returns an empty string so nothing is printed in the template.
-func (m Map) Put(key, value interface{}) string {
- m[key] = value
- return ""
-}
-
-// Get looks up and returns the value with the given key. If the map does not
-// contain the given key, then nil is returned.
-func (m Map) Get(key interface{}) interface{} {
- return m[key]
-}
-
-// is returns a function that returns true if the value passed to the function
-// matches any of the types of the objects in 'prototypes'.
-func is(prototypes ...interface{}) func(interface{}) bool {
- types := make([]reflect.Type, len(prototypes))
- for i, p := range prototypes {
- types[i] = reflect.TypeOf(p)
- }
- return func(v interface{}) bool {
- ty := reflect.TypeOf(v)
- for _, rty := range types {
- if ty == rty || ty == reflect.PtrTo(rty) {
- return true
- }
- }
- return false
- }
-}
-
-// isConstructor returns true if the object is a constructor ast.Member.
-func isConstructor(v interface{}) bool {
- if member, ok := v.(*ast.Member); ok {
- if ty, ok := member.Type.(*ast.TypeName); ok {
- return ty.Name == "constructor"
- }
- }
- return false
-}
-
-// isUndefinedType returns true if the type is 'undefined'
-func isUndefinedType(ty ast.Type) bool {
- if ty, ok := ty.(*ast.TypeName); ok {
- return ty.Name == "undefined"
- }
- return false
-}
-
-// enumEntryName formats the enum entry name 's' for use in a C++ enum.
-func enumEntryName(s string) string {
- return "k" + strings.ReplaceAll(pascalCase(strings.Trim(s, `"`)), "-", "")
-}
-
-// Method describes a WebIDL interface method
-type Method struct {
- // Name of the method
- Name string
- // The list of overloads of the method
- Overloads []*ast.Member
-}
-
-// methodsOf returns all the methods of the given WebIDL interface.
-func methodsOf(obj interface{}) []*Method {
- iface, ok := obj.(*ast.Interface)
- if !ok {
- return nil
- }
- byName := map[string]*Method{}
- out := []*Method{}
- for _, member := range iface.Members {
- member := member.(*ast.Member)
- if !member.Const && !member.Attribute && !isConstructor(member) {
- if method, ok := byName[member.Name]; ok {
- method.Overloads = append(method.Overloads, member)
- } else {
- method = &Method{
- Name: member.Name,
- Overloads: []*ast.Member{member},
- }
- byName[member.Name] = method
- out = append(out, method)
- }
- }
- }
- return out
-}
-
-// attributesOf returns all the attributes of the given WebIDL interface or
-// namespace.
-func attributesOf(obj interface{}) []*ast.Member {
- out := []*ast.Member{}
- add := func(m interface{}) {
- if m := m.(*ast.Member); m.Attribute {
- out = append(out, m)
- }
- }
- switch obj := obj.(type) {
- case *ast.Interface:
- for _, m := range obj.Members {
- add(m)
- }
- case *ast.Namespace:
- for _, m := range obj.Members {
- add(m)
- }
- default:
- return nil
- }
- return out
-}
-
-// constantsOf returns all the constant values of the given WebIDL interface or
-// namespace.
-func constantsOf(obj interface{}) []*ast.Member {
- out := []*ast.Member{}
- add := func(m interface{}) {
- if m := m.(*ast.Member); m.Const {
- out = append(out, m)
- }
- }
- switch obj := obj.(type) {
- case *ast.Interface:
- for _, m := range obj.Members {
- add(m)
- }
- case *ast.Namespace:
- for _, m := range obj.Members {
- add(m)
- }
- default:
- return nil
- }
- return out
-}
-
-// setlikeOf returns the setlike ast.Pattern, if obj is a setlike interface.
-func setlikeOf(obj interface{}) *ast.Pattern {
- iface, ok := obj.(*ast.Interface)
- if !ok {
- return nil
- }
- for _, pattern := range iface.Patterns {
- if pattern.Type == ast.Setlike {
- return pattern
- }
- }
- return nil
-}
-
-// pascalCase returns the snake-case string s transformed into 'PascalCase',
-// Rules:
-// * The first letter of the string is capitalized
-// * Characters following an underscore, hyphen or number are capitalized
-// * Underscores are removed from the returned string
-// See: https://en.wikipedia.org/wiki/Camel_case
-func pascalCase(s string) string {
- b := strings.Builder{}
- upper := true
- for _, r := range s {
- if r == '_' || r == '-' {
- upper = true
- continue
- }
- if upper {
- b.WriteRune(unicode.ToUpper(r))
- upper = false
- } else {
- b.WriteRune(r)
- }
- if unicode.IsNumber(r) {
- upper = true
- }
- }
- return b.String()
-}
-
-const header = `// Copyright 2021 The Dawn Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-////////////////////////////////////////////////////////////////////////////////
-// File generated by tools/cmd/idlgen.go, with the arguments:
-// %v
-//
-// Do not modify this file directly
-////////////////////////////////////////////////////////////////////////////////
-
-`
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go
deleted file mode 100644
index f60e0246c69..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go
+++ /dev/null
@@ -1,894 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
-package main
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "net/http"
- "os"
- "os/exec"
- "os/signal"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "syscall"
- "time"
-
- "github.com/mattn/go-colorable"
- "github.com/mattn/go-isatty"
-)
-
-const (
- testTimeout = time.Minute
-)
-
-func main() {
- if err := run(); err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-}
-
-func showUsage() {
- fmt.Println(`
-run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
-
-Usage:
- run-cts --dawn-node=<path to dawn.node> --cts=<path to WebGPU CTS> [test-query]`)
- os.Exit(1)
-}
-
-var (
- colors bool
- stdout io.Writer
- mainCtx context.Context
-)
-
-type dawnNodeFlags []string
-
-func (f *dawnNodeFlags) String() string {
- return fmt.Sprint(strings.Join(*f, ""))
-}
-
-func (f *dawnNodeFlags) Set(value string) error {
- // Multiple flags must be passed in indivually:
- // -flag=a=b -dawn_node_flag=c=d
- *f = append(*f, value)
- return nil
-}
-
-func makeMainCtx() context.Context {
- ctx, cancel := context.WithCancel(context.Background())
- sigs := make(chan os.Signal, 1)
- signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
- go func() {
- sig := <-sigs
- fmt.Printf("Signal received: %v\n", sig)
- cancel()
- }()
- return ctx
-}
-
-func run() error {
- mainCtx = makeMainCtx()
-
- colors = os.Getenv("TERM") != "dumb" ||
- isatty.IsTerminal(os.Stdout.Fd()) ||
- isatty.IsCygwinTerminal(os.Stdout.Fd())
- if colors {
- if _, disable := os.LookupEnv("NO_COLOR"); disable {
- colors = false
- }
- }
-
- backendDefault := "default"
- if vkIcdFilenames := os.Getenv("VK_ICD_FILENAMES"); vkIcdFilenames != "" {
- backendDefault = "vulkan"
- }
-
- var dawnNode, cts, node, npx, logFilename, backend string
- var verbose, isolated, build bool
- var numRunners int
- var flags dawnNodeFlags
- flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
- flag.StringVar(&cts, "cts", "", "root directory of WebGPU CTS")
- flag.StringVar(&node, "node", "", "path to node executable")
- flag.StringVar(&npx, "npx", "", "path to npx executable")
- flag.BoolVar(&verbose, "verbose", false, "print extra information while testing")
- flag.BoolVar(&build, "build", true, "attempt to build the CTS before running")
- flag.BoolVar(&isolated, "isolate", false, "run each test in an isolated process")
- flag.BoolVar(&colors, "colors", colors, "enable / disable colors")
- flag.IntVar(&numRunners, "j", runtime.NumCPU()/2, "number of concurrent runners. 0 runs serially")
- flag.StringVar(&logFilename, "log", "", "path to log file of tests run and result")
- flag.Var(&flags, "flag", "flag to pass to dawn-node as flag=value. multiple flags must be passed in individually")
- flag.StringVar(&backend, "backend", backendDefault, "backend to use: default|null|webgpu|d3d11|d3d12|metal|vulkan|opengl|opengles."+
- " set to 'vulkan' if VK_ICD_FILENAMES environment variable is set, 'default' otherwise")
- flag.Parse()
-
- if colors {
- stdout = colorable.NewColorableStdout()
- } else {
- stdout = colorable.NewNonColorable(os.Stdout)
- }
-
- // Check mandatory arguments
- if dawnNode == "" || cts == "" {
- showUsage()
- }
- if !isFile(dawnNode) {
- return fmt.Errorf("'%v' is not a file", dawnNode)
- }
- if !isDir(cts) {
- return fmt.Errorf("'%v' is not a directory", cts)
- }
-
- // Make paths absolute
- for _, path := range []*string{&dawnNode, &cts} {
- abs, err := filepath.Abs(*path)
- if err != nil {
- return fmt.Errorf("unable to get absolute path for '%v'", *path)
- }
- *path = abs
- }
-
- // The test query is the optional unnamed argument
- query := "webgpu:*"
- switch len(flag.Args()) {
- case 0:
- case 1:
- query = flag.Args()[0]
- default:
- return fmt.Errorf("only a single query can be provided")
- }
-
- // Find node
- if node == "" {
- var err error
- node, err = exec.LookPath("node")
- if err != nil {
- return fmt.Errorf("add node to PATH or specify with --node")
- }
- }
- // Find npx
- if npx == "" {
- var err error
- npx, err = exec.LookPath("npx")
- if err != nil {
- npx = ""
- }
- }
-
- if backend != "default" {
- fmt.Println("Forcing backend to", backend)
- flags = append(flags, fmt.Sprint("dawn-backend=", backend))
- }
-
- r := runner{
- numRunners: numRunners,
- verbose: verbose,
- node: node,
- npx: npx,
- dawnNode: dawnNode,
- cts: cts,
- flags: flags,
- evalScript: func(main string) string {
- return fmt.Sprintf(`require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/%v.ts');`, main)
- },
- }
-
- if logFilename != "" {
- writer, err := os.Create(logFilename)
- if err != nil {
- return fmt.Errorf("failed to open log '%v': %w", logFilename, err)
- }
- defer writer.Close()
- r.log = newLogger(writer)
- }
-
- cache := cache{}
- cachePath := dawnNode + ".runcts.cache"
- if err := cache.load(cachePath); err != nil && verbose {
- fmt.Println("failed to load cache from", cachePath, err)
- }
- defer cache.save(cachePath)
-
- // Scan the CTS source to determine the most recent change to the CTS source
- mostRecentSourceChange, err := r.scanSourceTimestamps(verbose)
- if err != nil {
- return fmt.Errorf("failed to scan source files for modified timestamps: %w", err)
- }
-
- ctsNeedsRebuild := mostRecentSourceChange.After(cache.BuildTimestamp) ||
- !isDir(filepath.Join(r.cts, "out-node"))
- if build {
- if verbose {
- fmt.Println("CTS needs rebuild:", ctsNeedsRebuild)
- }
-
- if npx != "" {
- if ctsNeedsRebuild {
- if err := r.buildCTS(verbose); err != nil {
- return fmt.Errorf("failed to build CTS: %w", err)
- }
- cache.BuildTimestamp = mostRecentSourceChange
- }
- // Use the prebuilt CTS (instead of using the `setup-ts-in-node` transpiler)
- r.evalScript = func(main string) string {
- return fmt.Sprintf(`require('./out-node/common/runtime/%v.js');`, main)
- }
- } else {
- fmt.Println("npx not found on PATH. Using runtime TypeScript transpilation (slow)")
- }
- }
-
- if numRunners > 0 {
- // Find all the test cases that match the given queries.
- if err := r.gatherTestCases(query, verbose); err != nil {
- return fmt.Errorf("failed to gather test cases: %w", err)
- }
-
- if isolated {
- fmt.Println("Running in parallel isolated...")
- fmt.Printf("Testing %d test cases...\n", len(r.testcases))
- return r.runParallelIsolated()
- }
- fmt.Println("Running in parallel with server...")
- fmt.Printf("Testing %d test cases...\n", len(r.testcases))
- return r.runParallelWithServer()
- }
-
- fmt.Println("Running serially...")
- return r.runSerially(query)
-}
-
-type logger struct {
- writer io.Writer
- idx int
- resultByIndex map[int]result
-}
-
-// newLogger creates a new logger instance.
-func newLogger(writer io.Writer) logger {
- return logger{writer, 0, map[int]result{}}
-}
-
-// logResult writes the test results to the log file in sequential order.
-// logResult should be called whenever a new test result becomes available.
-func (l *logger) logResults(res result) {
- if l.writer == nil {
- return
- }
- l.resultByIndex[res.index] = res
- for {
- logRes, ok := l.resultByIndex[l.idx]
- if !ok {
- break
- }
- fmt.Fprintf(l.writer, "%v [%v]\n", logRes.testcase, logRes.status)
- l.idx++
- }
-}
-
-// Cache holds cached information between runs to optimize runs
-type cache struct {
- BuildTimestamp time.Time
-}
-
-// load loads the cache information from the JSON file at path
-func (c *cache) load(path string) error {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- defer f.Close()
- return json.NewDecoder(f).Decode(c)
-}
-
-// save saves the cache information to the JSON file at path
-func (c *cache) save(path string) error {
- f, err := os.Create(path)
- if err != nil {
- return err
- }
- defer f.Close()
- return json.NewEncoder(f).Encode(c)
-}
-
-type runner struct {
- numRunners int
- verbose bool
- node, npx, dawnNode, cts string
- flags dawnNodeFlags
- evalScript func(string) string
- testcases []string
- log logger
-}
-
-// scanSourceTimestamps scans all the .js and .ts files in all subdirectories of
-// r.cts, and returns the file with the most recent timestamp.
-func (r *runner) scanSourceTimestamps(verbose bool) (time.Time, error) {
- if verbose {
- start := time.Now()
- fmt.Println("Scanning .js / .ts files for changes...")
- defer func() {
- fmt.Println("completed in", time.Since(start))
- }()
- }
-
- dir := filepath.Join(r.cts, "src")
-
- mostRecentChange := time.Time{}
- err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- switch filepath.Ext(path) {
- case ".ts", ".js":
- if info.ModTime().After(mostRecentChange) {
- mostRecentChange = info.ModTime()
- }
- }
- return nil
- })
- if err != nil {
- return time.Time{}, err
- }
- return mostRecentChange, nil
-}
-
-// buildCTS calls `npx grunt run:build-out-node` in the CTS directory to compile
-// the TypeScript files down to JavaScript. Doing this once ahead of time can be
-// much faster than dynamically transpiling when there are many tests to run.
-func (r *runner) buildCTS(verbose bool) error {
- if verbose {
- start := time.Now()
- fmt.Println("Building CTS...")
- defer func() {
- fmt.Println("completed in", time.Since(start))
- }()
- }
-
- cmd := exec.Command(r.npx, "grunt", "run:build-out-node")
- cmd.Dir = r.cts
- out, err := cmd.CombinedOutput()
- if err != nil {
- return fmt.Errorf("%w: %v", err, string(out))
- }
- return nil
-}
-
-// gatherTestCases() queries the CTS for all test cases that match the given
-// query. On success, gatherTestCases() populates r.testcases.
-func (r *runner) gatherTestCases(query string, verbose bool) error {
- if verbose {
- start := time.Now()
- fmt.Println("Gathering test cases...")
- defer func() {
- fmt.Println("completed in", time.Since(start))
- }()
- }
-
- args := append([]string{
- "-e", r.evalScript("cmdline"),
- "--", // Start of arguments
- // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
- // and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
- "--list",
- }, query)
-
- cmd := exec.Command(r.node, args...)
- cmd.Dir = r.cts
- out, err := cmd.CombinedOutput()
- if err != nil {
- return fmt.Errorf("%w\n%v", err, string(out))
- }
-
- tests := filterTestcases(strings.Split(string(out), "\n"))
- r.testcases = tests
- return nil
-}
-
-type portListener struct {
- buffer strings.Builder
- port chan int
-}
-
-func newPortListener() portListener {
- return portListener{strings.Builder{}, make(chan int)}
-}
-
-var portRE = regexp.MustCompile(`\[\[(\d+)\]\]`)
-
-func (p *portListener) Write(data []byte) (n int, err error) {
- if p.port != nil {
- p.buffer.Write(data)
- match := portRE.FindStringSubmatch(p.buffer.String())
- if len(match) == 2 {
- port, err := strconv.Atoi(match[1])
- if err != nil {
- return 0, err
- }
- p.port <- port
- close(p.port)
- p.port = nil
- }
- }
- return len(data), nil
-}
-
-// runParallelWithServer() starts r.numRunners instances of the CTS server test
-// runner, and issues test run requests to those servers, concurrently.
-func (r *runner) runParallelWithServer() error {
- // Create a chan of test indices.
- // This will be read by the test runner goroutines.
- caseIndices := make(chan int, len(r.testcases))
- for i := range r.testcases {
- caseIndices <- i
- }
- close(caseIndices)
-
- // Create a chan for the test results.
- // This will be written to by the test runner goroutines.
- results := make(chan result, len(r.testcases))
-
- // Spin up the test runner goroutines
- wg := &sync.WaitGroup{}
- for i := 0; i < r.numRunners; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- if err := r.runServer(caseIndices, results); err != nil {
- results <- result{
- status: fail,
- error: fmt.Errorf("Test server error: %w", err),
- }
- }
- }()
- }
-
- r.streamResults(wg, results)
- return nil
-}
-
-type redirectingWriter struct {
- io.Writer
-}
-
-// runServer starts a test runner server instance, takes case indices from
-// caseIndices, and requests the server run the test with the given index.
-// The result of the test run is written to the results chan.
-// Once the caseIndices chan has been closed, the server is stopped and
-// runServer returns.
-func (r *runner) runServer(caseIndices <-chan int, results chan<- result) error {
- var port int
- var rw redirectingWriter
-
- stopServer := func() {}
- startServer := func() error {
- args := []string{
- "-e", r.evalScript("server"), // Evaluate 'eval'.
- "--",
- // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
- // and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
- // Actual arguments begin here
- "--gpu-provider", r.dawnNode,
- }
- for _, f := range r.flags {
- args = append(args, "--gpu-provider-flag", f)
- }
-
- ctx := mainCtx
- cmd := exec.CommandContext(ctx, r.node, args...)
-
- serverLog := &bytes.Buffer{}
-
- pl := newPortListener()
-
- cmd.Dir = r.cts
- cmd.Stdout = io.MultiWriter(&rw, serverLog, &pl)
- cmd.Stderr = io.MultiWriter(&rw, serverLog)
-
- err := cmd.Start()
- if err != nil {
- return fmt.Errorf("failed to start test runner server: %v", err)
- }
-
- select {
- case port = <-pl.port:
- case <-time.After(time.Second * 10):
- return fmt.Errorf("timeout waiting for server port:\n%v", serverLog.String())
- case <-ctx.Done():
- return ctx.Err()
- }
-
- return nil
- }
- stopServer = func() {
- if port > 0 {
- go http.Post(fmt.Sprintf("http://localhost:%v/terminate", port), "", &bytes.Buffer{})
- time.Sleep(time.Millisecond * 100)
- port = 0
- }
- }
-
- for idx := range caseIndices {
- // Redirect the server log per test case
- caseServerLog := &bytes.Buffer{}
- rw.Writer = caseServerLog
-
- if port == 0 {
- if err := startServer(); err != nil {
- return err
- }
- }
-
- res := result{index: idx, testcase: r.testcases[idx]}
-
- type Response struct {
- Status string
- Message string
- }
- postResp, err := http.Post(fmt.Sprintf("http://localhost:%v/run?%v", port, r.testcases[idx]), "", &bytes.Buffer{})
- if err != nil {
- res.error = fmt.Errorf("server POST failure. Restarting server...")
- res.status = fail
- results <- res
- stopServer()
- continue
- }
-
- if postResp.StatusCode == http.StatusOK {
- var resp Response
- if err := json.NewDecoder(postResp.Body).Decode(&resp); err != nil {
- res.error = fmt.Errorf("server response decode failure")
- res.status = fail
- results <- res
- continue
- }
-
- switch resp.Status {
- case "pass":
- res.status = pass
- res.message = resp.Message + caseServerLog.String()
- case "warn":
- res.status = warn
- res.message = resp.Message + caseServerLog.String()
- case "fail":
- res.status = fail
- res.message = resp.Message + caseServerLog.String()
- case "skip":
- res.status = skip
- res.message = resp.Message + caseServerLog.String()
- default:
- res.status = fail
- res.error = fmt.Errorf("unknown status: '%v'", resp.Status)
- }
- } else {
- msg, err := ioutil.ReadAll(postResp.Body)
- if err != nil {
- msg = []byte(err.Error())
- }
- res.status = fail
- res.error = fmt.Errorf("server error: %v", string(msg))
- }
- results <- res
- }
-
- stopServer()
- return nil
-}
-
-// runParallelIsolated() calls the CTS command-line test runner to run each
-// testcase in a separate process. This reduces possibility of state leakage
-// between tests.
-// Up to r.numRunners tests will be run concurrently.
-func (r *runner) runParallelIsolated() error {
- // Create a chan of test indices.
- // This will be read by the test runner goroutines.
- caseIndices := make(chan int, len(r.testcases))
- for i := range r.testcases {
- caseIndices <- i
- }
- close(caseIndices)
-
- // Create a chan for the test results.
- // This will be written to by the test runner goroutines.
- results := make(chan result, len(r.testcases))
-
- // Spin up the test runner goroutines
- wg := &sync.WaitGroup{}
- for i := 0; i < r.numRunners; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- for idx := range caseIndices {
- res := r.runTestcase(r.testcases[idx])
- res.index = idx
- results <- res
- }
- }()
- }
-
- r.streamResults(wg, results)
- return nil
-}
-
-// streamResults reads from the chan 'results', printing the results in test-id
-// sequential order. Once the WaitGroup 'wg' is complete, streamResults() will
-// automatically close the 'results' chan.
-// Once all the results have been printed, a summary will be printed and the
-// function will return.
-func (r *runner) streamResults(wg *sync.WaitGroup, results chan result) {
- // Create another goroutine to close the results chan when all the runner
- // goroutines have finished.
- start := time.Now()
- var timeTaken time.Duration
- go func() {
- wg.Wait()
- timeTaken = time.Since(start)
- close(results)
- }()
-
- // Total number of tests, test counts binned by status
- numTests, numByStatus := len(r.testcases), map[status]int{}
-
- // Helper function for printing a progress bar.
- lastStatusUpdate, animFrame := time.Now(), 0
- updateProgress := func() {
- printANSIProgressBar(animFrame, numTests, numByStatus)
- animFrame++
- lastStatusUpdate = time.Now()
- }
-
- // Pull test results as they become available.
- // Update the status counts, and print any failures (or all test results if --verbose)
- progressUpdateRate := time.Millisecond * 10
- if !colors {
- // No colors == no cursor control. Reduce progress updates so that
- // we're not printing endless progress bars.
- progressUpdateRate = time.Second
- }
-
- for res := range results {
- r.log.logResults(res)
-
- numByStatus[res.status] = numByStatus[res.status] + 1
- name := res.testcase
- if r.verbose || res.error != nil || (res.status != pass && res.status != skip) {
- fmt.Printf("%v - %v: %v\n", name, res.status, res.message)
- if res.error != nil {
- fmt.Println(res.error)
- }
- updateProgress()
- }
- if time.Since(lastStatusUpdate) > progressUpdateRate {
- updateProgress()
- }
- }
- printANSIProgressBar(animFrame, numTests, numByStatus)
-
- // All done. Print final stats.
- fmt.Printf(`
-Completed in %v
-
-pass: %v (%v)
-fail: %v (%v)
-skip: %v (%v)
-timeout: %v (%v)
-`,
- timeTaken,
- numByStatus[pass], percentage(numByStatus[pass], numTests),
- numByStatus[fail], percentage(numByStatus[fail], numTests),
- numByStatus[skip], percentage(numByStatus[skip], numTests),
- numByStatus[timeout], percentage(numByStatus[timeout], numTests),
- )
-}
-
-// runSerially() calls the CTS test runner to run the test query in a single
-// process.
-func (r *runner) runSerially(query string) error {
- start := time.Now()
- result := r.runTestcase(query)
- timeTaken := time.Since(start)
-
- if r.verbose {
- fmt.Println(result)
- }
- fmt.Println("Status:", result.status)
- fmt.Println("Completed in", timeTaken)
- return nil
-}
-
-// status is an enumerator of test result status
-type status string
-
-const (
- pass status = "pass"
- warn status = "warn"
- fail status = "fail"
- skip status = "skip"
- timeout status = "timeout"
-)
-
-// result holds the information about a completed test
-type result struct {
- index int
- testcase string
- status status
- message string
- error error
-}
-
-// runTestcase() runs the CTS testcase with the given query, returning the test
-// result.
-func (r *runner) runTestcase(query string) result {
- ctx, cancel := context.WithTimeout(mainCtx, testTimeout)
- defer cancel()
-
- args := []string{
- "-e", r.evalScript("cmdline"), // Evaluate 'eval'.
- "--",
- // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
- // and slices away the first two arguments. When running with '-e', args
- // start at 1, so just inject a dummy argument.
- "dummy-arg",
- // Actual arguments begin here
- "--gpu-provider", r.dawnNode,
- "--verbose",
- }
- for _, f := range r.flags {
- args = append(args, "--gpu-provider-flag", f)
- }
- args = append(args, query)
-
- cmd := exec.CommandContext(ctx, r.node, args...)
- cmd.Dir = r.cts
-
- var buf bytes.Buffer
- cmd.Stdout = &buf
- cmd.Stderr = &buf
-
- err := cmd.Run()
- msg := buf.String()
- switch {
- case errors.Is(err, context.DeadlineExceeded):
- return result{testcase: query, status: timeout, message: msg}
- case strings.Contains(msg, "[fail]"):
- return result{testcase: query, status: fail, message: msg}
- case strings.Contains(msg, "[warn]"):
- return result{testcase: query, status: warn, message: msg}
- case strings.Contains(msg, "[skip]"):
- return result{testcase: query, status: skip, message: msg}
- case strings.Contains(msg, "[pass]"), err == nil:
- return result{testcase: query, status: pass, message: msg}
- }
- return result{testcase: query, status: fail, message: fmt.Sprint(msg, err), error: err}
-}
-
-// filterTestcases returns in with empty strings removed
-func filterTestcases(in []string) []string {
- out := make([]string, 0, len(in))
- for _, c := range in {
- if c != "" {
- out = append(out, c)
- }
- }
- return out
-}
-
-// percentage returns the percentage of n out of total as a string
-func percentage(n, total int) string {
- if total == 0 {
- return "-"
- }
- f := float64(n) / float64(total)
- return fmt.Sprintf("%.1f%c", f*100.0, '%')
-}
-
-// isDir returns true if the path resolves to a directory
-func isDir(path string) bool {
- s, err := os.Stat(path)
- if err != nil {
- return false
- }
- return s.IsDir()
-}
-
-// isFile returns true if the path resolves to a file
-func isFile(path string) bool {
- s, err := os.Stat(path)
- if err != nil {
- return false
- }
- return !s.IsDir()
-}
-
-// printANSIProgressBar prints a colored progress bar, providing realtime
-// information about the status of the CTS run.
-// Note: We'll want to skip this if !isatty or if we're running on windows.
-func printANSIProgressBar(animFrame int, numTests int, numByStatus map[status]int) {
- const (
- barWidth = 50
-
- escape = "\u001B["
- positionLeft = escape + "0G"
- red = escape + "31m"
- green = escape + "32m"
- yellow = escape + "33m"
- blue = escape + "34m"
- magenta = escape + "35m"
- cyan = escape + "36m"
- white = escape + "37m"
- reset = escape + "0m"
- )
-
- animSymbols := []rune{'⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'}
- blockSymbols := []rune{'▏', '▎', '▍', '▌', '▋', '▊', '▉'}
-
- numBlocksPrinted := 0
-
- fmt.Fprint(stdout, string(animSymbols[animFrame%len(animSymbols)]), " [")
- animFrame++
-
- numFinished := 0
-
- for _, ty := range []struct {
- status status
- color string
- }{{pass, green}, {warn, yellow}, {skip, blue}, {timeout, yellow}, {fail, red}} {
- num := numByStatus[ty.status]
- numFinished += num
- statusFrac := float64(num) / float64(numTests)
- fNumBlocks := barWidth * statusFrac
- fmt.Fprint(stdout, ty.color)
- numBlocks := int(math.Ceil(fNumBlocks))
- if numBlocks > 1 {
- fmt.Print(strings.Repeat(string("▉"), numBlocks))
- }
- if numBlocks > 0 {
- frac := fNumBlocks - math.Floor(fNumBlocks)
- symbol := blockSymbols[int(math.Round(frac*float64(len(blockSymbols)-1)))]
- fmt.Print(string(symbol))
- }
- numBlocksPrinted += numBlocks
- }
-
- if barWidth > numBlocksPrinted {
- fmt.Print(strings.Repeat(string(" "), barWidth-numBlocksPrinted))
- }
- fmt.Fprint(stdout, reset)
- fmt.Print("] ", percentage(numFinished, numTests))
-
- if colors {
- // move cursor to start of line so the bar is overridden
- fmt.Fprint(stdout, positionLeft)
- } else {
- // cannot move cursor, so newline
- fmt.Println()
- }
-}
diff --git a/chromium/third_party/dawn/src/dawn_node/utils/Debug.h b/chromium/third_party/dawn/src/dawn_node/utils/Debug.h
deleted file mode 100644
index 38735409817..00000000000
--- a/chromium/third_party/dawn/src/dawn_node/utils/Debug.h
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNODE_UTILS_DEBUG_H_
-#define DAWNNODE_UTILS_DEBUG_H_
-
-#include <iostream>
-#include <optional>
-#include <sstream>
-#include <unordered_map>
-#include <variant>
-#include <vector>
-
-#include "dawn/webgpu_cpp_print.h"
-
-namespace wgpu { namespace utils {
-
- // Write() is a helper for printing container types to the std::ostream.
- // Write() is used by the LOG() macro below.
-
- // Forward declarations
- inline std::ostream& Write(std::ostream& out) {
- return out;
- }
- template <typename T>
- inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
- template <typename T>
- inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
- template <typename K, typename V>
- inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
- template <typename... TYS>
- inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
- template <typename VALUE>
- std::ostream& Write(std::ostream& out, VALUE&& value);
-
- // Write() implementations
- template <typename T>
- std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
- if (value.has_value()) {
- return Write(out, value.value());
- }
- return out << "<undefined>";
- }
-
- template <typename T>
- std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
- out << "[";
- bool first = true;
- for (const auto& el : value) {
- if (!first) {
- out << ", ";
- }
- first = false;
- Write(out, el);
- }
- return out << "]";
- }
-
- template <typename K, typename V>
- std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
- out << "{";
- bool first = true;
- for (auto it : value) {
- if (!first) {
- out << ", ";
- }
- first = false;
- Write(out, it.first);
- out << ": ";
- Write(out, it.second);
- }
- return out << "}";
- }
-
- template <typename... TYS>
- std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
- std::visit([&](auto&& v) { Write(out, v); }, value);
- return out;
- }
-
- template <typename VALUE>
- std::ostream& Write(std::ostream& out, VALUE&& value) {
- return out << std::forward<VALUE>(value);
- }
-
- template <typename FIRST, typename... REST>
- inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
- Write(out, std::forward<FIRST>(first));
- Write(out, std::forward<REST>(rest)...);
- return out;
- }
-
- // Fatal() prints a message to stdout with the given file, line, function and optional message,
- // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
- // UNIMPLEMENTED() macro below.
- template <typename... MSG_ARGS>
- [[noreturn]] inline void Fatal(const char* reason,
- const char* file,
- int line,
- const char* function,
- MSG_ARGS&&... msg_args) {
- std::stringstream msg;
- msg << file << ":" << line << ": " << reason << ": " << function << "()";
- if constexpr (sizeof...(msg_args) > 0) {
- msg << " ";
- Write(msg, std::forward<MSG_ARGS>(msg_args)...);
- }
- std::cout << msg.str() << std::endl;
- abort();
- }
-
-// LOG() prints the current file, line and function to stdout, followed by a
-// string representation of all the variadic arguments.
-#define LOG(...) \
- ::wgpu::utils::Write(std::cout << __FILE__ << ":" << __LINE__ << " " << __FUNCTION__ << ": ", \
- ##__VA_ARGS__) \
- << std::endl
-
-// UNIMPLEMENTED() prints 'UNIMPLEMENTED' with the current file, line and
-// function to stdout, along with the optional message, then calls abort().
-// The macro calls Fatal(), which is annotated with [[noreturn]].
-// Used to stub code that has not yet been implemented.
-#define UNIMPLEMENTED(...) \
- ::wgpu::utils::Fatal("UNIMPLEMENTED", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
-
-// UNREACHABLE() prints 'UNREACHABLE' with the current file, line and
-// function to stdout, along with the optional message, then calls abort().
-// The macro calls Fatal(), which is annotated with [[noreturn]].
-// Used to stub code that has not yet been implemented.
-#define UNREACHABLE(...) \
- ::wgpu::utils::Fatal("UNREACHABLE", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
-
-}} // namespace wgpu::utils
-
-#endif // DAWNNODE_UTILS_DEBUG_H_
diff --git a/chromium/third_party/dawn/src/dawn_platform/BUILD.gn b/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
index cbd323814e2..92df854432c 100644
--- a/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Dawn Authors
+# Copyright 2022 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,30 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("${dawn_root}/scripts/dawn_component.gni")
-
-dawn_component("dawn_platform") {
- DEFINE_PREFIX = "DAWN_PLATFORM"
-
- configs = [ "${dawn_root}/src/common:dawn_internal" ]
-
- sources = [
- "${dawn_root}/src/include/dawn_platform/DawnPlatform.h",
- "${dawn_root}/src/include/dawn_platform/dawn_platform_export.h",
- "DawnPlatform.cpp",
- "WorkerThread.cpp",
- "WorkerThread.h",
- "tracing/EventTracer.cpp",
- "tracing/EventTracer.h",
- "tracing/TraceEvent.h",
- ]
-
- deps = [ "${dawn_root}/src/common" ]
-
- public_deps = [
- # DawnPlatform.h has #include <dawn/webgpu.h>
- "${dawn_root}/src/dawn:dawn_headers",
- ]
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_platform") {
+ public_deps = [ "../dawn/platform" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt
deleted file mode 100644
index 92372bbe715..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2020 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-add_library(dawn_platform ${DAWN_DUMMY_FILE})
-
-target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")
-if(BUILD_SHARED_LIBS)
- target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_SHARED_LIBRARY")
-endif()
-
-target_sources(dawn_platform PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_platform/DawnPlatform.h"
- "${DAWN_INCLUDE_DIR}/dawn_platform/dawn_platform_export.h"
- "DawnPlatform.cpp"
- "WorkerThread.cpp"
- "WorkerThread.h"
- "tracing/EventTracer.cpp"
- "tracing/EventTracer.h"
- "tracing/TraceEvent.h"
-)
-target_link_libraries(dawn_platform PUBLIC dawn_headers PRIVATE dawn_internal_config dawn_common)
diff --git a/chromium/third_party/dawn/src/dawn_platform/DawnPlatform.cpp b/chromium/third_party/dawn/src/dawn_platform/DawnPlatform.cpp
deleted file mode 100644
index 1bedbcb1411..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/DawnPlatform.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_platform/DawnPlatform.h"
-#include "dawn_platform/WorkerThread.h"
-
-#include "common/Assert.h"
-
-namespace dawn_platform {
-
- CachingInterface::CachingInterface() = default;
-
- CachingInterface::~CachingInterface() = default;
-
- Platform::Platform() = default;
-
- Platform::~Platform() = default;
-
- const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
- static unsigned char disabled = 0;
- return &disabled;
- }
-
- double Platform::MonotonicallyIncreasingTime() {
- return 0;
- }
-
- uint64_t Platform::AddTraceEvent(char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- double timestamp,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags) {
- // AddTraceEvent cannot be called if events are disabled.
- ASSERT(false);
- return 0;
- }
-
- dawn_platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
- size_t fingerprintSize) {
- return nullptr;
- }
-
- std::unique_ptr<dawn_platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
- return std::make_unique<AsyncWorkerThreadPool>();
- }
-
-} // namespace dawn_platform
diff --git a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp b/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp
deleted file mode 100644
index 025ed1f62c7..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_platform/WorkerThread.h"
-
-#include <condition_variable>
-#include <functional>
-#include <thread>
-
-#include "common/Assert.h"
-
-namespace {
-
- class AsyncWaitableEventImpl {
- public:
- AsyncWaitableEventImpl() : mIsComplete(false) {
- }
-
- void Wait() {
- std::unique_lock<std::mutex> lock(mMutex);
- mCondition.wait(lock, [this] { return mIsComplete; });
- }
-
- bool IsComplete() {
- std::lock_guard<std::mutex> lock(mMutex);
- return mIsComplete;
- }
-
- void MarkAsComplete() {
- {
- std::lock_guard<std::mutex> lock(mMutex);
- mIsComplete = true;
- }
- mCondition.notify_all();
- }
-
- private:
- std::mutex mMutex;
- std::condition_variable mCondition;
- bool mIsComplete;
- };
-
- class AsyncWaitableEvent final : public dawn_platform::WaitableEvent {
- public:
- explicit AsyncWaitableEvent()
- : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {
- }
-
- void Wait() override {
- mWaitableEventImpl->Wait();
- }
-
- bool IsComplete() override {
- return mWaitableEventImpl->IsComplete();
- }
-
- std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
- return mWaitableEventImpl;
- }
-
- private:
- std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
- };
-
-} // anonymous namespace
-
-namespace dawn_platform {
-
- std::unique_ptr<dawn_platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
- dawn_platform::PostWorkerTaskCallback callback,
- void* userdata) {
- std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
-
- std::function<void()> doTask =
- [callback, userdata, waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
- callback(userdata);
- waitableEventImpl->MarkAsComplete();
- };
-
- std::thread thread(doTask);
- thread.detach();
-
- return waitableEvent;
- }
-
-} // namespace dawn_platform
diff --git a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.h b/chromium/third_party/dawn/src/dawn_platform/WorkerThread.h
deleted file mode 100644
index 49f81ad4fe8..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/WorkerThread.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef COMMON_WORKERTHREAD_H_
-#define COMMON_WORKERTHREAD_H_
-
-#include "common/NonCopyable.h"
-#include "dawn_platform/DawnPlatform.h"
-
-namespace dawn_platform {
-
- class AsyncWorkerThreadPool : public dawn_platform::WorkerTaskPool, public NonCopyable {
- public:
- std::unique_ptr<dawn_platform::WaitableEvent> PostWorkerTask(
- dawn_platform::PostWorkerTaskCallback callback,
- void* userdata) override;
- };
-
-} // namespace dawn_platform
-
-#endif
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp
deleted file mode 100644
index a110340f58a..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_platform/tracing/EventTracer.h"
-#include "common/Assert.h"
-#include "dawn_platform/DawnPlatform.h"
-
-namespace dawn_platform { namespace tracing {
-
- const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
- static unsigned char disabled = 0;
- if (platform == nullptr) {
- return &disabled;
- }
-
- const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
- if (categoryEnabledFlag != nullptr) {
- return categoryEnabledFlag;
- }
-
- return &disabled;
- }
-
- TraceEventHandle AddTraceEvent(Platform* platform,
- char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags) {
- ASSERT(platform != nullptr);
-
- double timestamp = platform->MonotonicallyIncreasingTime();
- if (timestamp != 0) {
- TraceEventHandle handle =
- platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
- argNames, argTypes, argValues, flags);
- return handle;
- }
-
- return static_cast<TraceEventHandle>(0);
- }
-
-}} // namespace dawn_platform::tracing
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h
deleted file mode 100644
index 5d35a876111..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNPLATFORM_TRACING_EVENTTRACER_H_
-#define DAWNPLATFORM_TRACING_EVENTTRACER_H_
-
-#include "dawn_platform/dawn_platform_export.h"
-
-#include <cstdint>
-
-namespace dawn_platform {
-
- class Platform;
- enum class TraceCategory;
-
- namespace tracing {
-
- using TraceEventHandle = uint64_t;
-
- DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(
- Platform* platform,
- TraceCategory category);
-
- // TODO(enga): Simplify this API.
- DAWN_PLATFORM_EXPORT TraceEventHandle
- AddTraceEvent(Platform* platform,
- char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags);
-
- } // namespace tracing
-} // namespace dawn_platform
-
-#endif // DAWNPLATFORM_TRACING_EVENTTRACER_H_
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h b/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h
deleted file mode 100644
index 8d375ef0366..00000000000
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h
+++ /dev/null
@@ -1,991 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Trace events are for tracking application performance and resource usage.
-// Macros are provided to track:
-// Begin and end of function calls
-// Counters
-//
-// Events are issued against categories. Whereas LOG's
-// categories are statically defined, TRACE categories are created
-// implicitly with a string. For example:
-// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent")
-//
-// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
-// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
-// doSomethingCostly()
-// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
-// Note: our tools can't always determine the correct BEGIN/END pairs unless
-// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you need them
-// to be in separate scopes.
-//
-// A common use case is to trace entire function scopes. This
-// issues a trace BEGIN and END automatically:
-// void doSomethingCostly() {
-// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
-// ...
-// }
-//
-// Additional parameters can be associated with an event:
-// void doSomethingCostly2(int howMuch) {
-// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
-// "howMuch", howMuch);
-// ...
-// }
-//
-// The trace system will automatically add to this information the
-// current process id, thread id, and a timestamp in microseconds.
-//
-// To trace an asynchronous procedure such as an IPC send/receive, use ASYNC_BEGIN and
-// ASYNC_END:
-// [single threaded sender code]
-// static int send_count = 0;
-// ++send_count;
-// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
-// Send(new MyMessage(send_count));
-// [receive code]
-// void OnMyMessage(send_count) {
-// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
-// }
-// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
-// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process. Pointers can
-// be used for the ID parameter, and they will be mangled internally so that
-// the same pointer on two different processes will not match. For example:
-// class MyTracedClass {
-// public:
-// MyTracedClass() {
-// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
-// }
-// ~MyTracedClass() {
-// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
-// }
-// }
-//
-// Trace event also supports counters, which is a way to track a quantity
-// as it varies over time. Counters are created with the following macro:
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
-//
-// Counters are process-specific. The macro itself can be issued from any
-// thread, however.
-//
-// Sometimes, you want to track two counters at once. You can do this with two
-// counter macros:
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
-// Or you can do it with a combined macro:
-// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
-// "bytesPinned", g_myCounterValue[0],
-// "bytesAllocated", g_myCounterValue[1]);
-// This indicates to the tracing UI that these counters should be displayed
-// in a single graph, as a summed area chart.
-//
-// Since counters are in a global namespace, you may want to disembiguate with a
-// unique ID, by using the TRACE_COUNTER_ID* variations.
-//
-// By default, trace collection is compiled in, but turned off at runtime.
-// Collecting trace data is the responsibility of the embedding
-// application. In Chrome's case, navigating to about:tracing will turn on
-// tracing and display data collected across all active processes.
-//
-//
-// Memory scoping note:
-// Tracing copies the pointers, not the string content, of the strings passed
-// in for category, name, and arg_names. Thus, the following code will
-// cause problems:
-// char* str = strdup("impprtantName");
-// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
-// free(str); // Trace system now has dangling pointer
-//
-// To avoid this issue with the |name| and |arg_name| parameters, use the
-// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
-// Notes: The category must always be in a long-lived char* (i.e. static const).
-// The |arg_values|, when used, are always deep copied with the _COPY
-// macros.
-//
-// When are string argument values copied:
-// const char* arg_values are only referenced by default:
-// TRACE_EVENT1("category", "name",
-// "arg1", "literal string is only referenced");
-// Use TRACE_STR_COPY to force copying of a const char*:
-// TRACE_EVENT1("category", "name",
-// "arg1", TRACE_STR_COPY("string will be copied"));
-// std::string arg_values are always copied:
-// TRACE_EVENT1("category", "name",
-// "arg1", std::string("string will be copied"));
-//
-//
-// Thread Safety:
-// A thread safe singleton and mutex are used for thread safety. Category
-// enabled flags are used to limit the performance impact when the system
-// is not enabled.
-//
-// TRACE_EVENT macros first cache a pointer to a category. The categories are
-// statically allocated and safe at all times, even after exit. Fetching a
-// category is protected by the TraceLog::lock_. Multiple threads initializing
-// the static variable is safe, as they will be serialized by the lock and
-// multiple calls will return the same pointer to the category.
-//
-// Then the category_enabled flag is checked. This is a unsigned char, and
-// not intended to be multithread safe. It optimizes access to addTraceEvent
-// which is threadsafe internally via TraceLog::lock_. The enabled flag may
-// cause some threads to incorrectly call or skip calling addTraceEvent near
-// the time of the system being enabled or disabled. This is acceptable as
-// we tolerate some data loss while the system is being enabled/disabled and
-// because addTraceEvent is threadsafe internally and checks the enabled state
-// again under lock.
-//
-// Without the use of these static category pointers and enabled flags all
-// trace points would carry a significant performance cost of aquiring a lock
-// and resolving the category.
-
-#ifndef DAWNPLATFORM_TRACING_TRACEEVENT_H_
-#define DAWNPLATFORM_TRACING_TRACEEVENT_H_
-
-#include <string>
-
-#include "dawn_platform/tracing/EventTracer.h"
-
-// Records a pair of begin and end events called "name" for the current
-// scope, with 0, 1 or 2 associated arguments. If the category is not
-// enabled, then this does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0)
-#define TRACE_EVENT1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val)
-#define TRACE_EVENT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Records a single event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_INSTANT0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_INSTANT1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_INSTANT0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_INSTANT1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_INSTANT2(platform, category, name, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_INSTANT, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_BEGIN0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_BEGIN1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_BEGIN0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_BEGIN1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_BEGIN2(platform, category, name, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_BEGIN, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records a single END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_END0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
- TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_END1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_END0(platform, category, name) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_END1(platform, category, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_END2(platform, category, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_END, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records the value of a counter called "name" immediately. Value
-// must be representable as a 32 bit integer.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_COUNTER1(platform, category, name, value) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
-#define TRACE_COPY_COUNTER1(platform, category, name, value) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
-
-// Records the values of a multi-parted counter called "name" immediately.
-// The UI will treat value1 and value2 as parts of a whole, displaying their
-// values as a stacked-bar chart.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_COUNTER2(platform, category, name, value1_name, value1_val, value2_name, value2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
- TRACE_EVENT_FLAG_NONE, 0, value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-#define TRACE_COPY_COUNTER2(platform, category, name, value1_name, value1_val, value2_name, \
- value2_val) \
- INTERNAL_TRACE_EVENT_ADD(platform, TRACE_EVENT_PHASE_COUNTER, category, name, \
- TRACE_EVENT_FLAG_COPY, 0, value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-
-// Records the value of a counter called "name" immediately. Value
-// must be representable as a 32 bit integer.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to disambiguate counters with the same name. It must either
-// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
-// will be xored with a hash of the process ID so that the same pointer on
-// two different processes will not collide.
-#define TRACE_COUNTER_ID1(platform, category, name, id, value) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, "value", static_cast<int>(value))
-#define TRACE_COPY_COUNTER_ID1(platform, category, name, id, value) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, "value", static_cast<int>(value))
-
-// Records the values of a multi-parted counter called "name" immediately.
-// The UI will treat value1 and value2 as parts of a whole, displaying their
-// values as a stacked-bar chart.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to disambiguate counters with the same name. It must either
-// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
-// will be xored with a hash of the process ID so that the same pointer on
-// two different processes will not collide.
-#define TRACE_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
- value2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
-#define TRACE_COPY_COUNTER_ID2(platform, category, name, id, value1_name, value1_val, value2_name, \
- value2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
- platform, TRACE_EVENT_PHASE_COUNTER, category, name, id, TRACE_EVENT_FLAG_COPY, 0, \
- value1_name, static_cast<int>(value1_val), value2_name, static_cast<int>(value2_val))
-
-// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
-// events are considered to match if their category, name and id values all
-// match. |id| must either be a pointer or an integer value up to 64 bits. If
-// it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// An asynchronous operation can consist of multiple phases. The first phase is
-// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
-// ASYNC_STEP_BEGIN macros. When the operation completes, call ASYNC_END.
-// An async operation can span threads and processes, but all events in that
-// operation must use the same |name| and |id|. Each event can have its own
-// args.
-#define TRACE_EVENT_ASYNC_BEGIN0(platform, category, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN0(platform, category, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN1(platform, category, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN2(platform, category, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_BEGIN, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Records a single ASYNC_STEP event for |step| immediately. If the category
-// is not enabled, then this does nothing. The |name| and |id| must match the
-// ASYNC_BEGIN event above. The |step| param identifies this step within the
-// async event. This should be called at the beginning of the next phase of an
-// asynchronous operation.
-#define TRACE_EVENT_ASYNC_STEP0(platform, category, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, "step", step)
-#define TRACE_EVENT_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_ASYNC_STEP0(platform, category, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, "step", step)
-#define TRACE_EVENT_COPY_ASYNC_STEP1(platform, category, name, id, step, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_STEP, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name, arg1_val)
-
-// Records a single ASYNC_END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-#define TRACE_EVENT_ASYNC_END0(platform, category, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-#define TRACE_EVENT_COPY_ASYNC_END0(platform, category, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_ASYNC_END1(platform, category, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_ASYNC_END2(platform, category, name, id, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_ASYNC_END, category, name, id, \
- TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
-// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
-// events.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
-// considered as a match if their category_group, name and id all match.
-// - |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// - |id| is used to match a child NESTABLE_ASYNC event with its parent
-// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
-// be logged using the same id and category_group.
-//
-// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
-// at the first NESTABLE_ASYNC event of that id, and unmatched
-// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
-// NESTABLE_ASYNC event of that id. Corresponding warning messages for
-// unmatched events will be shown in the analysis view.
-
-// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
-// 0, 1 or 2 associated arguments. If the category is not enabled, then this
-// does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
-// or 2 associated arguments. If the category is not enabled, then this does
-// nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_END0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
-// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
-// associated argument. If the category is not enabled, then this does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_END1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_NESTABLE_ASYNC_END2(platform, category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with none, one or two associated argument. If the category is not enabled,
-// then this does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0)
-
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(platform, category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val)
-
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(platform, category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, 0, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(platform, category_group, name, id, \
- arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(platform, category_group, name, id, \
- arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, 0, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
-// |timestamp| provided.
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
- timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
- timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(platform, category_group, name, id, \
- timestamp, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0, arg1_name, \
- arg1_val)
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(platform, category_group, name, id, \
- timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(platform, category_group, name, id, \
- timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(platform, category_group, name, id, \
- timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- platform, TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY, 0)
-
-// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
-// events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// FLOW events are different from ASYNC events in how they are drawn by the
-// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
-// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
-// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
-// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
-// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
-// macros. When the operation completes, call FLOW_END. An async operation can
-// span threads and processes, but all events in that operation must use the
-// same |name| and |id|. Each event can have its own args.
-#define TRACE_EVENT_FLOW_BEGIN0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_FLOW_BEGIN1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN2(platform, category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Records a single FLOW_STEP event for |step| immediately. If the category
-// is not enabled, then this does nothing. The |name| and |id| must match the
-// FLOW_BEGIN event above. The |step| param identifies this step within the
-// async event. This should be called at the beginning of the next phase of an
-// asynchronous operation.
-#define TRACE_EVENT_FLOW_STEP0(platform, category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, "step", step)
-#define TRACE_EVENT_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, "step", step, arg1_name, \
- arg1_val)
-#define TRACE_EVENT_COPY_FLOW_STEP0(platform, category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, "step", step)
-#define TRACE_EVENT_COPY_FLOW_STEP1(platform, category_group, name, id, step, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, "step", step, arg1_name, \
- arg1_val)
-
-// Records a single FLOW_END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-#define TRACE_EVENT_FLOW_END0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0)
-#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_BIND_TO_ENCLOSING, 0)
-#define TRACE_EVENT_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val, arg2_name, \
- arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_NONE, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-#define TRACE_EVENT_COPY_FLOW_END0(platform, category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0)
-#define TRACE_EVENT_COPY_FLOW_END1(platform, category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_END2(platform, category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, TRACE_EVENT_PHASE_FLOW_END, category_group, name, \
- id, TRACE_EVENT_FLAG_COPY, 0, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Creates a scope of a sampling state with the given category and name (both must
-// be constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-//
-// { // The sampling state is set within this scope.
-// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-// ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
- TraceEvent::SamplingStateScope<bucket_number> traceEventSamplingScope(category "\0" name);
-
-// Returns a current sampling state of the given bucket.
-// The format of the returned string is "category\0name".
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
- TraceEvent::SamplingStateScope<bucket_number>::current()
-
-// Sets a current sampling state of the given bucket.
-// |category| and |name| have to be constant strings.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, name) \
- TraceEvent::SamplingStateScope<bucket_number>::set(category "\0" name)
-
-// Sets a current sampling state of the given bucket.
-// |categoryAndName| doesn't need to be a constant string.
-// The format of the string is "category\0name".
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(bucket_number, categoryAndName) \
- TraceEvent::SamplingStateScope<bucket_number>::set(categoryAndName)
-
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
-
-////////////////////////////////////////////////////////////////////////////////
-// Implementation specific tracing API definitions.
-
-// Get a pointer to the enabled state of the given trace category. Only
-// long-lived literal strings should be given as the category name. The returned
-// pointer can be held permanently in a local static for example. If the
-// unsigned char is non-zero, tracing is enabled. If tracing is enabled,
-// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
-// between the load of the tracing state and the call to
-// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
-// for best performance when tracing is disabled.
-// const unsigned char*
-// TRACE_EVENT_API_GET_CATEGORY_ENABLED(const char* category_name)
-#define TRACE_EVENT_API_GET_CATEGORY_ENABLED dawn_platform::tracing::GetTraceCategoryEnabledFlag
-
-// Add a trace event to the platform tracing system.
-// void TRACE_EVENT_API_ADD_TRACE_EVENT(
-// char phase,
-// const unsigned char* category_enabled,
-// const char* name,
-// unsigned long long id,
-// int num_args,
-// const char** arg_names,
-// const unsigned char* arg_types,
-// const unsigned long long* arg_values,
-// unsigned char flags)
-#define TRACE_EVENT_API_ADD_TRACE_EVENT dawn_platform::tracing::AddTraceEvent
-
-////////////////////////////////////////////////////////////////////////////////
-
-// Implementation detail: trace event macros create temporary variables
-// to keep instrumentation overhead low. These macros give each temporary
-// variable a unique name based on the line number to prevent name collissions.
-#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
-#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
-#define INTERNALTRACEEVENTUID(name_prefix) INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
-
-// Implementation detail: internal macro to create static category.
-#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category) \
- static const unsigned char* INTERNALTRACEEVENTUID(catstatic) = 0; \
- if (!INTERNALTRACEEVENTUID(catstatic)) \
- INTERNALTRACEEVENTUID(catstatic) = TRACE_EVENT_API_GET_CATEGORY_ENABLED(platform, category);
-
-// Implementation detail: internal macro to create static category and add
-// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(platform, phase, category, name, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, ::dawn_platform::TraceCategory::category) \
- if (*INTERNALTRACEEVENTUID(catstatic)) { \
- dawn_platform::TraceEvent::addTraceEvent( \
- platform, phase, INTERNALTRACEEVENTUID(catstatic), name, \
- dawn_platform::TraceEvent::noEventId, flags, __VA_ARGS__); \
- } \
- } while (0)
-
-// Implementation detail: internal macro to create static category and add begin
-// event if the category is enabled. Also adds the end event when the scope
-// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, ::dawn_platform::TraceCategory::category) \
- dawn_platform::TraceEvent::TraceEndOnScopeClose INTERNALTRACEEVENTUID(profileScope); \
- do { \
- if (*INTERNALTRACEEVENTUID(catstatic)) { \
- dawn_platform::TraceEvent::addTraceEvent( \
- platform, TRACE_EVENT_PHASE_BEGIN, INTERNALTRACEEVENTUID(catstatic), name, \
- dawn_platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, __VA_ARGS__); \
- INTERNALTRACEEVENTUID(profileScope) \
- .initialize(platform, INTERNALTRACEEVENTUID(catstatic), name); \
- } \
- } while (0)
-
-// Implementation detail: internal macro to create static category and add
-// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, phase, category, name, id, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, ::dawn_platform::TraceCategory::category) \
- if (*INTERNALTRACEEVENTUID(catstatic)) { \
- unsigned char traceEventFlags = flags | TRACE_EVENT_FLAG_HAS_ID; \
- dawn_platform::TraceEvent::TraceID traceEventTraceID(id, &traceEventFlags); \
- dawn_platform::TraceEvent::addTraceEvent( \
- platform, phase, INTERNALTRACEEVENTUID(catstatic), name, traceEventTraceID.data(), \
- traceEventFlags, __VA_ARGS__); \
- } \
- } while (0)
-
-// Notes regarding the following definitions:
-// New values can be added and propagated to third party libraries, but existing
-// definitions must never be changed, because third party libraries may use old
-// definitions.
-
-// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
-#define TRACE_EVENT_PHASE_BEGIN ('B')
-#define TRACE_EVENT_PHASE_END ('E')
-#define TRACE_EVENT_PHASE_INSTANT ('I')
-#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
-#define TRACE_EVENT_PHASE_ASYNC_STEP ('T')
-#define TRACE_EVENT_PHASE_ASYNC_END ('F')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
-#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
-#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
-#define TRACE_EVENT_PHASE_FLOW_END ('f')
-#define TRACE_EVENT_PHASE_METADATA ('M')
-#define TRACE_EVENT_PHASE_COUNTER ('C')
-#define TRACE_EVENT_PHASE_SAMPLE ('P')
-
-// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
-#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned char>(0))
-#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned char>(1 << 0))
-#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned char>(1 << 1))
-#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned char>(1 << 2))
-
-// Type values for identifying types in the TraceValue union.
-#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
-#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
-#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
-#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
-#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
-#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
-#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
-
-namespace dawn_platform { namespace TraceEvent {
-
- // Specify these values when the corresponding argument of addTraceEvent is not
- // used.
- const int zeroNumArgs = 0;
- const unsigned long long noEventId = 0;
-
- // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
- // are mangled with the Process ID so that they are unlikely to collide when the
- // same pointer is used on different processes.
- class TraceID {
- public:
- explicit TraceID(const void* id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(reinterpret_cast<uintptr_t>(id))) {
- *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
- }
- explicit TraceID(unsigned long long id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned long id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned int id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned short id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) {
- (void)flags;
- }
- explicit TraceID(long long id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(long id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(int id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(short id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
- explicit TraceID(signed char id, unsigned char* flags)
- : m_data(static_cast<unsigned long long>(id)) {
- (void)flags;
- }
-
- unsigned long long data() const {
- return m_data;
- }
-
- private:
- unsigned long long m_data;
- };
-
- // Simple union to store various types as unsigned long long.
- union TraceValueUnion {
- bool m_bool;
- unsigned long long m_uint;
- long long m_int;
- double m_double;
- const void* m_pointer;
- const char* m_string;
- };
-
- // Simple container for const char* that should be copied instead of retained.
- class TraceStringWithCopy {
- public:
- explicit TraceStringWithCopy(const char* str) : m_str(str) {
- }
- operator const char*() const {
- return m_str;
- }
-
- private:
- const char* m_str;
- };
-
-// Define setTraceValue for each allowed type. It stores the type and
-// value in the return arguments. This allows this API to avoid declaring any
-// structures so that it is portable to third_party libraries.
-#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, value_type_id) \
- static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
- TraceValueUnion typeValue; \
- typeValue.union_member = arg; \
- *type = value_type_id; \
- *value = typeValue.m_uint; \
- }
-// Simpler form for int types that can be safely casted.
-#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
- static inline void setTraceValue(actual_type arg, unsigned char* type, uint64_t* value) { \
- *type = value_type_id; \
- *value = static_cast<unsigned long long>(arg); \
- }
-
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
- INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
- INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
- INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&,
- m_string,
- TRACE_VALUE_TYPE_COPY_STRING)
-
-#undef INTERNAL_DECLARE_SET_TRACE_VALUE
-#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
-
- static inline void setTraceValue(const std::string& arg,
- unsigned char* type,
- uint64_t* value) {
- TraceValueUnion typeValue;
- typeValue.m_string = arg.data();
- *type = TRACE_VALUE_TYPE_COPY_STRING;
- *value = typeValue.m_uint;
- }
-
- // These addTraceEvent template functions are defined here instead of in the
- // macro, because the arg values could be temporary string objects. In order to
- // store pointers to the internal c_str and pass through to the tracing API, the
- // arg values must live throughout these procedures.
-
- static inline dawn_platform::tracing::TraceEventHandle addTraceEvent(
- dawn_platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/) {
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
- zeroNumArgs, 0, 0, 0, flags);
- }
-
- template <class ARG1_TYPE>
- static inline dawn_platform::tracing::TraceEventHandle addTraceEvent(
- dawn_platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/,
- const char* arg1Name,
- const ARG1_TYPE& arg1Val) {
- const int numArgs = 1;
- unsigned char argTypes[1];
- uint64_t argValues[1];
- setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
- numArgs, &arg1Name, argTypes, argValues, flags);
- }
-
- template <class ARG1_TYPE, class ARG2_TYPE>
- static inline dawn_platform::tracing::TraceEventHandle addTraceEvent(
- dawn_platform::Platform* platform,
- char phase,
- const unsigned char* categoryEnabled,
- const char* name,
- unsigned long long id,
- unsigned char flags,
- int /*unused, helps avoid empty __VA_ARGS__*/,
- const char* arg1Name,
- const ARG1_TYPE& arg1Val,
- const char* arg2Name,
- const ARG2_TYPE& arg2Val) {
- const int numArgs = 2;
- const char* argNames[2] = {arg1Name, arg2Name};
- unsigned char argTypes[2];
- uint64_t argValues[2];
- setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
- setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
- return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
- numArgs, argNames, argTypes, argValues, flags);
- }
-
- // Used by TRACE_EVENTx macro. Do not use directly.
- class TraceEndOnScopeClose {
- public:
- // Note: members of m_data intentionally left uninitialized. See initialize.
- TraceEndOnScopeClose() : m_pdata(0) {
- }
- ~TraceEndOnScopeClose() {
- if (m_pdata)
- addEventIfEnabled();
- }
-
- void initialize(dawn_platform::Platform* platform,
- const unsigned char* categoryEnabled,
- const char* name) {
- m_data.platform = platform;
- m_data.categoryEnabled = categoryEnabled;
- m_data.name = name;
- m_pdata = &m_data;
- }
-
- private:
- // Add the end event if the category is still enabled.
- void addEventIfEnabled() {
- // Only called when m_pdata is non-null.
- if (*m_pdata->categoryEnabled) {
- TRACE_EVENT_API_ADD_TRACE_EVENT(
- m_pdata->platform, TRACE_EVENT_PHASE_END, m_pdata->categoryEnabled,
- m_pdata->name, noEventId, zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
- }
- }
-
- // This Data struct workaround is to avoid initializing all the members
- // in Data during construction of this object, since this object is always
- // constructed, even when tracing is disabled. If the members of Data were
- // members of this class instead, compiler warnings occur about potential
- // uninitialized accesses.
- struct Data {
- dawn_platform::Platform* platform;
- const unsigned char* categoryEnabled;
- const char* name;
- };
- Data* m_pdata;
- Data m_data;
- };
-
-}} // namespace dawn_platform::TraceEvent
-
-#endif // DAWNPLATFORM_TRACING_TRACEEVENT_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
index b678fa10b5b..13a9a903d33 100644
--- a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2019 The Dawn Authors
+# Copyright 2022 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,90 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("${dawn_root}/generator/dawn_generator.gni")
-import("${dawn_root}/scripts/dawn_component.gni")
-
-# Public dawn_wire headers so they can be publically visible for
-# dependencies of dawn_wire
-source_set("dawn_wire_headers") {
- public_deps = [ "${dawn_root}/src/dawn:dawn_headers" ]
- all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
- sources = [
- "${dawn_root}/src/include/dawn_wire/Wire.h",
- "${dawn_root}/src/include/dawn_wire/WireClient.h",
- "${dawn_root}/src/include/dawn_wire/WireServer.h",
- "${dawn_root}/src/include/dawn_wire/dawn_wire_export.h",
- ]
-}
-
-dawn_json_generator("dawn_wire_gen") {
- target = "dawn_wire"
- outputs = [
- "src/dawn_wire/ObjectType_autogen.h",
- "src/dawn_wire/WireCmd_autogen.h",
- "src/dawn_wire/WireCmd_autogen.cpp",
- "src/dawn_wire/client/ApiObjects_autogen.h",
- "src/dawn_wire/client/ApiProcs_autogen.cpp",
- "src/dawn_wire/client/ClientBase_autogen.h",
- "src/dawn_wire/client/ClientHandlers_autogen.cpp",
- "src/dawn_wire/client/ClientPrototypes_autogen.inc",
- "src/dawn_wire/server/ServerBase_autogen.h",
- "src/dawn_wire/server/ServerDoers_autogen.cpp",
- "src/dawn_wire/server/ServerHandlers_autogen.cpp",
- "src/dawn_wire/server/ServerPrototypes_autogen.inc",
- ]
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_wire") {
+ public_deps = [ "../dawn/wire" ]
}
-
-dawn_component("dawn_wire") {
- DEFINE_PREFIX = "DAWN_WIRE"
-
- deps = [
- ":dawn_wire_gen",
- "${dawn_root}/src/common",
- ]
-
- configs = [ "${dawn_root}/src/common:dawn_internal" ]
- sources = get_target_outputs(":dawn_wire_gen")
- sources += [
- "BufferConsumer.h",
- "BufferConsumer_impl.h",
- "ChunkedCommandHandler.cpp",
- "ChunkedCommandHandler.h",
- "ChunkedCommandSerializer.cpp",
- "ChunkedCommandSerializer.h",
- "Wire.cpp",
- "WireClient.cpp",
- "WireDeserializeAllocator.cpp",
- "WireDeserializeAllocator.h",
- "WireResult.h",
- "WireServer.cpp",
- "client/ApiObjects.h",
- "client/Buffer.cpp",
- "client/Buffer.h",
- "client/Client.cpp",
- "client/Client.h",
- "client/ClientDoers.cpp",
- "client/ClientInlineMemoryTransferService.cpp",
- "client/Device.cpp",
- "client/Device.h",
- "client/ObjectAllocator.h",
- "client/Queue.cpp",
- "client/Queue.h",
- "client/RequestTracker.h",
- "client/ShaderModule.cpp",
- "client/ShaderModule.h",
- "server/ObjectStorage.h",
- "server/Server.cpp",
- "server/Server.h",
- "server/ServerBuffer.cpp",
- "server/ServerDevice.cpp",
- "server/ServerInlineMemoryTransferService.cpp",
- "server/ServerQueue.cpp",
- "server/ServerShaderModule.cpp",
- ]
-
- # Make headers publicly visible
- public_deps = [ ":dawn_wire_headers" ]
+group("dawn_wire_headers") {
+ public_deps = [ "../dawn/wire:headers" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h
deleted file mode 100644
index 3797bf40c88..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_BUFFERCONSUMER_H_
-#define DAWNWIRE_BUFFERCONSUMER_H_
-
-#include "dawn_wire/WireResult.h"
-
-#include <cstddef>
-
-namespace dawn_wire {
-
- // BufferConsumer is a utility class that allows reading bytes from a buffer
- // while simultaneously decrementing the amount of remaining space by exactly
- // the amount read. It helps prevent bugs where incrementing a pointer and
- // decrementing a size value are not kept in sync.
- // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
- template <typename BufferT>
- class BufferConsumer {
- static_assert(sizeof(BufferT) == 1,
- "BufferT must be 1-byte, but may have const/volatile qualifiers.");
-
- public:
- BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
- }
-
- BufferT* Buffer() const {
- return mBuffer;
- }
- size_t AvailableSize() const {
- return mSize;
- }
-
- protected:
- template <typename T, typename N>
- WireResult NextN(N count, T** data);
-
- template <typename T>
- WireResult Next(T** data);
-
- template <typename T>
- WireResult Peek(T** data);
-
- private:
- BufferT* mBuffer;
- size_t mSize;
- };
-
- class SerializeBuffer : public BufferConsumer<char> {
- public:
- using BufferConsumer::BufferConsumer;
- using BufferConsumer::Next;
- using BufferConsumer::NextN;
- };
-
- class DeserializeBuffer : public BufferConsumer<const volatile char> {
- public:
- using BufferConsumer::BufferConsumer;
- using BufferConsumer::Peek;
-
- template <typename T, typename N>
- WireResult ReadN(N count, const volatile T** data) {
- return NextN(count, data);
- }
-
- template <typename T>
- WireResult Read(const volatile T** data) {
- return Next(data);
- }
- };
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_BUFFERCONSUMER_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h
deleted file mode 100644
index fdd5fdbc726..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_BUFFERCONSUMER_IMPL_H_
-#define DAWNWIRE_BUFFERCONSUMER_IMPL_H_
-
-#include "dawn_wire/BufferConsumer.h"
-
-#include <limits>
-#include <type_traits>
-
-namespace dawn_wire {
-
- template <typename BufferT>
- template <typename T>
- WireResult BufferConsumer<BufferT>::Peek(T** data) {
- if (sizeof(T) > mSize) {
- return WireResult::FatalError;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- return WireResult::Success;
- }
-
- template <typename BufferT>
- template <typename T>
- WireResult BufferConsumer<BufferT>::Next(T** data) {
- if (sizeof(T) > mSize) {
- return WireResult::FatalError;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += sizeof(T);
- mSize -= sizeof(T);
- return WireResult::Success;
- }
-
- template <typename BufferT>
- template <typename T, typename N>
- WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
- static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
-
- constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
- if (count > kMaxCountWithoutOverflows) {
- return WireResult::FatalError;
- }
-
- // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
- size_t totalSize = sizeof(T) * count;
- if (totalSize > mSize) {
- return WireResult::FatalError;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += totalSize;
- mSize -= totalSize;
- return WireResult::Success;
- }
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_BUFFERCONSUMER_IMPL_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
deleted file mode 100644
index e970367758c..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2020 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DawnJSONGenerator(
- TARGET "dawn_wire"
- PRINT_NAME "Dawn wire"
- RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
-)
-
-add_library(dawn_wire ${DAWN_DUMMY_FILE})
-
-target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")
-if(BUILD_SHARED_LIBS)
- target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_SHARED_LIBRARY")
-endif()
-
-target_sources(dawn_wire PRIVATE
- "${DAWN_INCLUDE_DIR}/dawn_wire/Wire.h"
- "${DAWN_INCLUDE_DIR}/dawn_wire/WireClient.h"
- "${DAWN_INCLUDE_DIR}/dawn_wire/WireServer.h"
- "${DAWN_INCLUDE_DIR}/dawn_wire/dawn_wire_export.h"
- ${DAWN_WIRE_GEN_SOURCES}
- "BufferConsumer.h"
- "BufferConsumer_impl.h"
- "ChunkedCommandHandler.cpp"
- "ChunkedCommandHandler.h"
- "ChunkedCommandSerializer.cpp"
- "ChunkedCommandSerializer.h"
- "Wire.cpp"
- "WireClient.cpp"
- "WireDeserializeAllocator.cpp"
- "WireDeserializeAllocator.h"
- "WireResult.h"
- "WireServer.cpp"
- "client/ApiObjects.h"
- "client/Buffer.cpp"
- "client/Buffer.h"
- "client/Client.cpp"
- "client/Client.h"
- "client/ClientDoers.cpp"
- "client/ClientInlineMemoryTransferService.cpp"
- "client/Device.cpp"
- "client/Device.h"
- "client/ObjectAllocator.h"
- "client/Queue.cpp"
- "client/Queue.h"
- "client/RequestTracker.h"
- "client/ShaderModule.cpp"
- "client/ShaderModule.h"
- "server/ObjectStorage.h"
- "server/Server.cpp"
- "server/Server.h"
- "server/ServerBuffer.cpp"
- "server/ServerDevice.cpp"
- "server/ServerInlineMemoryTransferService.cpp"
- "server/ServerQueue.cpp"
- "server/ServerShaderModule.cpp"
-)
-target_link_libraries(dawn_wire
- PUBLIC dawn_headers
- PRIVATE dawn_common dawn_internal_config
-)
diff --git a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.cpp b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.cpp
deleted file mode 100644
index 8a962d9653c..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/ChunkedCommandHandler.h"
-
-#include "common/Alloc.h"
-
-#include <algorithm>
-#include <cstring>
-
-namespace dawn_wire {
-
- ChunkedCommandHandler::~ChunkedCommandHandler() = default;
-
- const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
- size_t size) {
- if (mChunkedCommandRemainingSize > 0) {
- // If there is a chunked command in flight, append the command data.
- // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
- // in-flight chunked command, and then pass the rest along to a second call to
- // |HandleCommandsImpl|.
- size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
-
- memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
- const_cast<const char*>(commands), chunkSize);
- mChunkedCommandPutOffset += chunkSize;
- mChunkedCommandRemainingSize -= chunkSize;
-
- commands += chunkSize;
- size -= chunkSize;
-
- if (mChunkedCommandRemainingSize == 0) {
- // Once the chunked command is complete, pass the data to the command handler
- // implemenation.
- auto chunkedCommandData = std::move(mChunkedCommandData);
- if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) ==
- nullptr) {
- // |HandleCommandsImpl| returns nullptr on error. Forward any errors
- // out.
- return nullptr;
- }
- }
- }
-
- return HandleCommandsImpl(commands, size);
- }
-
- ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
- const volatile char* commands,
- size_t commandSize,
- size_t initialSize) {
- ASSERT(!mChunkedCommandData);
-
- // Reserve space for all the command data we're expecting, and copy the initial data
- // to the start of the memory.
- mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
- if (!mChunkedCommandData) {
- return ChunkedCommandsResult::Error;
- }
-
- memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
- mChunkedCommandPutOffset = initialSize;
- mChunkedCommandRemainingSize = commandSize - initialSize;
-
- return ChunkedCommandsResult::Consumed;
- }
-
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.h b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.h
deleted file mode 100644
index 182ceadecbf..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandHandler.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
-#define DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
-
-#include "common/Assert.h"
-#include "dawn_wire/Wire.h"
-#include "dawn_wire/WireCmd_autogen.h"
-
-#include <cstdint>
-#include <memory>
-
-namespace dawn_wire {
-
- class ChunkedCommandHandler : public CommandHandler {
- public:
- const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
- ~ChunkedCommandHandler() override;
-
- protected:
- enum class ChunkedCommandsResult {
- Passthrough,
- Consumed,
- Error,
- };
-
- // Returns |true| if the commands were entirely consumed into the chunked command vector
- // and should be handled later once we receive all the command data.
- // Returns |false| if commands should be handled now immediately.
- ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
- uint64_t commandSize64 =
- reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
-
- if (commandSize64 > std::numeric_limits<size_t>::max()) {
- return ChunkedCommandsResult::Error;
- }
- size_t commandSize = static_cast<size_t>(commandSize64);
- if (size < commandSize) {
- return BeginChunkedCommandData(commands, commandSize, size);
- }
- return ChunkedCommandsResult::Passthrough;
- }
-
- private:
- virtual const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) = 0;
-
- ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
- size_t commandSize,
- size_t initialSize);
-
- size_t mChunkedCommandRemainingSize = 0;
- size_t mChunkedCommandPutOffset = 0;
- std::unique_ptr<char[]> mChunkedCommandData;
- };
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_CHUNKEDCOMMANDHANDLER_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.cpp b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.cpp
deleted file mode 100644
index 71b77162bfd..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/ChunkedCommandSerializer.h"
-
-namespace dawn_wire {
-
- ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
- : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {
- }
-
- void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
- size_t remainingSize) {
- while (remainingSize > 0) {
- size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
- void* dst = mSerializer->GetCmdSpace(chunkSize);
- if (dst == nullptr) {
- return;
- }
- memcpy(dst, allocatedBuffer, chunkSize);
-
- allocatedBuffer += chunkSize;
- remainingSize -= chunkSize;
- }
- }
-
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h
deleted file mode 100644
index 2465f8153da..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
-#define DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
-
-#include "common/Alloc.h"
-#include "common/Compiler.h"
-#include "dawn_wire/Wire.h"
-#include "dawn_wire/WireCmd_autogen.h"
-
-#include <algorithm>
-#include <cstring>
-#include <memory>
-
-namespace dawn_wire {
-
- class ChunkedCommandSerializer {
- public:
- ChunkedCommandSerializer(CommandSerializer* serializer);
-
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- SerializeCommandImpl(
- cmd,
- [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
- return cmd.Serialize(requiredSize, serializeBuffer);
- },
- extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
- }
-
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
- SerializeCommand(cmd, objectIdProvider, 0,
- [](SerializeBuffer*) { return WireResult::Success; });
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- const ObjectIdProvider& objectIdProvider,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- SerializeCommandImpl(
- cmd,
- [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
- SerializeBuffer* serializeBuffer) {
- return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
- },
- extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
- }
-
- private:
- template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
- void SerializeCommandImpl(const Cmd& cmd,
- SerializeCmdFn&& SerializeCmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- size_t commandSize = cmd.GetRequiredSize();
- size_t requiredSize = commandSize + extraSize;
-
- if (requiredSize <= mMaxAllocationSize) {
- char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
- if (allocatedBuffer != nullptr) {
- SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
- WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
- WireResult r2 = SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
- mSerializer->OnSerializeError();
- }
- }
- return;
- }
-
- auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
- if (!cmdSpace) {
- return;
- }
- SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
- WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
- WireResult r2 = SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
- mSerializer->OnSerializeError();
- return;
- }
- SerializeChunkedCommand(cmdSpace.get(), requiredSize);
- }
-
- void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
-
- CommandSerializer* mSerializer;
- size_t mMaxAllocationSize;
- };
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_CHUNKEDCOMMANDSERIALIZER_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/Wire.cpp b/chromium/third_party/dawn/src/dawn_wire/Wire.cpp
deleted file mode 100644
index 7221d6b0545..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/Wire.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/Wire.h"
-
-namespace dawn_wire {
-
- CommandSerializer::CommandSerializer() = default;
- CommandSerializer::~CommandSerializer() = default;
-
- void CommandSerializer::OnSerializeError() {
- }
-
- CommandHandler::CommandHandler() = default;
- CommandHandler::~CommandHandler() = default;
-
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
deleted file mode 100644
index 01ab45beabd..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/client/Client.h"
-
-namespace dawn_wire {
-
- WireClient::WireClient(const WireClientDescriptor& descriptor)
- : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {
- }
-
- WireClient::~WireClient() {
- mImpl.reset();
- }
-
- const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
- return mImpl->HandleCommands(commands, size);
- }
-
- ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
- return mImpl->ReserveTexture(device);
- }
-
- ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
- return mImpl->ReserveSwapChain(device);
- }
-
- ReservedDevice WireClient::ReserveDevice() {
- return mImpl->ReserveDevice();
- }
-
- void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
- mImpl->ReclaimTextureReservation(reservation);
- }
-
- void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
- mImpl->ReclaimSwapChainReservation(reservation);
- }
-
- void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
- mImpl->ReclaimDeviceReservation(reservation);
- }
-
- void WireClient::Disconnect() {
- mImpl->Disconnect();
- }
-
- namespace client {
- MemoryTransferService::MemoryTransferService() = default;
-
- MemoryTransferService::~MemoryTransferService() = default;
-
- MemoryTransferService::ReadHandle::ReadHandle() = default;
-
- MemoryTransferService::ReadHandle::~ReadHandle() = default;
-
- MemoryTransferService::WriteHandle::WriteHandle() = default;
-
- MemoryTransferService::WriteHandle::~WriteHandle() = default;
- } // namespace client
-
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.cpp b/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.cpp
deleted file mode 100644
index 7ae1b35c3fa..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/WireDeserializeAllocator.h"
-
-#include <algorithm>
-
-namespace dawn_wire {
- WireDeserializeAllocator::WireDeserializeAllocator() {
- Reset();
- }
-
- WireDeserializeAllocator::~WireDeserializeAllocator() {
- Reset();
- }
-
- void* WireDeserializeAllocator::GetSpace(size_t size) {
- // Return space in the current buffer if possible first.
- if (mRemainingSize >= size) {
- char* buffer = mCurrentBuffer;
- mCurrentBuffer += size;
- mRemainingSize -= size;
- return buffer;
- }
-
- // Otherwise allocate a new buffer and try again.
- size_t allocationSize = std::max(size, size_t(2048));
- char* allocation = static_cast<char*>(malloc(allocationSize));
- if (allocation == nullptr) {
- return nullptr;
- }
-
- mAllocations.push_back(allocation);
- mCurrentBuffer = allocation;
- mRemainingSize = allocationSize;
- return GetSpace(size);
- }
-
- void WireDeserializeAllocator::Reset() {
- for (auto allocation : mAllocations) {
- free(allocation);
- }
- mAllocations.clear();
-
- // The initial buffer is the inline buffer so that some allocations can be skipped
- mCurrentBuffer = mStaticBuffer;
- mRemainingSize = sizeof(mStaticBuffer);
- }
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.h b/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.h
deleted file mode 100644
index 4c34456458e..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/WireDeserializeAllocator.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
-#define DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
-
-#include "dawn_wire/WireCmd_autogen.h"
-
-#include <vector>
-
-namespace dawn_wire {
- // A really really simple implementation of the DeserializeAllocator. It's main feature
- // is that it has some inline storage so as to avoid allocations for the majority of
- // commands.
- class WireDeserializeAllocator : public DeserializeAllocator {
- public:
- WireDeserializeAllocator();
- virtual ~WireDeserializeAllocator();
-
- void* GetSpace(size_t size) override;
-
- void Reset();
-
- private:
- size_t mRemainingSize = 0;
- char* mCurrentBuffer = nullptr;
- char mStaticBuffer[2048];
- std::vector<char*> mAllocations;
- };
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_WIREDESERIALIZEALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireResult.h b/chromium/third_party/dawn/src/dawn_wire/WireResult.h
deleted file mode 100644
index fc0deb3c86c..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/WireResult.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_WIRERESULT_H_
-#define DAWNWIRE_WIRERESULT_H_
-
-#include "common/Compiler.h"
-
-namespace dawn_wire {
-
- enum class DAWN_NO_DISCARD WireResult {
- Success,
- FatalError,
- };
-
-// Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
-#define WIRE_TRY(EXPR) \
- do { \
- WireResult exprResult = EXPR; \
- if (DAWN_UNLIKELY(exprResult != WireResult::Success)) { \
- return exprResult; \
- } \
- } while (0)
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_WIRERESULT_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
deleted file mode 100644
index bad595760ba..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/WireServer.h"
-#include "dawn_wire/server/Server.h"
-
-namespace dawn_wire {
-
- WireServer::WireServer(const WireServerDescriptor& descriptor)
- : mImpl(new server::Server(*descriptor.procs,
- descriptor.serializer,
- descriptor.memoryTransferService)) {
- }
-
- WireServer::~WireServer() {
- mImpl.reset();
- }
-
- const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
- return mImpl->HandleCommands(commands, size);
- }
-
- bool WireServer::InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
- }
-
- bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
- }
-
- bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
- return mImpl->InjectDevice(device, id, generation);
- }
-
- WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
- return mImpl->GetDevice(id, generation);
- }
-
- namespace server {
- MemoryTransferService::MemoryTransferService() = default;
-
- MemoryTransferService::~MemoryTransferService() = default;
-
- MemoryTransferService::ReadHandle::ReadHandle() = default;
-
- MemoryTransferService::ReadHandle::~ReadHandle() = default;
-
- MemoryTransferService::WriteHandle::WriteHandle() = default;
-
- MemoryTransferService::WriteHandle::~WriteHandle() = default;
-
- void MemoryTransferService::WriteHandle::SetTarget(void* data) {
- mTargetData = data;
- }
- void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
- mDataLength = dataLength;
- }
- } // namespace server
-
-} // namespace dawn_wire
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h b/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
deleted file mode 100644
index 71dbc82dfeb..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_APIOBJECTS_H_
-#define DAWNWIRE_CLIENT_APIOBJECTS_H_
-
-#include "dawn_wire/client/ObjectBase.h"
-
-#include "dawn_wire/client/Buffer.h"
-#include "dawn_wire/client/Device.h"
-#include "dawn_wire/client/Queue.h"
-#include "dawn_wire/client/ShaderModule.h"
-
-#include "dawn_wire/client/ApiObjects_autogen.h"
-
-#endif // DAWNWIRE_CLIENT_APIOBJECTS_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
deleted file mode 100644
index 5077b5aa49f..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/Buffer.h"
-
-#include "dawn_wire/BufferConsumer_impl.h"
-#include "dawn_wire/WireCmd_autogen.h"
-#include "dawn_wire/client/Client.h"
-#include "dawn_wire/client/Device.h"
-
-namespace dawn_wire { namespace client {
-
- // static
- WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
- Client* wireClient = device->client;
-
- bool mappable =
- (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
- descriptor->mappedAtCreation;
- if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
- device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
- return device->CreateErrorBuffer();
- }
-
- std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
-
- DeviceCreateBufferCmd cmd;
- cmd.deviceId = device->id;
- cmd.descriptor = descriptor;
- cmd.readHandleCreateInfoLength = 0;
- cmd.readHandleCreateInfo = nullptr;
- cmd.writeHandleCreateInfoLength = 0;
- cmd.writeHandleCreateInfo = nullptr;
-
- if (mappable) {
- if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
- // Create the read handle on buffer creation.
- readHandle.reset(
- wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
- if (readHandle == nullptr) {
- device->InjectError(WGPUErrorType_OutOfMemory,
- "Failed to create buffer mapping");
- return device->CreateErrorBuffer();
- }
- cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
- }
-
- if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 ||
- descriptor->mappedAtCreation) {
- // Create the write handle on buffer creation.
- writeHandle.reset(
- wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
- if (writeHandle == nullptr) {
- device->InjectError(WGPUErrorType_OutOfMemory,
- "Failed to create buffer mapping");
- return device->CreateErrorBuffer();
- }
- cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
- }
- }
-
- // Create the buffer and send the creation command.
- // This must happen after any potential device->CreateErrorBuffer()
- // as server expects allocating ids to be monotonically increasing
- auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
- Buffer* buffer = bufferObjectAndSerial->object.get();
- buffer->mDevice = device;
- buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
- buffer->mSize = descriptor->size;
- buffer->mDestructWriteHandleOnUnmap = false;
-
- if (descriptor->mappedAtCreation) {
- // If the buffer is mapped at creation, a write handle is created and will be
- // destructed on unmap if the buffer doesn't have MapWrite usage
- // The buffer is mapped right now.
- buffer->mMapState = MapState::MappedAtCreation;
-
- // This flag is for write handle created by mappedAtCreation
- // instead of MapWrite usage. We don't have such a case for read handle
- buffer->mDestructWriteHandleOnUnmap =
- (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
-
- buffer->mMapOffset = 0;
- buffer->mMapSize = buffer->mSize;
- ASSERT(writeHandle != nullptr);
- buffer->mMappedData = writeHandle->GetData();
- }
-
- cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
-
- wireClient->SerializeCommand(
- cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
- [&](SerializeBuffer* serializeBuffer) {
- if (readHandle != nullptr) {
- char* readHandleBuffer;
- WIRE_TRY(
- serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
- // Serialize the ReadHandle into the space after the command.
- readHandle->SerializeCreate(readHandleBuffer);
- buffer->mReadHandle = std::move(readHandle);
- }
- if (writeHandle != nullptr) {
- char* writeHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(cmd.writeHandleCreateInfoLength,
- &writeHandleBuffer));
- // Serialize the WriteHandle into the space after the command.
- writeHandle->SerializeCreate(writeHandleBuffer);
- buffer->mWriteHandle = std::move(writeHandle);
- }
-
- return WireResult::Success;
- });
- return ToAPI(buffer);
- }
-
- // static
- WGPUBuffer Buffer::CreateError(Device* device) {
- auto* allocation = device->client->BufferAllocator().New(device->client);
- allocation->object->mDevice = device;
- allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
-
- DeviceCreateErrorBufferCmd cmd;
- cmd.self = ToAPI(device);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
- device->client->SerializeCommand(cmd);
-
- return ToAPI(allocation->object.get());
- }
-
- Buffer::~Buffer() {
- ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
- FreeMappedData();
- }
-
- void Buffer::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
- }
-
- void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
- mRequests.CloseAll([status](MapRequestData* request) {
- if (request->callback != nullptr) {
- request->callback(status, request->userdata);
- }
- });
- }
-
- void Buffer::MapAsync(WGPUMapModeFlags mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
- }
-
- // Handle the defaulting of size required by WebGPU.
- if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
- size = mSize - offset;
- }
-
- // Create the request structure that will hold information while this mapping is
- // in flight.
- MapRequestData request = {};
- request.callback = callback;
- request.userdata = userdata;
- request.offset = offset;
- request.size = size;
- if (mode & WGPUMapMode_Read) {
- request.type = MapRequestType::Read;
- } else if (mode & WGPUMapMode_Write) {
- request.type = MapRequestType::Write;
- }
-
- uint64_t serial = mRequests.Add(std::move(request));
-
- // Serialize the command to send to the server.
- BufferMapAsyncCmd cmd;
- cmd.bufferId = this->id;
- cmd.requestSerial = serial;
- cmd.mode = mode;
- cmd.offset = offset;
- cmd.size = size;
-
- client->SerializeCommand(cmd);
- }
-
- bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo) {
- MapRequestData request;
- if (!mRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- auto FailRequest = [&request]() -> bool {
- if (request.callback != nullptr) {
- request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
- }
- return false;
- };
-
- // Take into account the client-side status of the request if the server says it is a success.
- if (status == WGPUBufferMapAsyncStatus_Success) {
- status = request.clientStatus;
- }
-
- if (status == WGPUBufferMapAsyncStatus_Success) {
- switch (request.type) {
- case MapRequestType::Read: {
- if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
- // This is the size of data deserialized from the command stream, which must
- // be CPU-addressable.
- return FailRequest();
- }
-
- // Validate to prevent bad map request; buffer destroyed during map request
- if (mReadHandle == nullptr) {
- return FailRequest();
- }
- // Update user map data with server returned data
- if (!mReadHandle->DeserializeDataUpdate(
- readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
- request.offset, request.size)) {
- return FailRequest();
- }
- mMapState = MapState::MappedForRead;
- mMappedData = const_cast<void*>(mReadHandle->GetData());
- break;
- }
- case MapRequestType::Write: {
- if (mWriteHandle == nullptr) {
- return FailRequest();
- }
- mMapState = MapState::MappedForWrite;
- mMappedData = mWriteHandle->GetData();
- break;
- }
- default:
- UNREACHABLE();
- }
-
- mMapOffset = request.offset;
- mMapSize = request.size;
- }
-
- if (request.callback) {
- request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
- }
-
- return true;
- }
-
- void* Buffer::GetMappedRange(size_t offset, size_t size) {
- if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
- return nullptr;
- }
- return static_cast<uint8_t*>(mMappedData) + offset;
- }
-
- const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
- if (!(IsMappedForWriting() || IsMappedForReading()) ||
- !CheckGetMappedRangeOffsetSize(offset, size)) {
- return nullptr;
- }
- return static_cast<uint8_t*>(mMappedData) + offset;
- }
-
- void Buffer::Unmap() {
- // Invalidate the local pointer, and cancel all other in-flight requests that would
- // turn into errors anyway (you can't double map). This prevents race when the following
- // happens, where the application code would have unmapped a buffer but still receive a
- // callback:
- // - Client -> Server: MapRequest1, Unmap, MapRequest2
- // - Server -> Client: Result of MapRequest1
- // - Unmap locally on the client
- // - Server -> Client: Result of MapRequest2
-
- // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
- if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
- mWriteHandle != nullptr) {
- // Writes need to be flushed before Unmap is sent. Unmap calls all associated
- // in-flight callbacks which may read the updated data.
-
- // Get the serialization size of data update writes.
- size_t writeDataUpdateInfoLength =
- mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
-
- BufferUpdateMappedDataCmd cmd;
- cmd.bufferId = id;
- cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
- cmd.writeDataUpdateInfo = nullptr;
- cmd.offset = mMapOffset;
- cmd.size = mMapSize;
-
- client->SerializeCommand(
- cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- char* writeHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
-
- // Serialize flush metadata into the space after the command.
- // This closes the handle for writing.
- mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
-
- return WireResult::Success;
- });
-
- // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
- // for mappedAtCreation usage. It is destroyed on unmap after flush to server
- // instead of at buffer destruction.
- if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
- mWriteHandle = nullptr;
- if (mReadHandle) {
- // If it's both mappedAtCreation and MapRead we need to reset
- // mMappedData to readHandle's GetData(). This could be changed to
- // merging read/write handle in future
- mMappedData = const_cast<void*>(mReadHandle->GetData());
- }
- }
- }
-
- // Free map access tokens
- mMapState = MapState::Unmapped;
- mMapOffset = 0;
- mMapSize = 0;
-
- // Tag all mapping requests still in flight as unmapped before callback.
- mRequests.ForAll([](MapRequestData* request) {
- if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
- request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
- }
- });
-
- BufferUnmapCmd cmd;
- cmd.self = ToAPI(this);
- client->SerializeCommand(cmd);
- }
-
- void Buffer::Destroy() {
- // Remove the current mapping and destroy Read/WriteHandles.
- FreeMappedData();
-
- // Tag all mapping requests still in flight as destroyed before callback.
- mRequests.ForAll([](MapRequestData* request) {
- if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
- request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
- }
- });
-
- BufferDestroyCmd cmd;
- cmd.self = ToAPI(this);
- client->SerializeCommand(cmd);
- }
-
- bool Buffer::IsMappedForReading() const {
- return mMapState == MapState::MappedForRead;
- }
-
- bool Buffer::IsMappedForWriting() const {
- return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
- }
-
- bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
- if (offset % 8 != 0 || size % 4 != 0) {
- return false;
- }
-
- if (size > mMapSize || offset < mMapOffset) {
- return false;
- }
-
- size_t offsetInMappedRange = offset - mMapOffset;
- return offsetInMappedRange <= mMapSize - size;
- }
-
- void Buffer::FreeMappedData() {
-#if defined(DAWN_ENABLE_ASSERTS)
- // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
- // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
- // interaction of mapping and GC.
- if (mMappedData) {
- memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
- }
-#endif // defined(DAWN_ENABLE_ASSERTS)
-
- mMapOffset = 0;
- mMapSize = 0;
- mReadHandle = nullptr;
- mWriteHandle = nullptr;
- mMappedData = nullptr;
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
deleted file mode 100644
index 0a243843891..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_BUFFER_H_
-#define DAWNWIRE_CLIENT_BUFFER_H_
-
-#include <dawn/webgpu.h>
-
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/client/ObjectBase.h"
-#include "dawn_wire/client/RequestTracker.h"
-
-namespace dawn_wire { namespace client {
-
- class Device;
-
- class Buffer final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
-
- static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
- static WGPUBuffer CreateError(Device* device);
-
- ~Buffer();
-
- bool OnMapAsyncCallback(uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo);
- void MapAsync(WGPUMapModeFlags mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata);
- void* GetMappedRange(size_t offset, size_t size);
- const void* GetConstMappedRange(size_t offset, size_t size);
- void Unmap();
-
- void Destroy();
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
-
- bool IsMappedForReading() const;
- bool IsMappedForWriting() const;
- bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
-
- void FreeMappedData();
-
- Device* mDevice;
-
- enum class MapRequestType { None, Read, Write };
-
- enum class MapState {
- Unmapped,
- MappedForRead,
- MappedForWrite,
- MappedAtCreation,
- };
-
- // We want to defer all the validation to the server, which means we could have multiple
- // map request in flight at a single time and need to track them separately.
- // On well-behaved applications, only one request should exist at a single time.
- struct MapRequestData {
- WGPUBufferMapCallback callback = nullptr;
- void* userdata = nullptr;
- size_t offset = 0;
- size_t size = 0;
-
- // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
- // precedence over the success value returned from the server. However Error statuses
- // from the server take precedence over the client-side status.
- WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
-
- MapRequestType type = MapRequestType::None;
- };
- RequestTracker<MapRequestData> mRequests;
- uint64_t mSize = 0;
-
- // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
- // requests.
- // TODO(enga): Use a tagged pointer to save space.
- std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
- std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
- MapState mMapState = MapState::Unmapped;
- bool mDestructWriteHandleOnUnmap = false;
-
- void* mMappedData = nullptr;
- size_t mMapOffset = 0;
- size_t mMapSize = 0;
-
- std::weak_ptr<bool> mDeviceIsAlive;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
deleted file mode 100644
index 2d4445e7940..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/Client.h"
-
-#include "common/Compiler.h"
-#include "dawn_wire/client/Device.h"
-
-namespace dawn_wire { namespace client {
-
- namespace {
-
- class NoopCommandSerializer final : public CommandSerializer {
- public:
- static NoopCommandSerializer* GetInstance() {
- static NoopCommandSerializer gNoopCommandSerializer;
- return &gNoopCommandSerializer;
- }
-
- ~NoopCommandSerializer() = default;
-
- size_t GetMaximumAllocationSize() const final {
- return 0;
- }
- void* GetCmdSpace(size_t size) final {
- return nullptr;
- }
- bool Flush() final {
- return false;
- }
- };
-
- } // anonymous namespace
-
- Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
- : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
- if (mMemoryTransferService == nullptr) {
- // If a MemoryTransferService is not provided, fall back to inline memory.
- mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
- mMemoryTransferService = mOwnedMemoryTransferService.get();
- }
- }
-
- Client::~Client() {
- DestroyAllObjects();
- }
-
- void Client::DestroyAllObjects() {
- for (auto& objectList : mObjects) {
- ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
- if (objectType == ObjectType::Device) {
- continue;
- }
- while (!objectList.empty()) {
- ObjectBase* object = objectList.head()->value();
-
- DestroyObjectCmd cmd;
- cmd.objectType = objectType;
- cmd.objectId = object->id;
- SerializeCommand(cmd);
- FreeObject(objectType, object);
- }
- }
-
- while (!mObjects[ObjectType::Device].empty()) {
- ObjectBase* object = mObjects[ObjectType::Device].head()->value();
-
- DestroyObjectCmd cmd;
- cmd.objectType = ObjectType::Device;
- cmd.objectId = object->id;
- SerializeCommand(cmd);
- FreeObject(ObjectType::Device, object);
- }
- }
-
- ReservedTexture Client::ReserveTexture(WGPUDevice device) {
- auto* allocation = TextureAllocator().New(this);
-
- ReservedTexture result;
- result.texture = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- result.deviceId = FromAPI(device)->id;
- result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
- return result;
- }
-
- ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
- auto* allocation = SwapChainAllocator().New(this);
-
- ReservedSwapChain result;
- result.swapchain = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- result.deviceId = FromAPI(device)->id;
- result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
- return result;
- }
-
- ReservedDevice Client::ReserveDevice() {
- auto* allocation = DeviceAllocator().New(this);
-
- ReservedDevice result;
- result.device = ToAPI(allocation->object.get());
- result.id = allocation->object->id;
- result.generation = allocation->generation;
- return result;
- }
-
- void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
- TextureAllocator().Free(FromAPI(reservation.texture));
- }
-
- void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
- SwapChainAllocator().Free(FromAPI(reservation.swapchain));
- }
-
- void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
- DeviceAllocator().Free(FromAPI(reservation.device));
- }
-
- void Client::Disconnect() {
- mDisconnected = true;
- mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
-
- auto& deviceList = mObjects[ObjectType::Device];
- {
- for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
- device = device->next()) {
- static_cast<Device*>(device->value())
- ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
- }
- }
- for (auto& objectList : mObjects) {
- for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
- object = object->next()) {
- object->value()->CancelCallbacksForDisconnect();
- }
- }
- }
-
- bool Client::IsDisconnected() const {
- return mDisconnected;
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
deleted file mode 100644
index fc3758a0d88..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_CLIENT_H_
-#define DAWNWIRE_CLIENT_CLIENT_H_
-
-#include <dawn/webgpu.h>
-#include <dawn_wire/Wire.h>
-
-#include "common/LinkedList.h"
-#include "common/NonCopyable.h"
-#include "dawn_wire/ChunkedCommandSerializer.h"
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/WireCmd_autogen.h"
-#include "dawn_wire/WireDeserializeAllocator.h"
-#include "dawn_wire/client/ClientBase_autogen.h"
-
-namespace dawn_wire { namespace client {
-
- class Device;
- class MemoryTransferService;
-
- class Client : public ClientBase {
- public:
- Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
- ~Client() override;
-
- // ChunkedCommandHandler implementation
- const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) override;
-
- MemoryTransferService* GetMemoryTransferService() const {
- return mMemoryTransferService;
- }
-
- ReservedTexture ReserveTexture(WGPUDevice device);
- ReservedSwapChain ReserveSwapChain(WGPUDevice device);
- ReservedDevice ReserveDevice();
-
- void ReclaimTextureReservation(const ReservedTexture& reservation);
- void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
- void ReclaimDeviceReservation(const ReservedDevice& reservation);
-
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- mSerializer.SerializeCommand(cmd, *this);
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
- }
-
- void Disconnect();
- bool IsDisconnected() const;
-
- template <typename T>
- void TrackObject(T* object) {
- mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
- }
-
- private:
- void DestroyAllObjects();
-
-#include "dawn_wire/client/ClientPrototypes_autogen.inc"
-
- ChunkedCommandSerializer mSerializer;
- WireDeserializeAllocator mAllocator;
- MemoryTransferService* mMemoryTransferService = nullptr;
- std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
-
- PerObjectType<LinkedList<ObjectBase>> mObjects;
- bool mDisconnected = false;
- };
-
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_CLIENT_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
deleted file mode 100644
index e6665abf010..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_wire/client/Client.h"
-#include "dawn_wire/client/Device.h"
-
-#include <limits>
-
-namespace dawn_wire { namespace client {
-
- bool Client::DoDeviceUncapturedErrorCallback(Device* device,
- WGPUErrorType errorType,
- const char* message) {
- switch (errorType) {
- case WGPUErrorType_NoError:
- case WGPUErrorType_Validation:
- case WGPUErrorType_OutOfMemory:
- case WGPUErrorType_Unknown:
- case WGPUErrorType_DeviceLost:
- break;
- default:
- return false;
- }
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleError(errorType, message);
- return true;
- }
-
- bool Client::DoDeviceLoggingCallback(Device* device,
- WGPULoggingType loggingType,
- const char* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleLogging(loggingType, message);
- return true;
- }
-
- bool Client::DoDeviceLostCallback(Device* device,
- WGPUDeviceLostReason reason,
- char const* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- device->HandleDeviceLost(reason, message);
- return true;
- }
-
- bool Client::DoDevicePopErrorScopeCallback(Device* device,
- uint64_t requestSerial,
- WGPUErrorType errorType,
- const char* message) {
- if (device == nullptr) {
- // The device might have been deleted or recreated so this isn't an error.
- return true;
- }
- return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
- }
-
- bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
- uint64_t requestSerial,
- uint32_t status,
- uint64_t readDataUpdateInfoLength,
- const uint8_t* readDataUpdateInfo) {
- // The buffer might have been deleted or recreated so this isn't an error.
- if (buffer == nullptr) {
- return true;
- }
- return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
- readDataUpdateInfo);
- }
-
- bool Client::DoQueueWorkDoneCallback(Queue* queue,
- uint64_t requestSerial,
- WGPUQueueWorkDoneStatus status) {
- // The queue might have been deleted or recreated so this isn't an error.
- if (queue == nullptr) {
- return true;
- }
- return queue->OnWorkDoneCallback(requestSerial, status);
- }
-
- bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
- uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- // The device might have been deleted or recreated so this isn't an error.
- if (device == nullptr) {
- return true;
- }
- return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
- }
-
- bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
- uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- // The device might have been deleted or recreated so this isn't an error.
- if (device == nullptr) {
- return true;
- }
- return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
- }
-
- bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
- uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info) {
- // The shader module might have been deleted or recreated so this isn't an error.
- if (shaderModule == nullptr) {
- return true;
- }
- return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
deleted file mode 100644
index 9cb0eed13fb..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientInlineMemoryTransferService.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Alloc.h"
-#include "common/Assert.h"
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/client/Client.h"
-
-#include <cstring>
-
-namespace dawn_wire { namespace client {
-
- class InlineMemoryTransferService : public MemoryTransferService {
- class ReadHandleImpl : public ReadHandle {
- public:
- explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
- : mStagingData(std::move(stagingData)), mSize(size) {
- }
-
- ~ReadHandleImpl() override = default;
-
- size_t SerializeCreateSize() override {
- return 0;
- }
-
- void SerializeCreate(void*) override {
- }
-
- const void* GetData() override {
- return mStagingData.get();
- }
-
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override {
- if (deserializeSize != size || deserializePointer == nullptr) {
- return false;
- }
-
- if (offset > mSize || size > mSize - offset) {
- return false;
- }
-
- void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
- memcpy(start, deserializePointer, size);
- return true;
- }
-
- private:
- std::unique_ptr<uint8_t[]> mStagingData;
- size_t mSize;
- };
-
- class WriteHandleImpl : public WriteHandle {
- public:
- explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
- : mStagingData(std::move(stagingData)), mSize(size) {
- }
-
- ~WriteHandleImpl() override = default;
-
- size_t SerializeCreateSize() override {
- return 0;
- }
-
- void SerializeCreate(void*) override {
- }
-
- void* GetData() override {
- return mStagingData.get();
- }
-
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
- ASSERT(offset <= mSize);
- ASSERT(size <= mSize - offset);
- return size;
- }
-
- void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
- ASSERT(mStagingData != nullptr);
- ASSERT(serializePointer != nullptr);
- ASSERT(offset <= mSize);
- ASSERT(size <= mSize - offset);
- memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
- }
-
- private:
- std::unique_ptr<uint8_t[]> mStagingData;
- size_t mSize;
- };
-
- public:
- InlineMemoryTransferService() {
- }
- ~InlineMemoryTransferService() override = default;
-
- ReadHandle* CreateReadHandle(size_t size) override {
- auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
- if (stagingData) {
- return new ReadHandleImpl(std::move(stagingData), size);
- }
- return nullptr;
- }
-
- WriteHandle* CreateWriteHandle(size_t size) override {
- auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
- if (stagingData) {
- memset(stagingData.get(), 0, size);
- return new WriteHandleImpl(std::move(stagingData), size);
- }
- return nullptr;
- }
- };
-
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
- return std::make_unique<InlineMemoryTransferService>();
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.cpp
deleted file mode 100644
index 6a523f2819e..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/ClientMemoryTransferService_mock.h"
-
-#include <cstdio>
-#include "common/Assert.h"
-
-namespace dawn_wire { namespace client {
-
- MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
- : ReadHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
- mService->OnReadHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
- return mService->OnReadHandleSerializeCreateSize(this);
- }
-
- void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
- mService->OnReadHandleSerializeCreate(this, serializePointer);
- }
-
- const void* MockMemoryTransferService::MockReadHandle::GetData() {
- return mService->OnReadHandleGetData(this);
- }
-
- bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
- const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return mService->OnReadHandleDeserializeDataUpdate(
- this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
- size);
- }
-
- MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
- : WriteHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
- mService->OnWriteHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
- return mService->OnWriteHandleSerializeCreateSize(this);
- }
-
- void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
- mService->OnWriteHandleSerializeCreate(this, serializePointer);
- }
-
- void* MockMemoryTransferService::MockWriteHandle::GetData() {
- return mService->OnWriteHandleGetData(this);
- }
-
- size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
- size_t size) {
- return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
- }
-
- void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
- size_t offset,
- size_t size) {
- mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
- }
-
- MockMemoryTransferService::MockMemoryTransferService() = default;
- MockMemoryTransferService::~MockMemoryTransferService() = default;
-
- MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(
- size_t size) {
- return OnCreateReadHandle(size);
- }
-
- MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(
- size_t size) {
- return OnCreateWriteHandle(size);
- }
-
- MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
- return new MockReadHandle(this);
- }
-
- MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
- return new MockWriteHandle(this);
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.h
deleted file mode 100644
index 5f9eeb05521..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientMemoryTransferService_mock.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
-#define DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
-
-#include <gmock/gmock.h>
-
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/client/Client.h"
-
-namespace dawn_wire { namespace client {
-
- class MockMemoryTransferService : public MemoryTransferService {
- public:
- class MockReadHandle : public ReadHandle {
- public:
- explicit MockReadHandle(MockMemoryTransferService* service);
- ~MockReadHandle() override;
-
- size_t SerializeCreateSize() override;
- void SerializeCreate(void* serializePointer) override;
- const void* GetData() override;
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- class MockWriteHandle : public WriteHandle {
- public:
- explicit MockWriteHandle(MockMemoryTransferService* service);
- ~MockWriteHandle() override;
-
- size_t SerializeCreateSize() override;
- void SerializeCreate(void* serializePointer) override;
- void* GetData() override;
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
- void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- MockMemoryTransferService();
- ~MockMemoryTransferService() override;
-
- ReadHandle* CreateReadHandle(size_t) override;
- WriteHandle* CreateWriteHandle(size_t) override;
-
- MockReadHandle* NewReadHandle();
- MockWriteHandle* NewWriteHandle();
-
- MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
- MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
-
- MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
- MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
- MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
- MOCK_METHOD(bool,
- OnReadHandleDeserializeDataUpdate,
- (const ReadHandle*,
- const uint32_t* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size));
- MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
-
- MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
- MOCK_METHOD(void,
- OnWriteHandleSerializeCreate,
- (const void* WriteHandle, void* serializePointer));
- MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
- MOCK_METHOD(size_t,
- OnWriteHandleSizeOfSerializeDataUpdate,
- (const void* WriteHandle, size_t offset, size_t size));
- MOCK_METHOD(size_t,
- OnWriteHandleSerializeDataUpdate,
- (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
- MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
deleted file mode 100644
index 8379d51b1c7..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ /dev/null
@@ -1,328 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/Device.h"
-
-#include "common/Assert.h"
-#include "common/Log.h"
-#include "dawn_wire/client/ApiObjects_autogen.h"
-#include "dawn_wire/client/Client.h"
-#include "dawn_wire/client/ObjectAllocator.h"
-
-namespace dawn_wire { namespace client {
-
- Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
- : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
-#if defined(DAWN_ENABLE_ASSERTS)
- mErrorCallback = [](WGPUErrorType, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
- "probably not intended. If you really want to ignore errors "
- "and suppress this message, set the callback to null.";
- }
- };
-
- mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
- static bool calledOnce = false;
- if (!calledOnce) {
- calledOnce = true;
- dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
- "intended. If you really want to ignore device lost "
- "and suppress this message, set the callback to null.";
- }
- };
-#endif // DAWN_ENABLE_ASSERTS
- }
-
- Device::~Device() {
- mErrorScopes.CloseAll([](ErrorScopeData* request) {
- request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
- request->userdata);
- });
-
- mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
- if (request->createComputePipelineAsyncCallback != nullptr) {
- request->createComputePipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", request->userdata);
- } else {
- ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
- request->createRenderPipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", request->userdata);
- }
- });
- }
-
- void Device::HandleError(WGPUErrorType errorType, const char* message) {
- if (mErrorCallback) {
- mErrorCallback(errorType, message, mErrorUserdata);
- }
- }
-
- void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
- if (mLoggingCallback) {
- // Since client always run in single thread, calling the callback directly is safe.
- mLoggingCallback(loggingType, message, mLoggingUserdata);
- }
- }
-
- void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
- if (mDeviceLostCallback && !mDidRunLostCallback) {
- mDidRunLostCallback = true;
- mDeviceLostCallback(reason, message, mDeviceLostUserdata);
- }
- }
-
- void Device::CancelCallbacksForDisconnect() {
- mErrorScopes.CloseAll([](ErrorScopeData* request) {
- request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
- });
-
- mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
- if (request->createComputePipelineAsyncCallback != nullptr) {
- request->createComputePipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
- request->userdata);
- } else {
- ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
- request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
- nullptr, "Device lost",
- request->userdata);
- }
- });
- }
-
- std::weak_ptr<bool> Device::GetAliveWeakPtr() {
- return mIsAlive;
- }
-
- void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
- mErrorCallback = errorCallback;
- mErrorUserdata = errorUserdata;
- }
-
- void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
- mLoggingCallback = callback;
- mLoggingUserdata = userdata;
- }
-
- void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
- mDeviceLostCallback = callback;
- mDeviceLostUserdata = userdata;
- }
-
- void Device::PushErrorScope(WGPUErrorFilter filter) {
- mErrorScopeStackSize++;
-
- DevicePushErrorScopeCmd cmd;
- cmd.self = ToAPI(this);
- cmd.filter = filter;
-
- client->SerializeCommand(cmd);
- }
-
- bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
- if (mErrorScopeStackSize == 0) {
- return false;
- }
- mErrorScopeStackSize--;
-
- if (client->IsDisconnected()) {
- callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
- return true;
- }
-
- uint64_t serial = mErrorScopes.Add({callback, userdata});
-
- DevicePopErrorScopeCmd cmd;
- cmd.deviceId = this->id;
- cmd.requestSerial = serial;
-
- client->SerializeCommand(cmd);
-
- return true;
- }
-
- bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
- WGPUErrorType type,
- const char* message) {
- switch (type) {
- case WGPUErrorType_NoError:
- case WGPUErrorType_Validation:
- case WGPUErrorType_OutOfMemory:
- case WGPUErrorType_Unknown:
- case WGPUErrorType_DeviceLost:
- break;
- default:
- return false;
- }
-
- ErrorScopeData request;
- if (!mErrorScopes.Acquire(requestSerial, &request)) {
- return false;
- }
-
- request.callback(type, message, request.userdata);
- return true;
- }
-
- void Device::InjectError(WGPUErrorType type, const char* message) {
- DeviceInjectErrorCmd cmd;
- cmd.self = ToAPI(this);
- cmd.type = type;
- cmd.message = message;
- client->SerializeCommand(cmd);
- }
-
- WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
- return Buffer::Create(this, descriptor);
- }
-
- WGPUBuffer Device::CreateErrorBuffer() {
- return Buffer::CreateError(this);
- }
-
- bool Device::GetLimits(WGPUSupportedLimits* limits) {
- // Not implemented in the wire.
- UNREACHABLE();
- return false;
- }
-
- WGPUQueue Device::GetQueue() {
- // The queue is lazily created because if a Device is created by
- // Reserve/Inject, we cannot send the GetQueue message until
- // it has been injected on the Server. It cannot happen immediately
- // on construction.
- if (mQueue == nullptr) {
- // Get the primary queue for this device.
- auto* allocation = client->QueueAllocator().New(client);
- mQueue = allocation->object.get();
-
- DeviceGetQueueCmd cmd;
- cmd.self = ToAPI(this);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
-
- client->SerializeCommand(cmd);
- }
-
- mQueue->refcount++;
- return ToAPI(mQueue);
- }
-
- void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "GPU device disconnected", userdata);
- }
-
- auto* allocation = client->ComputePipelineAllocator().New(client);
-
- CreatePipelineAsyncRequest request = {};
- request.createComputePipelineAsyncCallback = callback;
- request.userdata = userdata;
- request.pipelineObjectID = allocation->object->id;
-
- uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
-
- DeviceCreateComputePipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
- cmd.descriptor = descriptor;
- cmd.requestSerial = serial;
- cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
-
- client->SerializeCommand(cmd);
- }
-
- bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- CreatePipelineAsyncRequest request;
- if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- auto pipelineAllocation =
- client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
-
- // If the return status is a failure we should give a null pipeline to the callback and
- // free the allocation both on the client side and the server side.
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- client->ComputePipelineAllocator().Free(pipelineAllocation);
- request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
- return true;
- }
-
- WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
- request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
-
- return true;
- }
-
- void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
- "GPU device disconnected", userdata);
- }
-
- auto* allocation = client->RenderPipelineAllocator().New(client);
-
- CreatePipelineAsyncRequest request = {};
- request.createRenderPipelineAsyncCallback = callback;
- request.userdata = userdata;
- request.pipelineObjectID = allocation->object->id;
-
- uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
-
- DeviceCreateRenderPipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
- cmd.descriptor = descriptor;
- cmd.requestSerial = serial;
- cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
-
- client->SerializeCommand(cmd);
- }
-
- bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message) {
- CreatePipelineAsyncRequest request;
- if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- auto pipelineAllocation =
- client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
-
- // If the return status is a failure we should give a null pipeline to the callback and
- // free the allocation both on the client side and the server side.
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- client->RenderPipelineAllocator().Free(pipelineAllocation);
- request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
- return true;
- }
-
- WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
- request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
-
- return true;
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
deleted file mode 100644
index 426799c1eb2..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_DEVICE_H_
-#define DAWNWIRE_CLIENT_DEVICE_H_
-
-#include <dawn/webgpu.h>
-
-#include "common/LinkedList.h"
-#include "dawn_wire/WireCmd_autogen.h"
-#include "dawn_wire/client/ApiObjects_autogen.h"
-#include "dawn_wire/client/ObjectBase.h"
-#include "dawn_wire/client/RequestTracker.h"
-
-#include <memory>
-
-namespace dawn_wire { namespace client {
-
- class Client;
- class Queue;
-
- class Device final : public ObjectBase {
- public:
- Device(Client* client, uint32_t refcount, uint32_t id);
- ~Device();
-
- void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
- void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
- void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
- void InjectError(WGPUErrorType type, const char* message);
- void PushErrorScope(WGPUErrorFilter filter);
- bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
- WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
- WGPUBuffer CreateErrorBuffer();
- WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
- void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
-
- void HandleError(WGPUErrorType errorType, const char* message);
- void HandleLogging(WGPULoggingType loggingType, const char* message);
- void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
- bool OnPopErrorScopeCallback(uint64_t requestSerial,
- WGPUErrorType type,
- const char* message);
- bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message);
- bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
- WGPUCreatePipelineAsyncStatus status,
- const char* message);
-
- bool GetLimits(WGPUSupportedLimits* limits);
- WGPUQueue GetQueue();
-
- void CancelCallbacksForDisconnect() override;
-
- std::weak_ptr<bool> GetAliveWeakPtr();
-
- private:
- struct ErrorScopeData {
- WGPUErrorCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<ErrorScopeData> mErrorScopes;
- uint64_t mErrorScopeStackSize = 0;
-
- struct CreatePipelineAsyncRequest {
- WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
- WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
- void* userdata = nullptr;
- ObjectId pipelineObjectID;
- };
- RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
-
- WGPUErrorCallback mErrorCallback = nullptr;
- WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
- WGPULoggingCallback mLoggingCallback = nullptr;
- bool mDidRunLostCallback = false;
- void* mErrorUserdata = nullptr;
- void* mDeviceLostUserdata = nullptr;
- void* mLoggingUserdata = nullptr;
-
- Queue* mQueue = nullptr;
-
- std::shared_ptr<bool> mIsAlive;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_DEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h b/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h
deleted file mode 100644
index 4572752552c..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ObjectAllocator.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
-#define DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
-
-#include "common/Assert.h"
-#include "common/Compiler.h"
-#include "dawn_wire/WireCmd_autogen.h"
-
-#include <limits>
-#include <memory>
-#include <vector>
-
-namespace dawn_wire { namespace client {
-
- template <typename T>
- class ObjectAllocator {
- public:
- struct ObjectAndSerial {
- ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
- : object(std::move(object)), generation(generation) {
- }
- std::unique_ptr<T> object;
- uint32_t generation;
- };
-
- ObjectAllocator() {
- // ID 0 is nullptr
- mObjects.emplace_back(nullptr, 0);
- }
-
- template <typename Client>
- ObjectAndSerial* New(Client* client) {
- uint32_t id = GetNewId();
- auto object = std::make_unique<T>(client, 1, id);
- client->TrackObject(object.get());
-
- if (id >= mObjects.size()) {
- ASSERT(id == mObjects.size());
- mObjects.emplace_back(std::move(object), 0);
- } else {
- ASSERT(mObjects[id].object == nullptr);
-
- mObjects[id].generation++;
- // The generation should never overflow. We don't recycle ObjectIds that would
- // overflow their next generation.
- ASSERT(mObjects[id].generation != 0);
-
- mObjects[id].object = std::move(object);
- }
-
- return &mObjects[id];
- }
- void Free(T* obj) {
- ASSERT(obj->IsInList());
- if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
- // Only recycle this ObjectId if the generation won't overflow on the next
- // allocation.
- FreeId(obj->id);
- }
- mObjects[obj->id].object = nullptr;
- }
-
- T* GetObject(uint32_t id) {
- if (id >= mObjects.size()) {
- return nullptr;
- }
- return mObjects[id].object.get();
- }
-
- uint32_t GetGeneration(uint32_t id) {
- if (id >= mObjects.size()) {
- return 0;
- }
- return mObjects[id].generation;
- }
-
- private:
- uint32_t GetNewId() {
- if (mFreeIds.empty()) {
- return mCurrentId++;
- }
- uint32_t id = mFreeIds.back();
- mFreeIds.pop_back();
- return id;
- }
- void FreeId(uint32_t id) {
- mFreeIds.push_back(id);
- }
-
- // 0 is an ID reserved to represent nullptr
- uint32_t mCurrentId = 1;
- std::vector<uint32_t> mFreeIds;
- std::vector<ObjectAndSerial> mObjects;
- };
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_OBJECTALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h b/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h
deleted file mode 100644
index ac257c84448..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_OBJECTBASE_H_
-#define DAWNWIRE_CLIENT_OBJECTBASE_H_
-
-#include <dawn/webgpu.h>
-
-#include "common/LinkedList.h"
-#include "dawn_wire/ObjectType_autogen.h"
-
-namespace dawn_wire { namespace client {
-
- class Client;
-
- // All objects on the client side have:
- // - A pointer to the Client to get where to serialize commands
- // - The external reference count
- // - An ID that is used to refer to this object when talking with the server side
- // - A next/prev pointer. They are part of a linked list of objects of the same type.
- struct ObjectBase : public LinkNode<ObjectBase> {
- ObjectBase(Client* client, uint32_t refcount, uint32_t id)
- : client(client), refcount(refcount), id(id) {
- }
-
- ~ObjectBase() {
- RemoveFromList();
- }
-
- virtual void CancelCallbacksForDisconnect() {
- }
-
- Client* const client;
- uint32_t refcount;
- const uint32_t id;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_OBJECTBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
deleted file mode 100644
index daa44d77071..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/Queue.h"
-
-#include "dawn_wire/client/Client.h"
-#include "dawn_wire/client/Device.h"
-
-namespace dawn_wire { namespace client {
-
- Queue::~Queue() {
- ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
- }
-
- bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
- OnWorkDoneData request;
- if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- request.callback(status, request.userdata);
- return true;
- }
-
- void Queue::OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
- return;
- }
-
- uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
-
- QueueOnSubmittedWorkDoneCmd cmd;
- cmd.queueId = this->id;
- cmd.signalValue = signalValue;
- cmd.requestSerial = serial;
-
- client->SerializeCommand(cmd);
- }
-
- void Queue::WriteBuffer(WGPUBuffer cBuffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- Buffer* buffer = FromAPI(cBuffer);
-
- QueueWriteBufferCmd cmd;
- cmd.queueId = id;
- cmd.bufferId = buffer->id;
- cmd.bufferOffset = bufferOffset;
- cmd.data = static_cast<const uint8_t*>(data);
- cmd.size = size;
-
- client->SerializeCommand(cmd);
- }
-
- void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize) {
- QueueWriteTextureCmd cmd;
- cmd.queueId = id;
- cmd.destination = destination;
- cmd.data = static_cast<const uint8_t*>(data);
- cmd.dataSize = dataSize;
- cmd.dataLayout = dataLayout;
- cmd.writeSize = writeSize;
-
- client->SerializeCommand(cmd);
- }
-
- void Queue::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
- }
-
- void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
- mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
- if (request->callback != nullptr) {
- request->callback(status, request->userdata);
- }
- });
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
deleted file mode 100644
index 901acac2d43..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_QUEUE_H_
-#define DAWNWIRE_CLIENT_QUEUE_H_
-
-#include <dawn/webgpu.h>
-
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/client/ObjectBase.h"
-#include "dawn_wire/client/RequestTracker.h"
-
-namespace dawn_wire { namespace client {
-
- class Queue final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
- ~Queue();
-
- bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
-
- // Dawn API
- void OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata);
- void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
- void WriteTexture(const WGPUImageCopyTexture* destination,
- const void* data,
- size_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize);
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
-
- struct OnWorkDoneData {
- WGPUQueueWorkDoneCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_QUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h b/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h
deleted file mode 100644
index 7ce2d0004fe..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_REQUESTTRACKER_H_
-#define DAWNWIRE_CLIENT_REQUESTTRACKER_H_
-
-#include "common/Assert.h"
-#include "common/NonCopyable.h"
-
-#include <cstdint>
-#include <map>
-
-namespace dawn_wire { namespace client {
-
- class Device;
- class MemoryTransferService;
-
- template <typename Request>
- class RequestTracker : NonCopyable {
- public:
- ~RequestTracker() {
- ASSERT(mRequests.empty());
- }
-
- uint64_t Add(Request&& request) {
- mSerial++;
- mRequests.emplace(mSerial, request);
- return mSerial;
- }
-
- bool Acquire(uint64_t serial, Request* request) {
- auto it = mRequests.find(serial);
- if (it == mRequests.end()) {
- return false;
- }
- *request = std::move(it->second);
- mRequests.erase(it);
- return true;
- }
-
- template <typename CloseFunc>
- void CloseAll(CloseFunc&& closeFunc) {
- // Call closeFunc on all requests while handling reentrancy where the callback of some
- // requests may add some additional requests. We guarantee all callbacks for requests
- // are called exactly onces, so keep closing new requests if the first batch added more.
- // It is fine to loop infinitely here if that's what the application makes use do.
- while (!mRequests.empty()) {
- // Move mRequests to a local variable so that further reentrant modifications of
- // mRequests don't invalidate the iterators.
- auto allRequests = std::move(mRequests);
- for (auto& it : allRequests) {
- closeFunc(&it.second);
- }
- }
- }
-
- template <typename F>
- void ForAll(F&& f) {
- for (auto& it : mRequests) {
- f(&it.second);
- }
- }
-
- private:
- uint64_t mSerial;
- std::map<uint64_t, Request> mRequests;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_REQUESTTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
deleted file mode 100644
index c28b978c3ab..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/ShaderModule.h"
-
-#include "dawn_wire/client/Client.h"
-
-namespace dawn_wire { namespace client {
-
- ShaderModule::~ShaderModule() {
- ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
- }
-
- void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
- if (client->IsDisconnected()) {
- callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
- return;
- }
-
- uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
-
- ShaderModuleGetCompilationInfoCmd cmd;
- cmd.shaderModuleId = this->id;
- cmd.requestSerial = serial;
-
- client->SerializeCommand(cmd);
- }
-
- bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info) {
- CompilationInfoRequest request;
- if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
- return false;
- }
-
- request.callback(status, info, request.userdata);
- return true;
- }
-
- void ShaderModule::CancelCallbacksForDisconnect() {
- ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
- }
-
- void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
- mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
- if (request->callback != nullptr) {
- request->callback(status, nullptr, request->userdata);
- }
- });
- }
-
-}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
deleted file mode 100644
index f12a4d0f1b6..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_SHADER_MODULE_H_
-#define DAWNWIRE_CLIENT_SHADER_MODULE_H_
-
-#include <dawn/webgpu.h>
-
-#include "dawn_wire/client/ObjectBase.h"
-#include "dawn_wire/client/RequestTracker.h"
-
-namespace dawn_wire { namespace client {
-
- class ShaderModule final : public ObjectBase {
- public:
- using ObjectBase::ObjectBase;
- ~ShaderModule();
-
- void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
- bool GetCompilationInfoCallback(uint64_t requestSerial,
- WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info);
-
- private:
- void CancelCallbacksForDisconnect() override;
- void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
-
- struct CompilationInfoRequest {
- WGPUCompilationInfoCallback callback = nullptr;
- void* userdata = nullptr;
- };
- RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
- };
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_SHADER_MODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h b/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
deleted file mode 100644
index bc45b262bc0..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_SERVER_OBJECTSTORAGE_H_
-#define DAWNWIRE_SERVER_OBJECTSTORAGE_H_
-
-#include "dawn_wire/WireCmd_autogen.h"
-#include "dawn_wire/WireServer.h"
-
-#include <algorithm>
-#include <map>
-#include <unordered_set>
-
-namespace dawn_wire { namespace server {
-
- struct DeviceInfo {
- std::unordered_set<uint64_t> childObjectTypesAndIds;
- Server* server;
- ObjectHandle self;
- };
-
- // Whether this object has been allocated, or reserved for async object creation.
- // Used by the KnownObjects queries
- enum class AllocationState : uint32_t {
- Free,
- Reserved,
- Allocated,
- };
-
- template <typename T>
- struct ObjectDataBase {
- // The backend-provided handle and generation to this object.
- T handle;
- uint32_t generation = 0;
-
- AllocationState state;
-
- // This points to an allocation that is owned by the device.
- DeviceInfo* deviceInfo = nullptr;
- };
-
- // Stores what the backend knows about the type.
- template <typename T>
- struct ObjectData : public ObjectDataBase<T> {};
-
- enum class BufferMapWriteState { Unmapped, Mapped, MapError };
-
- template <>
- struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
- // TODO(enga): Use a tagged pointer to save space.
- std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
- BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
- WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
- // Indicate if writeHandle needs to be destroyed on unmap
- bool mappedAtCreation = false;
- };
-
- // Pack the ObjectType and ObjectId as a single value for storage in
- // an std::unordered_set. This lets us avoid providing our own hash and
- // equality comparison operators.
- inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
- static_assert(sizeof(ObjectType) * 8 <= 32, "");
- static_assert(sizeof(ObjectId) * 8 <= 32, "");
- return (static_cast<uint64_t>(type) << 32) + id;
- }
-
- inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
- ObjectType type = static_cast<ObjectType>(payload >> 32);
- ObjectId id = payload & 0xFFFFFFFF;
- return std::make_pair(type, id);
- }
-
- template <>
- struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
- // Store |info| as a separate allocation so that its address does not move.
- // The pointer to |info| is stored in device child objects.
- std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
- };
-
- // Keeps track of the mapping between client IDs and backend objects.
- template <typename T>
- class KnownObjects {
- public:
- using Data = ObjectData<T>;
-
- KnownObjects() {
- // Reserve ID 0 so that it can be used to represent nullptr for optional object values
- // in the wire format. However don't tag it as allocated so that it is an error to ask
- // KnownObjects for ID 0.
- Data reservation;
- reservation.handle = nullptr;
- reservation.state = AllocationState::Free;
- mKnown.push_back(std::move(reservation));
- }
-
- // Get a backend objects for a given client ID.
- // Returns nullptr if the ID hasn't previously been allocated.
- const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
- if (id >= mKnown.size()) {
- return nullptr;
- }
-
- const Data* data = &mKnown[id];
-
- if (data->state != expected) {
- return nullptr;
- }
-
- return data;
- }
- Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
- if (id >= mKnown.size()) {
- return nullptr;
- }
-
- Data* data = &mKnown[id];
-
- if (data->state != expected) {
- return nullptr;
- }
-
- return data;
- }
-
- // Allocates the data for a given ID and returns it.
- // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
- // reserved for nullptr). Invalidates all the Data*
- Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
- if (id == 0 || id > mKnown.size()) {
- return nullptr;
- }
-
- Data data;
- data.state = state;
- data.handle = nullptr;
-
- if (id >= mKnown.size()) {
- mKnown.push_back(std::move(data));
- return &mKnown.back();
- }
-
- if (mKnown[id].state != AllocationState::Free) {
- return nullptr;
- }
-
- mKnown[id] = std::move(data);
- return &mKnown[id];
- }
-
- // Marks an ID as deallocated
- void Free(uint32_t id) {
- ASSERT(id < mKnown.size());
- mKnown[id].state = AllocationState::Free;
- }
-
- std::vector<T> AcquireAllHandles() {
- std::vector<T> objects;
- for (Data& data : mKnown) {
- if (data.state == AllocationState::Allocated && data.handle != nullptr) {
- objects.push_back(data.handle);
- data.state = AllocationState::Free;
- data.handle = nullptr;
- }
- }
-
- return objects;
- }
-
- std::vector<T> GetAllHandles() {
- std::vector<T> objects;
- for (Data& data : mKnown) {
- if (data.state == AllocationState::Allocated && data.handle != nullptr) {
- objects.push_back(data.handle);
- }
- }
-
- return objects;
- }
-
- private:
- std::vector<Data> mKnown;
- };
-
- // ObjectIds are lost in deserialization. Store the ids of deserialized
- // objects here so they can be used in command handlers. This is useful
- // for creating ReturnWireCmds which contain client ids
- template <typename T>
- class ObjectIdLookupTable {
- public:
- void Store(T key, ObjectId id) {
- mTable[key] = id;
- }
-
- // Return the cached ObjectId, or 0 (null handle)
- ObjectId Get(T key) const {
- const auto it = mTable.find(key);
- if (it != mTable.end()) {
- return it->second;
- }
- return 0;
- }
-
- void Remove(T key) {
- auto it = mTable.find(key);
- if (it != mTable.end()) {
- mTable.erase(it);
- }
- }
-
- private:
- std::map<T, ObjectId> mTable;
- };
-
-}} // namespace dawn_wire::server
-
-#endif // DAWNWIRE_SERVER_OBJECTSTORAGE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
deleted file mode 100644
index 8297cbdcf47..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/server/Server.h"
-#include "dawn_wire/WireServer.h"
-
-namespace dawn_wire { namespace server {
-
- Server::Server(const DawnProcTable& procs,
- CommandSerializer* serializer,
- MemoryTransferService* memoryTransferService)
- : mSerializer(serializer),
- mProcs(procs),
- mMemoryTransferService(memoryTransferService),
- mIsAlive(std::make_shared<bool>(true)) {
- if (mMemoryTransferService == nullptr) {
- // If a MemoryTransferService is not provided, fallback to inline memory.
- mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
- mMemoryTransferService = mOwnedMemoryTransferService.get();
- }
- }
-
- Server::~Server() {
- // Un-set the error and lost callbacks since we cannot forward them
- // after the server has been destroyed.
- for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
- ClearDeviceCallbacks(device);
- }
- DestroyAllObjects(mProcs);
- }
-
- bool Server::InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- ASSERT(texture != nullptr);
- ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
- if (device == nullptr || device->generation != deviceGeneration) {
- return false;
- }
-
- ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = texture;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->deviceInfo = device->info.get();
-
- if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
- return false;
- }
-
- // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.textureReference(texture);
-
- return true;
- }
-
- bool Server::InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration) {
- ASSERT(swapchain != nullptr);
- ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
- if (device == nullptr || device->generation != deviceGeneration) {
- return false;
- }
-
- ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = swapchain;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->deviceInfo = device->info.get();
-
- if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
- return false;
- }
-
- // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.swapChainReference(swapchain);
-
- return true;
- }
-
- bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
- ASSERT(device != nullptr);
- ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
- if (data == nullptr) {
- return false;
- }
-
- data->handle = device;
- data->generation = generation;
- data->state = AllocationState::Allocated;
- data->info->server = this;
- data->info->self = ObjectHandle{id, generation};
-
- // The device is externally owned so it shouldn't be destroyed when we receive a destroy
- // message from the client. Add a reference to counterbalance the eventual release.
- mProcs.deviceReference(device);
-
- // Set callbacks to forward errors to the client.
- // Note: these callbacks are manually inlined here since they do not acquire and
- // free their userdata. Also unlike other callbacks, these are cleared and unset when
- // the server is destroyed, so we don't need to check if the server is still alive
- // inside them.
- mProcs.deviceSetUncapturedErrorCallback(
- device,
- [](WGPUErrorType type, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnUncapturedError(info->self, type, message);
- },
- data->info.get());
- // Set callback to post warning and other infomation to client.
- // Almost the same with UncapturedError.
- mProcs.deviceSetLoggingCallback(
- device,
- [](WGPULoggingType type, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnLogging(info->self, type, message);
- },
- data->info.get());
- mProcs.deviceSetDeviceLostCallback(
- device,
- [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
- DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnDeviceLost(info->self, reason, message);
- },
- data->info.get());
-
- return true;
- }
-
- WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
- ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
- if (data == nullptr || data->generation != generation) {
- return nullptr;
- }
- return data->handle;
- }
-
- void Server::ClearDeviceCallbacks(WGPUDevice device) {
- // Un-set the error and lost callbacks since we cannot forward them
- // after the server has been destroyed.
- mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
- mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
- mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
- }
-
- bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
- auto it = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
- if (!it.second) {
- // An object of this type and id already exists.
- return false;
- }
- return true;
- }
-
- bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
- auto& children = info->childObjectTypesAndIds;
- auto it = children.find(PackObjectTypeAndId(type, id));
- if (it == children.end()) {
- // An object of this type and id was already deleted.
- return false;
- }
- children.erase(it);
- return true;
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
deleted file mode 100644
index b4429871f7e..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_SERVER_SERVER_H_
-#define DAWNWIRE_SERVER_SERVER_H_
-
-#include "dawn_wire/ChunkedCommandSerializer.h"
-#include "dawn_wire/server/ServerBase_autogen.h"
-
-namespace dawn_wire { namespace server {
-
- class Server;
- class MemoryTransferService;
-
- // CallbackUserdata and its derived classes are intended to be created by
- // Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
- // callbacks.
- // It contains a pointer back to the Server so that the callback can call the
- // Server to perform operations like serialization, and it contains a weak pointer
- // |serverIsAlive|. If the weak pointer has expired, it means the server has
- // been destroyed and the callback must not use the Server pointer.
- // To assist with checking |serverIsAlive| and lifetime management of the userdata,
- // |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
- // return early if |serverIsAlive| has expired, and then forward the arguments
- // to userdata->server->MyCallbackHandler.
- //
- // Example Usage:
- //
- // struct MyUserdata : CallbackUserdata { uint32_t foo; };
- //
- // auto userdata = MakeUserdata<MyUserdata>();
- // userdata->foo = 2;
- //
- // // TODO(enga): Make the template inference for ForwardToServer cleaner with C++17
- // callMyCallbackHandler(
- // ForwardToServer<decltype(&Server::MyCallbackHandler)>::Func<
- // &Server::MyCallbackHandler>(),
- // userdata.release());
- //
- // void Server::MyCallbackHandler(MyUserdata* userdata) { }
- struct CallbackUserdata {
- Server* const server;
- std::weak_ptr<bool> const serverIsAlive;
-
- CallbackUserdata() = delete;
- CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
- : server(server), serverIsAlive(serverIsAlive) {
- }
- };
-
- template <typename F>
- class ForwardToServer;
-
- template <typename R, typename... Args>
- class ForwardToServer<R (Server::*)(Args...)> {
- private:
- // Get the type T of the last argument. It has CallbackUserdata as its base.
- using UserdataT = typename std::remove_pointer<typename std::decay<decltype(
- std::get<sizeof...(Args) - 1>(std::declval<std::tuple<Args...>>()))>::type>::type;
-
- static_assert(std::is_base_of<CallbackUserdata, UserdataT>::value,
- "Last argument of callback handler should derive from CallbackUserdata.");
-
- template <class T, class... Ts>
- struct UntypedCallbackImpl;
-
- template <std::size_t... I, class... Ts>
- struct UntypedCallbackImpl<std::index_sequence<I...>, Ts...> {
- template <R (Server::*Func)(Args...)>
- static auto ForwardToServer(
- // Unpack and forward the types of the parameter pack.
- // Append void* as the last argument.
- typename std::tuple_element<I, std::tuple<Ts...>>::type... args,
- void* userdata) {
- // Acquire the userdata, and cast it to UserdataT.
- std::unique_ptr<UserdataT> data(static_cast<UserdataT*>(userdata));
- if (data->serverIsAlive.expired()) {
- // Do nothing if the server has already been destroyed.
- return;
- }
- // Forward the arguments and the typed userdata to the Server:: member function.
- (data->server->*Func)(std::forward<decltype(args)>(args)..., data.get());
- }
- };
-
- // Generate a free function which has all of the same arguments, except the last
- // userdata argument is void* instead of UserdataT*. Dawn's userdata args are void*.
- using UntypedCallback =
- UntypedCallbackImpl<std::make_index_sequence<sizeof...(Args) - 1>, Args...>;
-
- public:
- template <R (Server::*F)(Args...)>
- static auto Func() {
- return UntypedCallback::template ForwardToServer<F>;
- }
- };
-
- struct MapUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle buffer;
- WGPUBuffer bufferObj;
- uint64_t requestSerial;
- uint64_t offset;
- uint64_t size;
- WGPUMapModeFlags mode;
- };
-
- struct ErrorScopeUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle device;
- uint64_t requestSerial;
- };
-
- struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle shaderModule;
- uint64_t requestSerial;
- };
-
- struct QueueWorkDoneUserdata : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle queue;
- uint64_t requestSerial;
- };
-
- struct CreatePipelineAsyncUserData : CallbackUserdata {
- using CallbackUserdata::CallbackUserdata;
-
- ObjectHandle device;
- uint64_t requestSerial;
- ObjectId pipelineObjectID;
- };
-
- class Server : public ServerBase {
- public:
- Server(const DawnProcTable& procs,
- CommandSerializer* serializer,
- MemoryTransferService* memoryTransferService);
- ~Server() override;
-
- // ChunkedCommandHandler implementation
- const volatile char* HandleCommandsImpl(const volatile char* commands,
- size_t size) override;
-
- bool InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
-
- WGPUDevice GetDevice(uint32_t id, uint32_t generation);
-
- template <typename T,
- typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
- std::unique_ptr<T> MakeUserdata() {
- return std::unique_ptr<T>(new T(this, mIsAlive));
- }
-
- private:
- template <typename Cmd>
- void SerializeCommand(const Cmd& cmd) {
- mSerializer.SerializeCommand(cmd);
- }
-
- template <typename Cmd, typename ExtraSizeSerializeFn>
- void SerializeCommand(const Cmd& cmd,
- size_t extraSize,
- ExtraSizeSerializeFn&& SerializeExtraSize) {
- mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
- }
-
- void ClearDeviceCallbacks(WGPUDevice device);
-
- // Error callbacks
- void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
- void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
- void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
- void OnDevicePopErrorScope(WGPUErrorType type,
- const char* message,
- ErrorScopeUserdata* userdata);
- void OnBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, MapUserdata* userdata);
- void OnQueueWorkDone(WGPUQueueWorkDoneStatus status, QueueWorkDoneUserdata* userdata);
- void OnCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
- WGPUComputePipeline pipeline,
- const char* message,
- CreatePipelineAsyncUserData* userdata);
- void OnCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
- WGPURenderPipeline pipeline,
- const char* message,
- CreatePipelineAsyncUserData* userdata);
- void OnShaderModuleGetCompilationInfo(WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info,
- ShaderModuleGetCompilationInfoUserdata* userdata);
-
-#include "dawn_wire/server/ServerPrototypes_autogen.inc"
-
- WireDeserializeAllocator mAllocator;
- ChunkedCommandSerializer mSerializer;
- DawnProcTable mProcs;
- std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
- MemoryTransferService* mMemoryTransferService = nullptr;
-
- std::shared_ptr<bool> mIsAlive;
- };
-
- bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
- bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
-
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
-
-}} // namespace dawn_wire::server
-
-#endif // DAWNWIRE_SERVER_SERVER_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
deleted file mode 100644
index 05be903a5cf..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_wire/BufferConsumer_impl.h"
-#include "dawn_wire/WireCmd_autogen.h"
-#include "dawn_wire/server/Server.h"
-
-#include <memory>
-
-namespace dawn_wire { namespace server {
-
- bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
- auto* buffer = BufferObjects().Get(cmd.selfId);
- DAWN_ASSERT(buffer != nullptr);
-
- if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
- // This indicates the writeHandle is for mappedAtCreation only. Destroy on unmap
- // writeHandle could have possibly been deleted if buffer is already destroyed so we
- // don't assert it's non-null
- buffer->writeHandle = nullptr;
- }
-
- buffer->mapWriteState = BufferMapWriteState::Unmapped;
-
- return true;
- }
-
- bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
- // Destroying a buffer does an implicit unmapping.
- auto* buffer = BufferObjects().Get(cmd.selfId);
- DAWN_ASSERT(buffer != nullptr);
-
- // The buffer was destroyed. Clear the Read/WriteHandle.
- buffer->readHandle = nullptr;
- buffer->writeHandle = nullptr;
- buffer->mapWriteState = BufferMapWriteState::Unmapped;
-
- return true;
- }
-
- bool Server::DoBufferMapAsync(ObjectId bufferId,
- uint64_t requestSerial,
- WGPUMapModeFlags mode,
- uint64_t offset64,
- uint64_t size64) {
- // These requests are just forwarded to the buffer, with userdata containing what the
- // client will require in the return command.
-
- // The null object isn't valid as `self`
- if (bufferId == 0) {
- return false;
- }
-
- auto* buffer = BufferObjects().Get(bufferId);
- if (buffer == nullptr) {
- return false;
- }
-
- std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
- userdata->buffer = ObjectHandle{bufferId, buffer->generation};
- userdata->bufferObj = buffer->handle;
- userdata->requestSerial = requestSerial;
- userdata->mode = mode;
-
- // Make sure that the deserialized offset and size are no larger than
- // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
- // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
- // client does the default size computation, we should always have a valid actual size here
- // in server. All other invalid actual size can be caught by dawn native side validation.
- if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
- OnBufferMapAsyncCallback(WGPUBufferMapAsyncStatus_Error, userdata.get());
- return true;
- }
-
- size_t offset = static_cast<size_t>(offset64);
- size_t size = static_cast<size_t>(size64);
-
- userdata->offset = offset;
- userdata->size = size;
-
- mProcs.bufferMapAsync(
- buffer->handle, mode, offset, size,
- ForwardToServer<decltype(
- &Server::OnBufferMapAsyncCallback)>::Func<&Server::OnBufferMapAsyncCallback>(),
- userdata.release());
-
- return true;
- }
-
- bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
- const WGPUBufferDescriptor* descriptor,
- ObjectHandle bufferResult,
- uint64_t readHandleCreateInfoLength,
- const uint8_t* readHandleCreateInfo,
- uint64_t writeHandleCreateInfoLength,
- const uint8_t* writeHandleCreateInfo) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- // Create and register the buffer object.
- auto* resultData = BufferObjects().Allocate(bufferResult.id);
- if (resultData == nullptr) {
- return false;
- }
- resultData->generation = bufferResult.generation;
- resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
- resultData->deviceInfo = device->info.get();
- resultData->usage = descriptor->usage;
- resultData->mappedAtCreation = descriptor->mappedAtCreation;
- if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
- return false;
- }
-
- // isReadMode and isWriteMode could be true at the same time if usage contains
- // WGPUMapMode_Read and buffer is mappedAtCreation
- bool isReadMode = descriptor->usage & WGPUMapMode_Read;
- bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
-
- // This is the size of data deserialized from the command stream to create the read/write
- // handle, which must be CPU-addressable.
- if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
- writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
- readHandleCreateInfoLength >
- std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
- return false;
- }
-
- if (isWriteMode) {
- MemoryTransferService::WriteHandle* writeHandle = nullptr;
- // Deserialize metadata produced from the client to create a companion server handle.
- if (!mMemoryTransferService->DeserializeWriteHandle(
- writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
- &writeHandle)) {
- return false;
- }
- ASSERT(writeHandle != nullptr);
- resultData->writeHandle.reset(writeHandle);
- writeHandle->SetDataLength(descriptor->size);
-
- if (descriptor->mappedAtCreation) {
- void* mapping =
- mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
- if (mapping == nullptr) {
- // A zero mapping is used to indicate an allocation error of an error buffer.
- // This is a valid case and isn't fatal. Remember the buffer is an error so as
- // to skip subsequent mapping operations.
- resultData->mapWriteState = BufferMapWriteState::MapError;
- return true;
- }
- ASSERT(mapping != nullptr);
- writeHandle->SetTarget(mapping);
-
- resultData->mapWriteState = BufferMapWriteState::Mapped;
- }
- }
-
- if (isReadMode) {
- MemoryTransferService::ReadHandle* readHandle = nullptr;
- // Deserialize metadata produced from the client to create a companion server handle.
- if (!mMemoryTransferService->DeserializeReadHandle(
- readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
- &readHandle)) {
- return false;
- }
- ASSERT(readHandle != nullptr);
-
- resultData->readHandle.reset(readHandle);
- }
-
- return true;
- }
-
- bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
- uint64_t writeDataUpdateInfoLength,
- const uint8_t* writeDataUpdateInfo,
- uint64_t offset,
- uint64_t size) {
- // The null object isn't valid as `self`
- if (bufferId == 0) {
- return false;
- }
-
- if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
- offset > std::numeric_limits<size_t>::max() ||
- size > std::numeric_limits<size_t>::max()) {
- return false;
- }
-
- auto* buffer = BufferObjects().Get(bufferId);
- if (buffer == nullptr) {
- return false;
- }
- switch (buffer->mapWriteState) {
- case BufferMapWriteState::Unmapped:
- return false;
- case BufferMapWriteState::MapError:
- // The buffer is mapped but there was an error allocating mapped data.
- // Do not perform the memcpy.
- return true;
- case BufferMapWriteState::Mapped:
- break;
- }
- if (!buffer->writeHandle) {
- // This check is performed after the check for the MapError state. It is permissible
- // to Unmap and attempt to update mapped data of an error buffer.
- return false;
- }
-
- // Deserialize the flush info and flush updated data from the handle into the target
- // of the handle. The target is set via WriteHandle::SetTarget.
- return buffer->writeHandle->DeserializeDataUpdate(
- writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
- static_cast<size_t>(offset), static_cast<size_t>(size));
- }
-
- void Server::OnBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, MapUserdata* data) {
- // Skip sending the callback if the buffer has already been destroyed.
- auto* bufferData = BufferObjects().Get(data->buffer.id);
- if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
- return;
- }
-
- bool isRead = data->mode & WGPUMapMode_Read;
- bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
-
- ReturnBufferMapAsyncCallbackCmd cmd;
- cmd.buffer = data->buffer;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.readDataUpdateInfoLength = 0;
- cmd.readDataUpdateInfo = nullptr;
-
- const void* readData = nullptr;
- if (isSuccess) {
- if (isRead) {
- // Get the serialization size of the message to initialize ReadHandle data.
- readData =
- mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
- cmd.readDataUpdateInfoLength =
- bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
- } else {
- ASSERT(data->mode & WGPUMapMode_Write);
- // The in-flight map request returned successfully.
- bufferData->mapWriteState = BufferMapWriteState::Mapped;
- // Set the target of the WriteHandle to the mapped buffer data.
- // writeHandle Target always refers to the buffer base address.
- // but we call getMappedRange exactly with the range of data that is potentially
- // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
- // subset of the buffer is actually mapped) in case the implementation does some
- // range tracking.
- bufferData->writeHandle->SetTarget(
- static_cast<uint8_t*>(
- mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size)) -
- data->offset);
- }
- }
-
- SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- if (isSuccess && isRead) {
- char* readHandleBuffer;
- WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
- // The in-flight map request returned successfully.
- bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
- readHandleBuffer);
- }
- return WireResult::Success;
- });
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
deleted file mode 100644
index c8cddf4e500..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/server/Server.h"
-
-namespace dawn_wire { namespace server {
-
- namespace {
-
- template <ObjectType objectType, typename Pipeline>
- void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
- WGPUCreatePipelineAsyncStatus status,
- Pipeline pipeline,
- CreatePipelineAsyncUserData* data) {
- // May be null if the device was destroyed. Device destruction destroys child
- // objects on the wire.
- auto* pipelineObject =
- knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
- // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
- // they move from Reserved to Allocated, or if they are destroyed here.
- ASSERT(pipelineObject != nullptr);
-
- if (status == WGPUCreatePipelineAsyncStatus_Success) {
- // Assign the handle and allocated status if the pipeline is created successfully.
- pipelineObject->state = AllocationState::Allocated;
- pipelineObject->handle = pipeline;
-
- // This should be impossible to fail. It would require a command to be sent that
- // creates a duplicate ObjectId, which would fail validation.
- bool success = TrackDeviceChild(pipelineObject->deviceInfo, objectType,
- data->pipelineObjectID);
- ASSERT(success);
- } else {
- // Otherwise, free the ObjectId which will make it unusable.
- knownObjects->Free(data->pipelineObjectID);
- ASSERT(pipeline == nullptr);
- }
- }
-
- } // anonymous namespace
-
- void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
- ReturnDeviceUncapturedErrorCallbackCmd cmd;
- cmd.device = device;
- cmd.type = type;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
- void Server::OnDeviceLost(ObjectHandle device,
- WGPUDeviceLostReason reason,
- const char* message) {
- ReturnDeviceLostCallbackCmd cmd;
- cmd.device = device;
- cmd.reason = reason;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
- void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
- ReturnDeviceLoggingCallbackCmd cmd;
- cmd.device = device;
- cmd.type = type;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
- bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- auto userdata = MakeUserdata<ErrorScopeUserdata>();
- userdata->requestSerial = requestSerial;
- userdata->device = ObjectHandle{deviceId, device->generation};
-
- ErrorScopeUserdata* unownedUserdata = userdata.release();
- bool success = mProcs.devicePopErrorScope(
- device->handle,
- ForwardToServer<decltype(
- &Server::OnDevicePopErrorScope)>::Func<&Server::OnDevicePopErrorScope>(),
- unownedUserdata);
- if (!success) {
- delete unownedUserdata;
- }
- return success;
- }
-
- void Server::OnDevicePopErrorScope(WGPUErrorType type,
- const char* message,
- ErrorScopeUserdata* userdata) {
- ReturnDevicePopErrorScopeCallbackCmd cmd;
- cmd.device = userdata->device;
- cmd.requestSerial = userdata->requestSerial;
- cmd.type = type;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
- bool Server::DoDeviceCreateComputePipelineAsync(
- ObjectId deviceId,
- uint64_t requestSerial,
- ObjectHandle pipelineObjectHandle,
- const WGPUComputePipelineDescriptor* descriptor) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- auto* resultData =
- ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
-
- resultData->generation = pipelineObjectHandle.generation;
- resultData->deviceInfo = device->info.get();
-
- auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
- userdata->device = ObjectHandle{deviceId, device->generation};
- userdata->requestSerial = requestSerial;
- userdata->pipelineObjectID = pipelineObjectHandle.id;
-
- mProcs.deviceCreateComputePipelineAsync(
- device->handle, descriptor,
- ForwardToServer<decltype(&Server::OnCreateComputePipelineAsyncCallback)>::Func<
- &Server::OnCreateComputePipelineAsyncCallback>(),
- userdata.release());
- return true;
- }
-
- void Server::OnCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
- WGPUComputePipeline pipeline,
- const char* message,
- CreatePipelineAsyncUserData* data) {
- HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
- &ComputePipelineObjects(), status, pipeline, data);
-
- ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
- cmd.device = data->device;
- cmd.status = status;
- cmd.requestSerial = data->requestSerial;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
- bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
- uint64_t requestSerial,
- ObjectHandle pipelineObjectHandle,
- const WGPURenderPipelineDescriptor* descriptor) {
- auto* device = DeviceObjects().Get(deviceId);
- if (device == nullptr) {
- return false;
- }
-
- auto* resultData =
- RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
- if (resultData == nullptr) {
- return false;
- }
-
- resultData->generation = pipelineObjectHandle.generation;
- resultData->deviceInfo = device->info.get();
-
- auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
- userdata->device = ObjectHandle{deviceId, device->generation};
- userdata->requestSerial = requestSerial;
- userdata->pipelineObjectID = pipelineObjectHandle.id;
-
- mProcs.deviceCreateRenderPipelineAsync(
- device->handle, descriptor,
- ForwardToServer<decltype(&Server::OnCreateRenderPipelineAsyncCallback)>::Func<
- &Server::OnCreateRenderPipelineAsyncCallback>(),
- userdata.release());
- return true;
- }
-
- void Server::OnCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
- WGPURenderPipeline pipeline,
- const char* message,
- CreatePipelineAsyncUserData* data) {
- HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
- &RenderPipelineObjects(), status, pipeline, data);
-
- ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
- cmd.device = data->device;
- cmd.status = status;
- cmd.requestSerial = data->requestSerial;
- cmd.message = message;
-
- SerializeCommand(cmd);
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp
deleted file mode 100644
index 71347d2f7c5..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerInlineMemoryTransferService.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_wire/WireServer.h"
-#include "dawn_wire/server/Server.h"
-
-#include <cstring>
-
-namespace dawn_wire { namespace server {
-
- class InlineMemoryTransferService : public MemoryTransferService {
- public:
- class ReadHandleImpl : public ReadHandle {
- public:
- ReadHandleImpl() {
- }
- ~ReadHandleImpl() override = default;
-
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
- return size;
- }
-
- void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) override {
- if (size > 0) {
- ASSERT(data != nullptr);
- ASSERT(serializePointer != nullptr);
- memcpy(serializePointer, data, size);
- }
- }
- };
-
- class WriteHandleImpl : public WriteHandle {
- public:
- WriteHandleImpl() {
- }
- ~WriteHandleImpl() override = default;
-
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override {
- if (deserializeSize != size || mTargetData == nullptr ||
- deserializePointer == nullptr) {
- return false;
- }
- if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
- return false;
- }
- memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
- return true;
- }
- };
-
- InlineMemoryTransferService() {
- }
- ~InlineMemoryTransferService() override = default;
-
- bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) override {
- ASSERT(readHandle != nullptr);
- *readHandle = new ReadHandleImpl();
- return true;
- }
-
- bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) override {
- ASSERT(writeHandle != nullptr);
- *writeHandle = new WriteHandleImpl();
- return true;
- }
- };
-
- std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
- return std::make_unique<InlineMemoryTransferService>();
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp
deleted file mode 100644
index 165c6d3ebfb..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/server/ServerMemoryTransferService_mock.h"
-
-#include "common/Assert.h"
-
-namespace dawn_wire { namespace server {
-
- MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
- : ReadHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
- mService->OnReadHandleDestroy(this);
- }
-
- size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
- size_t size) {
- return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
- }
-
- void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) {
- mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
- }
-
- MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
- : WriteHandle(), mService(service) {
- }
-
- MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
- mService->OnWriteHandleDestroy(this);
- }
-
- const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
- return reinterpret_cast<const uint32_t*>(mTargetData);
- }
-
- bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
- const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return mService->OnWriteHandleDeserializeDataUpdate(
- this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
- size);
- }
-
- MockMemoryTransferService::MockMemoryTransferService() = default;
- MockMemoryTransferService::~MockMemoryTransferService() = default;
-
- bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
- deserializeSize, readHandle);
- }
-
- bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) {
- ASSERT(deserializeSize % sizeof(uint32_t) == 0);
- return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
- deserializeSize, writeHandle);
- }
-
- MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
- return new MockReadHandle(this);
- }
-
- MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
- return new MockWriteHandle(this);
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.h b/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.h
deleted file mode 100644
index 23cf9ed6070..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerMemoryTransferService_mock.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
-#define DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
-
-#include <gmock/gmock.h>
-
-#include "dawn_wire/WireServer.h"
-#include "dawn_wire/server/Server.h"
-
-namespace dawn_wire { namespace server {
-
- class MockMemoryTransferService : public MemoryTransferService {
- public:
- class MockReadHandle : public ReadHandle {
- public:
- MockReadHandle(MockMemoryTransferService* service);
- ~MockReadHandle() override;
-
- size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
- void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) override;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- class MockWriteHandle : public WriteHandle {
- public:
- MockWriteHandle(MockMemoryTransferService* service);
- ~MockWriteHandle() override;
-
- bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) override;
-
- const uint32_t* GetData() const;
-
- private:
- MockMemoryTransferService* mService;
- };
-
- MockMemoryTransferService();
- ~MockMemoryTransferService() override;
-
- bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) override;
-
- bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) override;
-
- MockReadHandle* NewReadHandle();
- MockWriteHandle* NewWriteHandle();
-
- MOCK_METHOD(bool,
- OnDeserializeReadHandle,
- (const uint32_t* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle));
-
- MOCK_METHOD(bool,
- OnDeserializeWriteHandle,
- (const uint32_t* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle));
-
- MOCK_METHOD(size_t,
- OnReadHandleSizeOfSerializeDataUpdate,
- (const ReadHandle* readHandle, size_t offset, size_t size));
- MOCK_METHOD(void,
- OnReadHandleSerializeDataUpdate,
- (const ReadHandle* readHandle,
- const void* data,
- size_t offset,
- size_t size,
- void* serializePointer));
- MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
-
- MOCK_METHOD(bool,
- OnWriteHandleDeserializeDataUpdate,
- (const WriteHandle* writeHandle,
- const uint32_t* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size));
- MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
- };
-
-}} // namespace dawn_wire::server
-
-#endif // DAWNWIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
deleted file mode 100644
index 08a5925c2e8..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Assert.h"
-#include "dawn_wire/server/Server.h"
-
-namespace dawn_wire { namespace server {
-
- void Server::OnQueueWorkDone(WGPUQueueWorkDoneStatus status, QueueWorkDoneUserdata* data) {
- ReturnQueueWorkDoneCallbackCmd cmd;
- cmd.queue = data->queue;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
-
- SerializeCommand(cmd);
- }
-
- bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
- uint64_t signalValue,
- uint64_t requestSerial) {
- auto* queue = QueueObjects().Get(queueId);
- if (queue == nullptr) {
- return false;
- }
-
- auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
- userdata->queue = ObjectHandle{queueId, queue->generation};
- userdata->requestSerial = requestSerial;
-
- mProcs.queueOnSubmittedWorkDone(
- queue->handle, signalValue,
- ForwardToServer<decltype(&Server::OnQueueWorkDone)>::Func<&Server::OnQueueWorkDone>(),
- userdata.release());
- return true;
- }
-
- bool Server::DoQueueWriteBuffer(ObjectId queueId,
- ObjectId bufferId,
- uint64_t bufferOffset,
- const uint8_t* data,
- uint64_t size) {
- // The null object isn't valid as `self` or `buffer` so we can combine the check with the
- // check that the ID is valid.
- auto* queue = QueueObjects().Get(queueId);
- auto* buffer = BufferObjects().Get(bufferId);
- if (queue == nullptr || buffer == nullptr) {
- return false;
- }
-
- if (size > std::numeric_limits<size_t>::max()) {
- auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
- if (device == nullptr) {
- return false;
- }
- return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
- WGPUErrorType_OutOfMemory,
- "Data size too large for write texture.");
- }
-
- mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
- static_cast<size_t>(size));
- return true;
- }
-
- bool Server::DoQueueWriteTexture(ObjectId queueId,
- const WGPUImageCopyTexture* destination,
- const uint8_t* data,
- uint64_t dataSize,
- const WGPUTextureDataLayout* dataLayout,
- const WGPUExtent3D* writeSize) {
- // The null object isn't valid as `self` so we can combine the check with the
- // check that the ID is valid.
- auto* queue = QueueObjects().Get(queueId);
- if (queue == nullptr) {
- return false;
- }
-
- if (dataSize > std::numeric_limits<size_t>::max()) {
- auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
- if (device == nullptr) {
- return false;
- }
- return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
- WGPUErrorType_OutOfMemory,
- "Data size too large for write texture.");
- }
-
- mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
- dataLayout, writeSize);
- return true;
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp
deleted file mode 100644
index cec0dc4db30..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/server/Server.h"
-
-#include <memory>
-
-namespace dawn_wire { namespace server {
-
- bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
- auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
- if (shaderModule == nullptr) {
- return false;
- }
-
- auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
- userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
- userdata->requestSerial = requestSerial;
-
- mProcs.shaderModuleGetCompilationInfo(
- shaderModule->handle,
- ForwardToServer<decltype(&Server::OnShaderModuleGetCompilationInfo)>::Func<
- &Server::OnShaderModuleGetCompilationInfo>(),
- userdata.release());
- return true;
- }
-
- void Server::OnShaderModuleGetCompilationInfo(WGPUCompilationInfoRequestStatus status,
- const WGPUCompilationInfo* info,
- ShaderModuleGetCompilationInfoUserdata* data) {
- ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
- cmd.shaderModule = data->shaderModule;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
- cmd.info = info;
-
- SerializeCommand(cmd);
- }
-
-}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
deleted file mode 100644
index ee06dfedba4..00000000000
--- a/chromium/third_party/dawn/src/fuzzers/BUILD.gn
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2018 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("//build_overrides/build.gni")
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-# We only have libfuzzer in Chromium builds but if we build fuzzer targets only
-# there, we would risk breaking fuzzer targets all the time when making changes
-# to Dawn. To avoid that, we make fuzzer targets compile in standalone builds
-# as well with a dawn_fuzzer_test target that acts like Chromium's fuzzer_test.
-#
-# The standalone fuzzer targets are able to run a single fuzzer input which
-# could help reproduce fuzzer crashes more easily because you don't need a
-# whole Chromium checkout.
-
-if (build_with_chromium) {
- import("//testing/libfuzzer/fuzzer_test.gni")
-
- # In Chromium build we just proxy everything to the real fuzzer_test
- template("dawn_fuzzer_test") {
- fuzzer_test(target_name) {
- forward_variables_from(invoker, "*")
- }
- }
-} else {
- import("//testing/test.gni")
-
- # In standalone build we do something similar to fuzzer_test.
- template("dawn_fuzzer_test") {
- test(target_name) {
- forward_variables_from(invoker,
- [
- "asan_options",
- "cflags",
- "cflags_cc",
- "check_includes",
- "defines",
- "deps",
- "include_dirs",
- "sources",
- ])
-
- if (defined(asan_options)) {
- not_needed([ "asan_options" ])
- }
-
- if (!defined(configs)) {
- configs = []
- }
-
- # Weirdly fuzzer_test uses a special variable for additional configs.
- if (defined(invoker.additional_configs)) {
- configs += invoker.additional_configs
- }
-
- sources += [ "StandaloneFuzzerMain.cpp" ]
- }
- }
-}
-
-static_library("dawn_wire_server_fuzzer_common") {
- sources = [
- "DawnWireServerFuzzer.cpp",
- "DawnWireServerFuzzer.h",
- ]
- public_deps = [
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/dawn_wire:dawn_wire_static",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-}
-
-dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
- sources = [ "DawnWireServerAndFrontendFuzzer.cpp" ]
-
- deps = [ ":dawn_wire_server_fuzzer_common" ]
-
- additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
-}
-
-if (is_win) {
- dawn_fuzzer_test("dawn_wire_server_and_d3d12_backend_fuzzer") {
- sources = [ "DawnWireServerAndD3D12BackendFuzzer.cpp" ]
-
- deps = [ ":dawn_wire_server_fuzzer_common" ]
-
- additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
- }
-}
-
-dawn_fuzzer_test("dawn_wire_server_and_vulkan_backend_fuzzer") {
- sources = [ "DawnWireServerAndVulkanBackendFuzzer.cpp" ]
-
- deps = [ ":dawn_wire_server_fuzzer_common" ]
-
- additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
-}
-
-# A group target to build all the fuzzers
-group("dawn_fuzzers") {
- testonly = true
- deps = [
- ":dawn_wire_server_and_frontend_fuzzer",
- ":dawn_wire_server_and_vulkan_backend_fuzzer",
- ]
-
- if (is_win) {
- deps += [ ":dawn_wire_server_and_d3d12_backend_fuzzer" ]
- }
-}
diff --git a/chromium/third_party/dawn/src/fuzzers/dawn/BUILD.gn b/chromium/third_party/dawn/src/fuzzers/dawn/BUILD.gn
new file mode 100644
index 00000000000..a2756c70c42
--- /dev/null
+++ b/chromium/third_party/dawn/src/fuzzers/dawn/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2022 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+# Build target aliases
+# TODO(crbug.com/dawn/1275) - remove these
+################################################################################
+group("dawn_fuzzers") {
+ public_deps = [ "../../dawn/fuzzers" ]
+ testonly = true
+}
diff --git a/chromium/third_party/dawn/src/include/README.md b/chromium/third_party/dawn/src/include/README.md
new file mode 100644
index 00000000000..8111f628c57
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/README.md
@@ -0,0 +1,4 @@
+# TODO(crbug.com/dawn/1275) - remove this directory
+
+This directory exists as a temporary include directory while migrating Chromium source to the new Dawn include layout.
+All headers in the subdirectories simply #include to the new location for the header.
diff --git a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
index 3947f007a4f..143e9803f9e 100644
--- a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
+++ b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
@@ -1,156 +1 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_ENUM_CLASS_BITMASKS_H_
-#define DAWN_ENUM_CLASS_BITMASKS_H_
-
-#include <type_traits>
-
-// The operators in dawn:: namespace need be introduced into other namespaces with
-// using-declarations for C++ Argument Dependent Lookup to work.
-#define DAWN_IMPORT_BITMASK_OPERATORS \
- using dawn::operator|; \
- using dawn::operator&; \
- using dawn::operator^; \
- using dawn::operator~; \
- using dawn::operator&=; \
- using dawn::operator|=; \
- using dawn::operator^=; \
- using dawn::HasZeroOrOneBits;
-
-namespace dawn {
-
- template <typename T>
- struct IsDawnBitmask {
- static constexpr bool enable = false;
- };
-
- template <typename T, typename Enable = void>
- struct LowerBitmask {
- static constexpr bool enable = false;
- };
-
- template <typename T>
- struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
- static constexpr bool enable = true;
- using type = T;
- constexpr static T Lower(T t) {
- return t;
- }
- };
-
- template <typename T>
- struct BoolConvertible {
- using Integral = typename std::underlying_type<T>::type;
-
- constexpr BoolConvertible(Integral value) : value(value) {
- }
- constexpr operator bool() const {
- return value != 0;
- }
- constexpr operator T() const {
- return static_cast<T>(value);
- }
-
- Integral value;
- };
-
- template <typename T>
- struct LowerBitmask<BoolConvertible<T>> {
- static constexpr bool enable = true;
- using type = T;
- static constexpr type Lower(BoolConvertible<T> t) {
- return t;
- }
- };
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1,
- typename T2,
- typename = typename std::enable_if<LowerBitmask<T1>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
- static_cast<Integral>(LowerBitmask<T2>::Lower(right));
- }
-
- template <typename T1>
- constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
- using T = typename LowerBitmask<T1>::type;
- using Integral = typename std::underlying_type<T>::type;
- return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator&=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l & r;
- return l;
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator|=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l | r;
- return l;
- }
-
- template <typename T,
- typename T2,
- typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
- LowerBitmask<T2>::enable>::type>
- constexpr T& operator^=(T& l, T2 right) {
- T r = LowerBitmask<T2>::Lower(right);
- l = l ^ r;
- return l;
- }
-
- template <typename T>
- constexpr bool HasZeroOrOneBits(T value) {
- using Integral = typename std::underlying_type<T>::type;
- return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
- }
-
-} // namespace dawn
-
-#endif // DAWN_ENUM_CLASS_BITMASKS_H_
+#include <dawn/EnumClassBitmasks.h>
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
index adeec463352..f706d9f366f 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
@@ -1,36 +1 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_DAWN_PROC_H_
-#define DAWN_DAWN_PROC_H_
-
-#include "dawn/dawn_proc_table.h"
-#include "dawn/webgpu.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Sets the static proctable used by libdawn_proc to implement the Dawn entrypoints. Passing NULL
-// for `procs` sets up the null proctable that contains only null function pointers. It is the
-// default value of the proctable. Setting the proctable back to null is good practice when you
-// are done using libdawn_proc since further usage will cause a segfault instead of calling an
-// unexpected function.
-WGPU_EXPORT void dawnProcSetProcs(const DawnProcTable* procs);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // DAWN_DAWN_PROC_H_
+#include <dawn/dawn_proc.h>
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
index 4d08ba8adcd..318acb139fb 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_thread_dispatch_proc.h
@@ -1,33 +1 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_DAWN_THREAD_DISPATCH_PROC_H_
-#define DAWN_DAWN_THREAD_DISPATCH_PROC_H_
-
-#include "dawn/dawn_proc.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Call dawnProcSetProcs(&dawnThreadDispatchProcTable) and then use dawnProcSetPerThreadProcs
-// to set per-thread procs.
-WGPU_EXPORT extern DawnProcTable dawnThreadDispatchProcTable;
-WGPU_EXPORT void dawnProcSetPerThreadProcs(const DawnProcTable* procs);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // DAWN_DAWN_THREAD_DISPATCH_PROC_H_
+#include <dawn/dawn_thread_dispatch_proc.h>
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
index f1a6047b5b3..0ee9aab2a6d 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
@@ -1,86 +1 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_DAWN_WSI_H_
-#define DAWN_DAWN_WSI_H_
-
-#include <dawn/webgpu.h>
-
-// Error message (or nullptr if there was no error)
-typedef const char* DawnSwapChainError;
-constexpr DawnSwapChainError DAWN_SWAP_CHAIN_NO_ERROR = nullptr;
-
-typedef struct {
- /// Backend-specific texture id/name/pointer
- union {
- void* ptr;
- uint64_t u64;
- uint32_t u32;
- } texture;
-} DawnSwapChainNextTexture;
-
-typedef struct {
- /// Initialize the swap chain implementation.
- /// (*wsiContext) is one of DawnWSIContext{D3D12,Metal,GL}
- void (*Init)(void* userData, void* wsiContext);
-
- /// Destroy the swap chain implementation.
- void (*Destroy)(void* userData);
-
- /// Configure/reconfigure the swap chain.
- DawnSwapChainError (*Configure)(void* userData,
- WGPUTextureFormat format,
- WGPUTextureUsage allowedUsage,
- uint32_t width,
- uint32_t height);
-
- /// Acquire the next texture from the swap chain.
- DawnSwapChainError (*GetNextTexture)(void* userData, DawnSwapChainNextTexture* nextTexture);
-
- /// Present the last acquired texture to the screen.
- DawnSwapChainError (*Present)(void* userData);
-
- /// Each function is called with userData as its first argument.
- void* userData;
-
- /// For use by the D3D12 and Vulkan backends: how the swapchain will use the texture.
- WGPUTextureUsage textureUsage;
-} DawnSwapChainImplementation;
-
-#if defined(DAWN_ENABLE_BACKEND_D3D12) && defined(__cplusplus)
-struct DawnWSIContextD3D12 {
- WGPUDevice device = nullptr;
-};
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
-# import <Metal/Metal.h>
-
-struct DawnWSIContextMetal {
- id<MTLDevice> device = nil;
- id<MTLCommandQueue> queue = nil;
-};
-#endif
-
-#ifdef DAWN_ENABLE_BACKEND_OPENGL
-typedef struct {
-} DawnWSIContextGL;
-#endif
-
-#ifdef DAWN_ENABLE_BACKEND_VULKAN
-typedef struct {
-} DawnWSIContextVulkan;
-#endif
-
-#endif // DAWN_DAWN_WSI_H
+#include <dawn/dawn_wsi.h>
diff --git a/chromium/third_party/dawn/src/include/dawn/webgpu.h b/chromium/third_party/dawn/src/include/dawn/webgpu.h
new file mode 100644
index 00000000000..a410df1dc80
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/dawn/webgpu.h
@@ -0,0 +1 @@
+#include <dawn/webgpu.h>
diff --git a/chromium/third_party/dawn/src/include/dawn/webgpu_cpp.h b/chromium/third_party/dawn/src/include/dawn/webgpu_cpp.h
new file mode 100644
index 00000000000..8904453979f
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/dawn/webgpu_cpp.h
@@ -0,0 +1 @@
+#include "dawn/webgpu_cpp.h"
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index a6644b11cfc..ade0dd1b88a 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -1,106 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12BACKEND_H_
-#define DAWNNATIVE_D3D12BACKEND_H_
-
-#include <dawn/dawn_wsi.h>
-#include <dawn_native/DawnNative.h>
-
-#include <DXGI1_4.h>
-#include <d3d12.h>
-#include <windows.h>
-#include <wrl/client.h>
-
-#include <memory>
-
-struct ID3D12Device;
-struct ID3D12Resource;
-
-namespace dawn_native { namespace d3d12 {
-
- class D3D11on12ResourceCache;
-
- DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
- HWND window);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
-
- enum MemorySegment {
- Local,
- NonLocal,
- };
-
- DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
- uint64_t requestedReservationSize,
- MemorySegment memorySegment);
-
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorDXGISharedHandle();
-
- // Note: SharedHandle must be a handle to a texture object.
- HANDLE sharedHandle;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
- : ExternalImageAccessDescriptor {
- public:
- uint64_t acquireMutexKey;
- uint64_t releaseMutexKey;
- bool isSwapChainTexture = false;
- };
-
- class DAWN_NATIVE_EXPORT ExternalImageDXGI {
- public:
- ~ExternalImageDXGI();
-
- // Note: SharedHandle must be a handle to a texture object.
- static std::unique_ptr<ExternalImageDXGI> Create(
- WGPUDevice device,
- const ExternalImageDescriptorDXGISharedHandle* descriptor);
-
- WGPUTexture ProduceTexture(WGPUDevice device,
- const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
-
- private:
- ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
- const WGPUTextureDescriptor* descriptor);
-
- Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
-
- // Contents of WGPUTextureDescriptor are stored individually since the descriptor
- // could outlive this image.
- WGPUTextureUsageFlags mUsage;
- WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
- WGPUTextureDimension mDimension;
- WGPUExtent3D mSize;
- WGPUTextureFormat mFormat;
- uint32_t mMipLevelCount;
- uint32_t mSampleCount;
-
- std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
- };
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
- AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
-
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12BACKEND_H_
+#include <dawn/native/D3D12Backend.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index 9382141d030..637d5118047 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -1,274 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_DAWNNATIVE_H_
-#define DAWNNATIVE_DAWNNATIVE_H_
-
-#include <dawn/dawn_proc_table.h>
-#include <dawn/webgpu.h>
-#include <dawn_native/dawn_native_export.h>
-
-#include <string>
-#include <vector>
-
-namespace dawn_platform {
- class Platform;
-} // namespace dawn_platform
-
-namespace wgpu {
- struct AdapterProperties;
-}
-
-namespace dawn_native {
-
- // DEPRECATED: use WGPUAdapterProperties instead.
- struct PCIInfo {
- uint32_t deviceId = 0;
- uint32_t vendorId = 0;
- std::string name;
- };
-
- // DEPRECATED: use WGPUBackendType instead.
- enum class BackendType {
- D3D12,
- Metal,
- Null,
- OpenGL,
- OpenGLES,
- Vulkan,
- };
-
- // DEPRECATED: use WGPUAdapterType instead.
- enum class DeviceType {
- DiscreteGPU,
- IntegratedGPU,
- CPU,
- Unknown,
- };
-
- class InstanceBase;
- class AdapterBase;
-
- // An optional parameter of Adapter::CreateDevice() to send additional information when creating
- // a Device. For example, we can use it to enable a workaround, optimization or feature.
- struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
- std::vector<const char*> requiredFeatures;
- std::vector<const char*> forceEnabledToggles;
- std::vector<const char*> forceDisabledToggles;
-
- const WGPURequiredLimits* requiredLimits = nullptr;
- };
-
- // TODO(crbug.com/dawn/160): Remove when embedders of Dawn are updated to use
- // DawnDeviceDescriptor.
- using DeviceDescriptor = DawnDeviceDescriptor;
-
- // A struct to record the information of a toggle. A toggle is a code path in Dawn device that
- // can be manually configured to run or not outside Dawn, including workarounds, special
- // features and optimizations.
- struct ToggleInfo {
- const char* name;
- const char* description;
- const char* url;
- };
-
- // A struct to record the information of a feature. A feature is a GPU feature that is not
- // required to be supported by all Dawn backends and can only be used when it is enabled on the
- // creation of device.
- using FeatureInfo = ToggleInfo;
-
- // An adapter is an object that represent on possibility of creating devices in the system.
- // Most of the time it will represent a combination of a physical GPU and an API. Not that the
- // same GPU can be represented by multiple adapters but on different APIs.
- //
- // The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
- // a reference to an underlying adapter.
- class DAWN_NATIVE_EXPORT Adapter {
- public:
- Adapter();
- Adapter(AdapterBase* impl);
- ~Adapter();
-
- Adapter(const Adapter& other);
- Adapter& operator=(const Adapter& other);
-
- // DEPRECATED: use GetProperties instead.
- BackendType GetBackendType() const;
- DeviceType GetDeviceType() const;
- const PCIInfo& GetPCIInfo() const;
-
- // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
- // dawn.json
- void GetProperties(wgpu::AdapterProperties* properties) const;
-
- std::vector<const char*> GetSupportedExtensions() const;
- std::vector<const char*> GetSupportedFeatures() const;
- WGPUDeviceProperties GetAdapterProperties() const;
- bool GetLimits(WGPUSupportedLimits* limits) const;
-
- void SetUseTieredLimits(bool useTieredLimits);
-
- // Check that the Adapter is able to support importing external images. This is necessary
- // to implement the swapchain and interop APIs in Chromium.
- bool SupportsExternalImages() const;
-
- explicit operator bool() const;
-
- // Create a device on this adapter, note that the interface will change to include at least
- // a device descriptor and a pointer to backend specific options.
- // On an error, nullptr is returned.
- WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor = nullptr);
-
- void RequestDevice(const DawnDeviceDescriptor* descriptor,
- WGPURequestDeviceCallback callback,
- void* userdata);
-
- // Reset the backend device object for testing purposes.
- void ResetInternalDeviceForTesting();
-
- private:
- AdapterBase* mImpl = nullptr;
- };
-
- // Base class for options passed to Instance::DiscoverAdapters.
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
- public:
- const WGPUBackendType backendType;
-
- protected:
- AdapterDiscoveryOptionsBase(WGPUBackendType type);
- };
-
- enum BackendValidationLevel { Full, Partial, Disabled };
-
- // Represents a connection to dawn_native and is used for dependency injection, discovering
- // system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
- //
- // This is an RAII class for Dawn instances and also controls the lifetime of all adapters
- // for this instance.
- class DAWN_NATIVE_EXPORT Instance {
- public:
- Instance();
- ~Instance();
-
- Instance(const Instance& other) = delete;
- Instance& operator=(const Instance& other) = delete;
-
- // Gather all adapters in the system that can be accessed with no special options. These
- // adapters will later be returned by GetAdapters.
- void DiscoverDefaultAdapters();
-
- // Adds adapters that can be discovered with the options provided (like a getProcAddress).
- // The backend is chosen based on the type of the options used. Returns true on success.
- bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
-
- // Returns all the adapters that the instance knows about.
- std::vector<Adapter> GetAdapters() const;
-
- const ToggleInfo* GetToggleInfo(const char* toggleName);
-
- // Enables backend validation layers
- void EnableBackendValidation(bool enableBackendValidation);
- void SetBackendValidationLevel(BackendValidationLevel validationLevel);
-
- // Enable debug capture on Dawn startup
- void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
-
- void SetPlatform(dawn_platform::Platform* platform);
-
- // Returns the underlying WGPUInstance object.
- WGPUInstance Get() const;
-
- private:
- InstanceBase* mImpl = nullptr;
- };
-
- // Backend-agnostic API for dawn_native
- DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();
-
- // Query the names of all the toggles that are enabled in device
- DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
-
- // Backdoor to get the number of lazy clears for testing
- DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
-
- // Backdoor to get the number of deprecation warnings for testing
- DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
-
- // Query if texture has been initialized
- DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
- WGPUTexture texture,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount,
- WGPUTextureAspect aspect = WGPUTextureAspect_All);
-
- // Backdoor to get the order of the ProcMap for testing
- DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
-
- DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);
-
- // ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
- DAWN_NATIVE_EXPORT void EnableErrorInjector();
- DAWN_NATIVE_EXPORT void DisableErrorInjector();
- DAWN_NATIVE_EXPORT void ClearErrorInjector();
- DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
- DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
-
- // The different types of external images
- enum ExternalImageType {
- OpaqueFD,
- DmaBuf,
- IOSurface,
- DXGISharedHandle,
- EGLImage,
- };
-
- // Common properties of external images
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
- public:
- const ExternalImageType type;
- const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
- bool isInitialized; // Whether the texture is initialized on import
-
- protected:
- ExternalImageDescriptor(ExternalImageType type);
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
- public:
- bool isInitialized; // Whether the texture is initialized on import
- WGPUTextureUsageFlags usage;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
- public:
- const ExternalImageType type;
- bool isInitialized; // Whether the texture is initialized after export
-
- protected:
- ExternalImageExportInfo(ExternalImageType type);
- };
-
- DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
-
- DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
-
- DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
- WGPUBindGroupLayout b);
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_DAWNNATIVE_H_
+#include <dawn/native/DawnNative.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
index 03468436589..1cb8a892dbb 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
@@ -1,71 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_METALBACKEND_H_
-#define DAWNNATIVE_METALBACKEND_H_
-
-#include <dawn/dawn_wsi.h>
-#include <dawn_native/DawnNative.h>
-
-// The specifics of the Metal backend expose types in function signatures that might not be
-// available in dependent's minimum supported SDK version. Suppress all availability errors using
-// clang's pragmas. Dependents using the types without guarded availability will still get errors
-// when using the types.
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunguarded-availability"
-
-struct __IOSurface;
-typedef __IOSurface* IOSurfaceRef;
-
-#ifdef __OBJC__
-# import <Metal/Metal.h>
-#endif //__OBJC__
-
-namespace dawn_native { namespace metal {
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorIOSurface();
-
- IOSurfaceRef ioSurface;
- uint32_t plane;
- };
-
- DAWN_NATIVE_EXPORT WGPUTexture
- WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
-
- // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
- // mean that the operations will be visible to other APIs/Metal devices right away. macOS
- // does have a global queue of graphics operations, but the command buffers are inserted there
- // when they are "scheduled". Submitting other operations before the command buffer is
- // scheduled could lead to races in who gets scheduled first and incorrect rendering.
- DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
-
-}} // namespace dawn_native::metal
-
-#ifdef __OBJC__
-namespace dawn_native { namespace metal {
-
- DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
-
-}} // namespace dawn_native::metal
-#endif // __OBJC__
-
-#pragma clang diagnostic pop
-
-#endif // DAWNNATIVE_METALBACKEND_H_
+#include <dawn/native/MetalBackend.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h b/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
index 5762a776205..38e11349484 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/NullBackend.h
@@ -1,25 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_NULLBACKEND_H_
-#define DAWNNATIVE_NULLBACKEND_H_
-
-#include <dawn/dawn_wsi.h>
-#include <dawn_native/DawnNative.h>
-
-namespace dawn_native { namespace null {
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
-}} // namespace dawn_native::null
-
-#endif // DAWNNATIVE_NULLBACKEND_H_
+#include <dawn/native/NullBackend.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
index 81ab1f8c122..e7d7adf2216 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
@@ -1,55 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_OPENGLBACKEND_H_
-#define DAWNNATIVE_OPENGLBACKEND_H_
-
-typedef void* EGLImage;
-
-#include <dawn/dawn_wsi.h>
-#include <dawn_native/DawnNative.h>
-
-namespace dawn_native { namespace opengl {
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
-
- void* (*getProc)(const char*);
- };
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptionsES();
-
- void* (*getProc)(const char*);
- };
-
- using PresentCallback = void (*)(void*);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
-
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
- public:
- ExternalImageDescriptorEGLImage();
-
- ::EGLImage image;
- };
-
- DAWN_NATIVE_EXPORT WGPUTexture
- WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);
-
-}} // namespace dawn_native::opengl
-
-#endif // DAWNNATIVE_OPENGLBACKEND_H_
+#include <dawn/native/OpenGLBackend.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index 888ef279364..f183c03f598 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -1,140 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKANBACKEND_H_
-#define DAWNNATIVE_VULKANBACKEND_H_
-
-#include <dawn/dawn_wsi.h>
-#include <dawn_native/DawnNative.h>
-
-#include <vulkan/vulkan.h>
-
-#include <vector>
-
-namespace dawn_native { namespace vulkan {
-
- DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
-
- DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
-
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
- DAWN_NATIVE_EXPORT WGPUTextureFormat
- GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
-
- struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
- AdapterDiscoveryOptions();
-
- bool forceSwiftShader = false;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
- public:
- // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
- // since the import does not need to preserve texture contents.
-
- // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
- // operation old/new layouts must match exactly the layouts in the release operation. So
- // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
- // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
- // The first barrier is the queue transfer, the second is the layout transition to our
- // desired usage.
- VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
- VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;
-
- protected:
- using ExternalImageDescriptor::ExternalImageDescriptor;
- };
-
- struct ExternalImageExportInfoVk : ExternalImageExportInfo {
- public:
- // See comments in |ExternalImageDescriptorVk|
- // Contains the old/new layouts used in the queue release operation.
- VkImageLayout releasedOldLayout;
- VkImageLayout releasedNewLayout;
-
- protected:
- using ExternalImageExportInfo::ExternalImageExportInfo;
- };
-
-// Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
-#ifdef __linux__
-
- // Common properties of external images represented by FDs. On successful import the file
- // descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
- // used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
- // caller can assume the FD is always consumed.
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
- public:
- int memoryFD; // A file descriptor from an export of the memory of the image
- std::vector<int> waitFDs; // File descriptors of semaphores which will be waited on
-
- protected:
- using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
- };
-
- // Descriptor for opaque file descriptor image import
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
- ExternalImageDescriptorOpaqueFD();
-
- VkDeviceSize allocationSize; // Must match VkMemoryAllocateInfo from image creation
- uint32_t memoryTypeIndex; // Must match VkMemoryAllocateInfo from image creation
- };
-
- // Descriptor for dma-buf file descriptor image import
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
- ExternalImageDescriptorDmaBuf();
-
- uint32_t stride; // Stride of the buffer in bytes
- uint64_t drmModifier; // DRM modifier of the buffer
- };
-
- // Info struct that is written to in |ExportVulkanImage|.
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
- public:
- // Contains the exported semaphore handles.
- std::vector<int> semaphoreHandles;
-
- protected:
- using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
- ExternalImageExportInfoOpaqueFD();
- };
-
- struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
- ExternalImageExportInfoDmaBuf();
- };
-
-#endif // __linux__
-
- // Imports external memory into a Vulkan image. Internally, this uses external memory /
- // semaphore extensions to import the image and wait on the provided synchronizaton
- // primitives before the texture can be used.
- // On failure, returns a nullptr.
- DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
- const ExternalImageDescriptorVk* descriptor);
-
- // Exports external memory from a Vulkan image. This must be called on wrapped textures
- // before they are destroyed. It writes the semaphore to wait on and the old/new image
- // layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
- // perform a layout transition.
- DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
- VkImageLayout desiredLayout,
- ExternalImageExportInfoVk* info);
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKANBACKEND_H_
+#include <dawn/native/VulkanBackend.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h b/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
index ffbd9cc369f..89f828797bb 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/dawn_native_export.h
@@ -1,36 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_EXPORT_H_
-#define DAWNNATIVE_EXPORT_H_
-
-#if defined(DAWN_NATIVE_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_NATIVE_IMPLEMENTATION)
-# define DAWN_NATIVE_EXPORT __declspec(dllexport)
-# else
-# define DAWN_NATIVE_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_NATIVE_IMPLEMENTATION)
-# define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_NATIVE_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_NATIVE_SHARED_LIBRARY)
-# define DAWN_NATIVE_EXPORT
-#endif // defined(DAWN_NATIVE_SHARED_LIBRARY)
-
-#endif // DAWNNATIVE_EXPORT_H_
+#include <dawn/native/dawn_native_export.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
index 3a284193344..2b40383df33 100644
--- a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
+++ b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
@@ -1,116 +1 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNPLATFORM_DAWNPLATFORM_H_
-#define DAWNPLATFORM_DAWNPLATFORM_H_
-
-#include "dawn_platform/dawn_platform_export.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-
-#include <dawn/webgpu.h>
-
-namespace dawn_platform {
-
- enum class TraceCategory {
- General, // General trace events
- Validation, // Dawn validation
- Recording, // Native command recording
- GPUWork, // Actual GPU work
- };
-
- class DAWN_PLATFORM_EXPORT CachingInterface {
- public:
- CachingInterface();
- virtual ~CachingInterface();
-
- // LoadData has two modes. The first mode is used to get a value which
- // corresponds to the |key|. The |valueOut| is a caller provided buffer
- // allocated to the size |valueSize| which is loaded with data of the
- // size returned. The second mode is used to query for the existence of
- // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
- // The return size is non-zero if the |key| exists.
- virtual size_t LoadData(const WGPUDevice device,
- const void* key,
- size_t keySize,
- void* valueOut,
- size_t valueSize) = 0;
-
- // StoreData puts a |value| in the cache which corresponds to the |key|.
- virtual void StoreData(const WGPUDevice device,
- const void* key,
- size_t keySize,
- const void* value,
- size_t valueSize) = 0;
-
- private:
- CachingInterface(const CachingInterface&) = delete;
- CachingInterface& operator=(const CachingInterface&) = delete;
- };
-
- class DAWN_PLATFORM_EXPORT WaitableEvent {
- public:
- WaitableEvent() = default;
- virtual ~WaitableEvent() = default;
- virtual void Wait() = 0; // Wait for completion
- virtual bool IsComplete() = 0; // Non-blocking check if the event is complete
- };
-
- using PostWorkerTaskCallback = void (*)(void* userdata);
-
- class DAWN_PLATFORM_EXPORT WorkerTaskPool {
- public:
- WorkerTaskPool() = default;
- virtual ~WorkerTaskPool() = default;
- virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
- void* userdata) = 0;
- };
-
- class DAWN_PLATFORM_EXPORT Platform {
- public:
- Platform();
- virtual ~Platform();
-
- virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);
-
- virtual double MonotonicallyIncreasingTime();
-
- virtual uint64_t AddTraceEvent(char phase,
- const unsigned char* categoryGroupEnabled,
- const char* name,
- uint64_t id,
- double timestamp,
- int numArgs,
- const char** argNames,
- const unsigned char* argTypes,
- const uint64_t* argValues,
- unsigned char flags);
-
- // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
- // when the fingerprint changes. The returned CachingInterface is expected to outlive the
- // device which uses it to persistently cache objects.
- virtual CachingInterface* GetCachingInterface(const void* fingerprint,
- size_t fingerprintSize);
- virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
-
- private:
- Platform(const Platform&) = delete;
- Platform& operator=(const Platform&) = delete;
- };
-
-} // namespace dawn_platform
-
-#endif // DAWNPLATFORM_DAWNPLATFORM_H_
+#include <dawn/platform/DawnPlatform.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_platform/dawn_platform_export.h b/chromium/third_party/dawn/src/include/dawn_platform/dawn_platform_export.h
deleted file mode 100644
index 0626467ce7c..00000000000
--- a/chromium/third_party/dawn/src/include/dawn_platform/dawn_platform_export.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNPLATFORM_EXPORT_H_
-#define DAWNPLATFORM_EXPORT_H_
-
-#if defined(DAWN_PLATFORM_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_PLATFORM_IMPLEMENTATION)
-# define DAWN_PLATFORM_EXPORT __declspec(dllexport)
-# else
-# define DAWN_PLATFORM_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_PLATFORM_IMPLEMENTATION)
-# define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_PLATFORM_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_PLATFORM_SHARED_LIBRARY)
-# define DAWN_PLATFORM_EXPORT
-#endif // defined(DAWN_PLATFORM_SHARED_LIBRARY)
-
-#endif // DAWNPLATFORM_EXPORT_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
index 0c11d91d0c8..066e7abe6bf 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
@@ -1,76 +1 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_WIRE_H_
-#define DAWNWIRE_WIRE_H_
-
-#include <cstdint>
-#include <limits>
-
-#include "dawn/webgpu.h"
-#include "dawn_wire/dawn_wire_export.h"
-
-namespace dawn_wire {
-
- class DAWN_WIRE_EXPORT CommandSerializer {
- public:
- CommandSerializer();
- virtual ~CommandSerializer();
- CommandSerializer(const CommandSerializer& rhs) = delete;
- CommandSerializer& operator=(const CommandSerializer& rhs) = delete;
-
- // Get space for serializing commands.
- // GetCmdSpace will never be called with a value larger than
- // what GetMaximumAllocationSize returns. Return nullptr to indicate
- // a fatal error.
- virtual void* GetCmdSpace(size_t size) = 0;
- virtual bool Flush() = 0;
- virtual size_t GetMaximumAllocationSize() const = 0;
- virtual void OnSerializeError();
- };
-
- class DAWN_WIRE_EXPORT CommandHandler {
- public:
- CommandHandler();
- virtual ~CommandHandler();
- CommandHandler(const CommandHandler& rhs) = delete;
- CommandHandler& operator=(const CommandHandler& rhs) = delete;
-
- virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
- };
-
- DAWN_WIRE_EXPORT size_t
- SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
-
- DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
- const WGPUDeviceProperties* deviceProperties,
- char* serializeBuffer);
-
- DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
- const volatile char* deserializeBuffer,
- size_t deserializeBufferSize);
-
- DAWN_WIRE_EXPORT size_t
- SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
-
- DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
- char* serializeBuffer);
-
- DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
- const volatile char* deserializeBuffer,
- size_t deserializeBufferSize);
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_WIRE_H_
+#include <dawn/wire/Wire.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index bbcaaee484b..0de75997d40 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -1,175 +1 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_WIRECLIENT_H_
-#define DAWNWIRE_WIRECLIENT_H_
-
-#include "dawn/dawn_proc_table.h"
-#include "dawn_wire/Wire.h"
-
-#include <memory>
-#include <vector>
-
-namespace dawn_wire {
-
- namespace client {
- class Client;
- class MemoryTransferService;
-
- DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
- } // namespace client
-
- struct ReservedTexture {
- WGPUTexture texture;
- uint32_t id;
- uint32_t generation;
- uint32_t deviceId;
- uint32_t deviceGeneration;
- };
-
- struct ReservedSwapChain {
- WGPUSwapChain swapchain;
- uint32_t id;
- uint32_t generation;
- uint32_t deviceId;
- uint32_t deviceGeneration;
- };
-
- struct ReservedDevice {
- WGPUDevice device;
- uint32_t id;
- uint32_t generation;
- };
-
- struct DAWN_WIRE_EXPORT WireClientDescriptor {
- CommandSerializer* serializer;
- client::MemoryTransferService* memoryTransferService = nullptr;
- };
-
- class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
- public:
- WireClient(const WireClientDescriptor& descriptor);
- ~WireClient() override;
-
- const volatile char* HandleCommands(const volatile char* commands,
- size_t size) override final;
-
- ReservedTexture ReserveTexture(WGPUDevice device);
- ReservedSwapChain ReserveSwapChain(WGPUDevice device);
- ReservedDevice ReserveDevice();
-
- void ReclaimTextureReservation(const ReservedTexture& reservation);
- void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
- void ReclaimDeviceReservation(const ReservedDevice& reservation);
-
- // Disconnects the client.
- // Commands allocated after this point will not be sent.
- void Disconnect();
-
- private:
- std::unique_ptr<client::Client> mImpl;
- };
-
- namespace client {
- class DAWN_WIRE_EXPORT MemoryTransferService {
- public:
- MemoryTransferService();
- virtual ~MemoryTransferService();
-
- class ReadHandle;
- class WriteHandle;
-
- // Create a handle for reading server data.
- // This may fail and return nullptr.
- virtual ReadHandle* CreateReadHandle(size_t) = 0;
-
- // Create a handle for writing server data.
- // This may fail and return nullptr.
- virtual WriteHandle* CreateWriteHandle(size_t) = 0;
-
- class DAWN_WIRE_EXPORT ReadHandle {
- public:
- ReadHandle();
- virtual ~ReadHandle();
-
- // Get the required serialization size for SerializeCreate
- virtual size_t SerializeCreateSize() = 0;
-
- // Serialize the handle into |serializePointer| so it can be received by the server.
- virtual void SerializeCreate(void* serializePointer) = 0;
-
- // Simply return the base address of the allocation (without applying any offset)
- // Returns nullptr if the allocation failed.
- // The data must live at least until the ReadHandle is destructued
- virtual const void* GetData() = 0;
-
- // Gets called when a MapReadCallback resolves.
- // deserialize the data update and apply
- // it to the range (offset, offset + size) of allocation
- // There could be nothing to be deserialized (if using shared memory)
- // Needs to check potential offset/size OOB and overflow
- virtual bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) = 0;
-
- private:
- ReadHandle(const ReadHandle&) = delete;
- ReadHandle& operator=(const ReadHandle&) = delete;
- };
-
- class DAWN_WIRE_EXPORT WriteHandle {
- public:
- WriteHandle();
- virtual ~WriteHandle();
-
- // Get the required serialization size for SerializeCreate
- virtual size_t SerializeCreateSize() = 0;
-
- // Serialize the handle into |serializePointer| so it can be received by the server.
- virtual void SerializeCreate(void* serializePointer) = 0;
-
- // Simply return the base address of the allocation (without applying any offset)
- // The data returned should be zero-initialized.
- // The data returned must live at least until the WriteHandle is destructed.
- // On failure, the pointer returned should be null.
- virtual void* GetData() = 0;
-
- // Get the required serialization size for SerializeDataUpdate
- virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
-
- // Serialize a command to send the modified contents of
- // the subrange (offset, offset + size) of the allocation at buffer unmap
- // This subrange is always the whole mapped region for now
- // There could be nothing to be serialized (if using shared memory)
- virtual void SerializeDataUpdate(void* serializePointer,
- size_t offset,
- size_t size) = 0;
-
- private:
- WriteHandle(const WriteHandle&) = delete;
- WriteHandle& operator=(const WriteHandle&) = delete;
- };
-
- private:
- MemoryTransferService(const MemoryTransferService&) = delete;
- MemoryTransferService& operator=(const MemoryTransferService&) = delete;
- };
-
- // Backdoor to get the order of the ProcMap for testing
- DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
- } // namespace client
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_WIRECLIENT_H_
+#include <dawn/wire/WireClient.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index 98a2e69ba98..be7030beac9 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -1,148 +1 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_WIRESERVER_H_
-#define DAWNWIRE_WIRESERVER_H_
-
-#include <memory>
-
-#include "dawn_wire/Wire.h"
-
-struct DawnProcTable;
-
-namespace dawn_wire {
-
- namespace server {
- class Server;
- class MemoryTransferService;
- } // namespace server
-
- struct DAWN_WIRE_EXPORT WireServerDescriptor {
- const DawnProcTable* procs;
- CommandSerializer* serializer;
- server::MemoryTransferService* memoryTransferService = nullptr;
- };
-
- class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
- public:
- WireServer(const WireServerDescriptor& descriptor);
- ~WireServer() override;
-
- const volatile char* HandleCommands(const volatile char* commands,
- size_t size) override final;
-
- bool InjectTexture(WGPUTexture texture,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
- bool InjectSwapChain(WGPUSwapChain swapchain,
- uint32_t id,
- uint32_t generation,
- uint32_t deviceId,
- uint32_t deviceGeneration);
-
- bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
-
- // Look up a device by (id, generation) pair. Returns nullptr if the generation
- // has expired or the id is not found.
- // The Wire does not have destroy hooks to allow an embedder to observe when an object
- // has been destroyed, but in Chrome, we need to know the list of live devices so we
- // can call device.Tick() on all of them periodically to ensure progress on asynchronous
- // work is made. Getting this list can be done by tracking the (id, generation) of
- // previously injected devices, and observing if GetDevice(id, generation) returns non-null.
- WGPUDevice GetDevice(uint32_t id, uint32_t generation);
-
- private:
- std::unique_ptr<server::Server> mImpl;
- };
-
- namespace server {
- class DAWN_WIRE_EXPORT MemoryTransferService {
- public:
- MemoryTransferService();
- virtual ~MemoryTransferService();
-
- class ReadHandle;
- class WriteHandle;
-
- // Deserialize data to create Read/Write handles. These handles are for the client
- // to Read/Write data.
- virtual bool DeserializeReadHandle(const void* deserializePointer,
- size_t deserializeSize,
- ReadHandle** readHandle) = 0;
- virtual bool DeserializeWriteHandle(const void* deserializePointer,
- size_t deserializeSize,
- WriteHandle** writeHandle) = 0;
-
- class DAWN_WIRE_EXPORT ReadHandle {
- public:
- ReadHandle();
- virtual ~ReadHandle();
-
- // Return the size of the command serialized if
- // SerializeDataUpdate is called with the same offset/size args
- virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
-
- // Gets called when a MapReadCallback resolves.
- // Serialize the data update for the range (offset, offset + size) into
- // |serializePointer| to the client There could be nothing to be serialized (if
- // using shared memory)
- virtual void SerializeDataUpdate(const void* data,
- size_t offset,
- size_t size,
- void* serializePointer) = 0;
-
- private:
- ReadHandle(const ReadHandle&) = delete;
- ReadHandle& operator=(const ReadHandle&) = delete;
- };
-
- class DAWN_WIRE_EXPORT WriteHandle {
- public:
- WriteHandle();
- virtual ~WriteHandle();
-
- // Set the target for writes from the client. DeserializeFlush should copy data
- // into the target.
- void SetTarget(void* data);
- // Set Staging data length for OOB check
- void SetDataLength(size_t dataLength);
-
- // This function takes in the serialized result of
- // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
- // Needs to check potential offset/size OOB and overflow
- virtual bool DeserializeDataUpdate(const void* deserializePointer,
- size_t deserializeSize,
- size_t offset,
- size_t size) = 0;
-
- protected:
- void* mTargetData = nullptr;
- size_t mDataLength = 0;
-
- private:
- WriteHandle(const WriteHandle&) = delete;
- WriteHandle& operator=(const WriteHandle&) = delete;
- };
-
- private:
- MemoryTransferService(const MemoryTransferService&) = delete;
- MemoryTransferService& operator=(const MemoryTransferService&) = delete;
- };
- } // namespace server
-
-} // namespace dawn_wire
-
-#endif // DAWNWIRE_WIRESERVER_H_
+#include <dawn/wire/WireServer.h>
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h b/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
index 8043f618c19..36624f8e028 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/dawn_wire_export.h
@@ -1,36 +1 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNWIRE_EXPORT_H_
-#define DAWNWIRE_EXPORT_H_
-
-#if defined(DAWN_WIRE_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_WIRE_IMPLEMENTATION)
-# define DAWN_WIRE_EXPORT __declspec(dllexport)
-# else
-# define DAWN_WIRE_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_WIRE_IMPLEMENTATION)
-# define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_WIRE_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_WIRE_SHARED_LIBRARY)
-# define DAWN_WIRE_EXPORT
-#endif // defined(DAWN_WIRE_SHARED_LIBRARY)
-
-#endif // DAWNWIRE_EXPORT_H_
+#include <dawn/wire/dawn_wire_export.h>
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
deleted file mode 100644
index ef33d9a33d1..00000000000
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ /dev/null
@@ -1,612 +0,0 @@
-# Copyright 2012 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("//testing/test.gni")
-import("${dawn_root}/generator/dawn_generator.gni")
-import("${dawn_root}/scripts/dawn_features.gni")
-
-group("dawn_tests") {
- testonly = true
- deps = [
- ":dawn_end2end_tests",
- ":dawn_perf_tests",
- ":dawn_unittests",
- ]
-}
-
-###############################################################################
-# Gtest Gmock - Handle building inside and outside of Chromium.
-###############################################################################
-
-# When building outside of Chromium we need to define our own targets for GTest
-# and GMock. However when compiling inside of Chromium we need to reuse the
-# existing targets, both because Chromium has a special harness for swarming
-# and because otherwise the "gn check" fails.
-
-if (!build_with_chromium) {
- # When we aren't in Chromium we define out own targets based on the location
- # of the googletest repo.
- googletest_dir = dawn_googletest_dir
-
- config("gtest_config") {
- include_dirs = [
- "${googletest_dir}/googletest",
- "${googletest_dir}/googletest/include",
- ]
- }
-
- static_library("gtest") {
- testonly = true
- sources = [ "${googletest_dir}/googletest/src/gtest-all.cc" ]
- public_configs = [ ":gtest_config" ]
- }
-
- config("gmock_config") {
- include_dirs = [
- "${googletest_dir}/googlemock",
- "${googletest_dir}/googlemock/include",
- "${googletest_dir}/googletest/include",
- ]
- }
-
- static_library("gmock") {
- testonly = true
- sources = [ "${googletest_dir}/googlemock/src/gmock-all.cc" ]
- public_configs = [ ":gmock_config" ]
- }
-
- group("gmock_and_gtest") {
- testonly = true
- public_deps = [
- ":gmock",
- ":gtest",
- ]
- }
-} else {
- # When we are in Chromium we reuse its targets, and also add some deps that
- # are needed to launch the test in swarming mode.
- group("gmock_and_gtest") {
- testonly = true
- public_deps = [
- "//base",
- "//base/test:test_support",
- "//testing/gmock",
- "//testing/gtest",
- ]
- }
-}
-
-###############################################################################
-# Wrapping of Chromium targets
-###############################################################################
-
-# These targets are separated because they are Chromium sources files that
-# can't use the dawn_internal config, otherwise Dawn's warning flags get
-# applied while compiling a bunch of Chromium's //base (via header inclusion)
-if (build_with_chromium) {
- source_set("dawn_unittests_main") {
- testonly = true
- deps = [ ":gmock_and_gtest" ]
- sources = [ "//gpu/dawn_unittests_main.cc" ]
- }
- source_set("dawn_end2end_tests_main") {
- testonly = true
- deps = [ ":gmock_and_gtest" ]
- sources = [ "//gpu/dawn_end2end_tests_main.cc" ]
- }
- source_set("dawn_perf_tests_main") {
- testonly = true
- deps = [ ":gmock_and_gtest" ]
- sources = [ "//gpu/dawn_perf_tests_main.cc" ]
- }
-}
-
-###############################################################################
-# Dawn unittests
-###############################################################################
-
-dawn_json_generator("mock_webgpu_gen") {
- target = "mock_webgpu"
- outputs = [
- "src/dawn/mock_webgpu.h",
- "src/dawn/mock_webgpu.cpp",
- ]
-}
-
-# Source code for mocks used for unit testing are separated from the rest of
-# sources so that they aren't included in non-test builds.
-source_set("dawn_native_mocks_sources") {
- testonly = true
-
- deps = [
- ":gmock_and_gtest",
- "${dawn_root}/src/dawn_native:dawn_native_sources",
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- # Add internal dawn_native config for internal unittests.
- configs += [ "${dawn_root}/src/dawn_native:dawn_native_internal" ]
-
- sources = [
- "unittests/native/mocks/BindGroupLayoutMock.h",
- "unittests/native/mocks/BindGroupMock.h",
- "unittests/native/mocks/CommandBufferMock.h",
- "unittests/native/mocks/ComputePipelineMock.h",
- "unittests/native/mocks/DeviceMock.h",
- "unittests/native/mocks/ExternalTextureMock.h",
- "unittests/native/mocks/PipelineLayoutMock.h",
- "unittests/native/mocks/QuerySetMock.h",
- "unittests/native/mocks/RenderPipelineMock.h",
- "unittests/native/mocks/SamplerMock.h",
- "unittests/native/mocks/ShaderModuleMock.cpp",
- "unittests/native/mocks/ShaderModuleMock.h",
- "unittests/native/mocks/SwapChainMock.h",
- "unittests/native/mocks/TextureMock.h",
- ]
-}
-
-test("dawn_unittests") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
-
- deps = [
- ":dawn_native_mocks_sources",
- ":gmock_and_gtest",
- ":mock_webgpu_gen",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native:dawn_native_sources",
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- # Add internal dawn_native config for internal unittests.
- configs += [ "${dawn_root}/src/dawn_native:dawn_native_internal" ]
-
- sources = get_target_outputs(":mock_webgpu_gen")
- sources += [
- "${dawn_root}/src/dawn_wire/client/ClientMemoryTransferService_mock.cpp",
- "${dawn_root}/src/dawn_wire/client/ClientMemoryTransferService_mock.h",
- "${dawn_root}/src/dawn_wire/server/ServerMemoryTransferService_mock.cpp",
- "${dawn_root}/src/dawn_wire/server/ServerMemoryTransferService_mock.h",
- "DawnNativeTest.cpp",
- "DawnNativeTest.h",
- "MockCallback.h",
- "ToggleParser.cpp",
- "ToggleParser.h",
- "unittests/AsyncTaskTests.cpp",
- "unittests/BitSetIteratorTests.cpp",
- "unittests/BuddyAllocatorTests.cpp",
- "unittests/BuddyMemoryAllocatorTests.cpp",
- "unittests/ChainUtilsTests.cpp",
- "unittests/CommandAllocatorTests.cpp",
- "unittests/ConcurrentCacheTests.cpp",
- "unittests/EnumClassBitmasksTests.cpp",
- "unittests/EnumMaskIteratorTests.cpp",
- "unittests/ErrorTests.cpp",
- "unittests/FeatureTests.cpp",
- "unittests/GPUInfoTests.cpp",
- "unittests/GetProcAddressTests.cpp",
- "unittests/ITypArrayTests.cpp",
- "unittests/ITypBitsetTests.cpp",
- "unittests/ITypSpanTests.cpp",
- "unittests/ITypVectorTests.cpp",
- "unittests/LimitsTests.cpp",
- "unittests/LinkedListTests.cpp",
- "unittests/MathTests.cpp",
- "unittests/ObjectBaseTests.cpp",
- "unittests/PerStageTests.cpp",
- "unittests/PerThreadProcTests.cpp",
- "unittests/PlacementAllocatedTests.cpp",
- "unittests/RefBaseTests.cpp",
- "unittests/RefCountedTests.cpp",
- "unittests/ResultTests.cpp",
- "unittests/RingBufferAllocatorTests.cpp",
- "unittests/SerialMapTests.cpp",
- "unittests/SerialQueueTests.cpp",
- "unittests/SlabAllocatorTests.cpp",
- "unittests/StackContainerTests.cpp",
- "unittests/SubresourceStorageTests.cpp",
- "unittests/SystemUtilsTests.cpp",
- "unittests/ToBackendTests.cpp",
- "unittests/TypedIntegerTests.cpp",
- "unittests/native/CommandBufferEncodingTests.cpp",
- "unittests/native/DestroyObjectTests.cpp",
- "unittests/validation/BindGroupValidationTests.cpp",
- "unittests/validation/BufferValidationTests.cpp",
- "unittests/validation/CommandBufferValidationTests.cpp",
- "unittests/validation/ComputeIndirectValidationTests.cpp",
- "unittests/validation/ComputeValidationTests.cpp",
- "unittests/validation/CopyCommandsValidationTests.cpp",
- "unittests/validation/CopyTextureForBrowserTests.cpp",
- "unittests/validation/DebugMarkerValidationTests.cpp",
- "unittests/validation/DrawIndirectValidationTests.cpp",
- "unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp",
- "unittests/validation/DynamicStateCommandValidationTests.cpp",
- "unittests/validation/ErrorScopeValidationTests.cpp",
- "unittests/validation/ExternalTextureTests.cpp",
- "unittests/validation/GetBindGroupLayoutValidationTests.cpp",
- "unittests/validation/IndexBufferValidationTests.cpp",
- "unittests/validation/InternalUsageValidationTests.cpp",
- "unittests/validation/LabelTests.cpp",
- "unittests/validation/MinimumBufferSizeValidationTests.cpp",
- "unittests/validation/MultipleDeviceTests.cpp",
- "unittests/validation/OverridableConstantsValidationTests.cpp",
- "unittests/validation/PipelineAndPassCompatibilityTests.cpp",
- "unittests/validation/QueryValidationTests.cpp",
- "unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp",
- "unittests/validation/QueueSubmitValidationTests.cpp",
- "unittests/validation/QueueWriteBufferValidationTests.cpp",
- "unittests/validation/QueueWriteTextureValidationTests.cpp",
- "unittests/validation/RenderBundleValidationTests.cpp",
- "unittests/validation/RenderPassDescriptorValidationTests.cpp",
- "unittests/validation/RenderPipelineValidationTests.cpp",
- "unittests/validation/RequestDeviceValidationTests.cpp",
- "unittests/validation/ResourceUsageTrackingTests.cpp",
- "unittests/validation/SamplerValidationTests.cpp",
- "unittests/validation/ShaderModuleValidationTests.cpp",
- "unittests/validation/StorageTextureValidationTests.cpp",
- "unittests/validation/TextureSubresourceTests.cpp",
- "unittests/validation/TextureValidationTests.cpp",
- "unittests/validation/TextureViewValidationTests.cpp",
- "unittests/validation/ToggleValidationTests.cpp",
- "unittests/validation/UnsafeAPIValidationTests.cpp",
- "unittests/validation/ValidationTest.cpp",
- "unittests/validation/ValidationTest.h",
- "unittests/validation/VertexBufferValidationTests.cpp",
- "unittests/validation/VertexStateValidationTests.cpp",
- "unittests/validation/VideoViewsValidationTests.cpp",
- "unittests/validation/WriteBufferTests.cpp",
- "unittests/wire/WireArgumentTests.cpp",
- "unittests/wire/WireBasicTests.cpp",
- "unittests/wire/WireBufferMappingTests.cpp",
- "unittests/wire/WireCreatePipelineAsyncTests.cpp",
- "unittests/wire/WireDestroyObjectTests.cpp",
- "unittests/wire/WireDisconnectTests.cpp",
- "unittests/wire/WireErrorCallbackTests.cpp",
- "unittests/wire/WireExtensionTests.cpp",
- "unittests/wire/WireInjectDeviceTests.cpp",
- "unittests/wire/WireInjectSwapChainTests.cpp",
- "unittests/wire/WireInjectTextureTests.cpp",
- "unittests/wire/WireMemoryTransferServiceTests.cpp",
- "unittests/wire/WireOptionalTests.cpp",
- "unittests/wire/WireQueueTests.cpp",
- "unittests/wire/WireShaderModuleTests.cpp",
- "unittests/wire/WireTest.cpp",
- "unittests/wire/WireTest.h",
- "unittests/wire/WireWGPUDevicePropertiesTests.cpp",
- ]
-
- if (is_win) {
- sources += [ "unittests/WindowsUtilsTests.cpp" ]
- }
-
- if (dawn_enable_d3d12) {
- sources += [ "unittests/d3d12/CopySplitTests.cpp" ]
- }
-
- # When building inside Chromium, use their gtest main function because it is
- # needed to run in swarming correctly.
- if (build_with_chromium) {
- deps += [ ":dawn_unittests_main" ]
- } else {
- sources += [ "UnittestsMain.cpp" ]
- }
-}
-
-###############################################################################
-# Dawn end2end tests targets
-###############################################################################
-
-source_set("dawn_end2end_tests_sources") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
- testonly = true
-
- deps = [
- ":gmock_and_gtest",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
-
- # Statically linked because the end2end white_box tests use Dawn internals.
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- sources = [
- "DawnTest.h",
- "MockCallback.h",
- "ParamGenerator.h",
- "ToggleParser.cpp",
- "ToggleParser.h",
- "end2end/AdapterDiscoveryTests.cpp",
- "end2end/BasicTests.cpp",
- "end2end/BindGroupTests.cpp",
- "end2end/BufferTests.cpp",
- "end2end/BufferZeroInitTests.cpp",
- "end2end/ClipSpaceTests.cpp",
- "end2end/ColorStateTests.cpp",
- "end2end/CommandEncoderTests.cpp",
- "end2end/CompressedTextureFormatTests.cpp",
- "end2end/ComputeCopyStorageBufferTests.cpp",
- "end2end/ComputeDispatchTests.cpp",
- "end2end/ComputeLayoutMemoryBufferTests.cpp",
- "end2end/ComputeSharedMemoryTests.cpp",
- "end2end/ComputeStorageBufferBarrierTests.cpp",
- "end2end/CopyTests.cpp",
- "end2end/CopyTextureForBrowserTests.cpp",
- "end2end/CreatePipelineAsyncTests.cpp",
- "end2end/CullingTests.cpp",
- "end2end/DebugMarkerTests.cpp",
- "end2end/DeprecatedAPITests.cpp",
- "end2end/DepthBiasTests.cpp",
- "end2end/DepthStencilCopyTests.cpp",
- "end2end/DepthStencilLoadOpTests.cpp",
- "end2end/DepthStencilSamplingTests.cpp",
- "end2end/DepthStencilStateTests.cpp",
- "end2end/DestroyTests.cpp",
- "end2end/DeviceInitializationTests.cpp",
- "end2end/DeviceLostTests.cpp",
- "end2end/DrawIndexedIndirectTests.cpp",
- "end2end/DrawIndexedTests.cpp",
- "end2end/DrawIndirectTests.cpp",
- "end2end/DrawTests.cpp",
- "end2end/DynamicBufferOffsetTests.cpp",
- "end2end/EntryPointTests.cpp",
- "end2end/ExternalTextureTests.cpp",
- "end2end/FirstIndexOffsetTests.cpp",
- "end2end/GpuMemorySynchronizationTests.cpp",
- "end2end/IndexFormatTests.cpp",
- "end2end/MaxLimitTests.cpp",
- "end2end/MemoryAllocationStressTests.cpp",
- "end2end/MultisampledRenderingTests.cpp",
- "end2end/MultisampledSamplingTests.cpp",
- "end2end/NonzeroBufferCreationTests.cpp",
- "end2end/NonzeroTextureCreationTests.cpp",
- "end2end/ObjectCachingTests.cpp",
- "end2end/OpArrayLengthTests.cpp",
- "end2end/PipelineLayoutTests.cpp",
- "end2end/PrimitiveStateTests.cpp",
- "end2end/PrimitiveTopologyTests.cpp",
- "end2end/QueryTests.cpp",
- "end2end/QueueTests.cpp",
- "end2end/QueueTimelineTests.cpp",
- "end2end/ReadOnlyDepthStencilAttachmentTests.cpp",
- "end2end/RenderAttachmentTests.cpp",
- "end2end/RenderBundleTests.cpp",
- "end2end/RenderPassLoadOpTests.cpp",
- "end2end/RenderPassTests.cpp",
- "end2end/SamplerFilterAnisotropicTests.cpp",
- "end2end/SamplerTests.cpp",
- "end2end/ScissorTests.cpp",
- "end2end/ShaderFloat16Tests.cpp",
- "end2end/ShaderTests.cpp",
- "end2end/StorageTextureTests.cpp",
- "end2end/SubresourceRenderAttachmentTests.cpp",
- "end2end/Texture3DTests.cpp",
- "end2end/TextureFormatTests.cpp",
- "end2end/TextureSubresourceTests.cpp",
- "end2end/TextureViewTests.cpp",
- "end2end/TextureZeroInitTests.cpp",
- "end2end/VertexFormatTests.cpp",
- "end2end/VertexOnlyRenderPipelineTests.cpp",
- "end2end/VertexStateTests.cpp",
- "end2end/ViewportOrientationTests.cpp",
- "end2end/ViewportTests.cpp",
- ]
-
- # Validation tests that need OS windows live in end2end tests.
- sources += [
- "unittests/validation/ValidationTest.cpp",
- "unittests/validation/ValidationTest.h",
- ]
-
- libs = []
-
- if (dawn_enable_d3d12) {
- sources += [
- "end2end/D3D12CachingTests.cpp",
- "end2end/D3D12ResourceWrappingTests.cpp",
- "end2end/D3D12VideoViewsTests.cpp",
- ]
- libs += [
- "d3d11.lib",
- "dxgi.lib",
- ]
- }
-
- if (dawn_enable_metal) {
- sources += [ "end2end/IOSurfaceWrappingTests.cpp" ]
- frameworks = [ "IOSurface.framework" ]
- }
-
- if (dawn_enable_opengl) {
- assert(dawn_supports_glfw_for_windowing)
- }
-
- if (dawn_supports_glfw_for_windowing) {
- sources += [
- "end2end/SwapChainTests.cpp",
- "end2end/SwapChainValidationTests.cpp",
- "end2end/WindowSurfaceTests.cpp",
- ]
- deps += [ "${dawn_root}/src/utils:dawn_glfw" ]
- }
-}
-
-source_set("dawn_white_box_tests_sources") {
- configs += [ "${dawn_root}/src/dawn_native:dawn_native_internal" ]
- testonly = true
-
- deps = [
- ":gmock_and_gtest",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native:dawn_native_sources",
-
- # Statically linked because the end2end white_box tests use Dawn internals.
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- sources = [
- "DawnTest.h",
- "ParamGenerator.h",
- "ToggleParser.h",
- ]
-
- if (dawn_enable_vulkan) {
- deps += [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
-
- if (is_chromeos) {
- sources += [ "white_box/VulkanImageWrappingTestsDmaBuf.cpp" ]
- } else if (is_linux) {
- sources += [ "white_box/VulkanImageWrappingTestsOpaqueFD.cpp" ]
- }
-
- if (dawn_enable_error_injection) {
- sources += [ "white_box/VulkanErrorInjectorTests.cpp" ]
- }
- }
-
- sources += [
- "white_box/BufferAllocatedSizeTests.cpp",
- "white_box/InternalResourceUsageTests.cpp",
- "white_box/InternalStorageBufferBindingTests.cpp",
- "white_box/QueryInternalShaderTests.cpp",
- ]
-
- if (dawn_enable_d3d12) {
- sources += [
- "white_box/D3D12DescriptorHeapTests.cpp",
- "white_box/D3D12ResidencyTests.cpp",
- "white_box/D3D12ResourceHeapTests.cpp",
- ]
- }
-
- if (dawn_enable_metal) {
- sources += [ "white_box/MetalAutoreleasePoolTests.mm" ]
- }
-
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/utils:dawn_glfw" ]
- }
-
- if (dawn_enable_opengles) {
- sources += [ "white_box/EGLImageWrappingTests.cpp" ]
- deps += [ "//third_party/angle:libEGL" ]
- }
-
- libs = []
-}
-
-test("dawn_end2end_tests") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
-
- deps = [
- ":dawn_end2end_tests_sources",
- ":dawn_white_box_tests_sources",
- ":gmock_and_gtest",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native:dawn_native_static",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- sources = [
- "DawnTest.cpp",
- "DawnTest.h",
- ]
-
- libs = []
-
- # When building inside Chromium, use their gtest main function because it is
- # needed to run in swarming correctly.
- if (build_with_chromium) {
- deps += [ ":dawn_end2end_tests_main" ]
- } else {
- sources += [ "End2EndTestsMain.cpp" ]
- }
-
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/utils:dawn_glfw" ]
- }
-
- if (is_chromeos) {
- libs += [ "gbm" ]
- }
-}
-
-###############################################################################
-# Dawn perf tests
-###############################################################################
-
-test("dawn_perf_tests") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
-
- deps = [
- ":gmock_and_gtest",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native",
- "${dawn_root}/src/dawn_platform",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- ]
-
- sources = [
- "DawnTest.cpp",
- "DawnTest.h",
- "ParamGenerator.h",
- "ToggleParser.cpp",
- "ToggleParser.h",
- "perf_tests/BufferUploadPerf.cpp",
- "perf_tests/DawnPerfTest.cpp",
- "perf_tests/DawnPerfTest.h",
- "perf_tests/DawnPerfTestPlatform.cpp",
- "perf_tests/DawnPerfTestPlatform.h",
- "perf_tests/DrawCallPerf.cpp",
- "perf_tests/ShaderRobustnessPerf.cpp",
- "perf_tests/SubresourceTrackingPerf.cpp",
- ]
-
- libs = []
-
- # When building inside Chromium, use their gtest main function and the
- # other perf test scaffolding in order to run in swarming correctly.
- if (build_with_chromium) {
- deps += [ ":dawn_perf_tests_main" ]
- data_deps = [ "//testing:run_perf_test" ]
- } else {
- sources += [ "PerfTestsMain.cpp" ]
- }
-
- if (dawn_enable_metal) {
- frameworks = [ "IOSurface.framework" ]
- }
-
- if (dawn_enable_opengl) {
- deps += [ "${dawn_root}/src/utils:dawn_glfw" ]
- }
-}
diff --git a/chromium/third_party/dawn/src/utils/BUILD.gn b/chromium/third_party/dawn/src/utils/BUILD.gn
deleted file mode 100644
index f372b074644..00000000000
--- a/chromium/third_party/dawn/src/utils/BUILD.gn
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright 2020 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import("../../scripts/dawn_overrides_with_defaults.gni")
-
-import("${dawn_root}/scripts/dawn_features.gni")
-
-###############################################################################
-# GLFW wrapping target
-###############################################################################
-
-# GLFW does not support ChromeOS, Android or Fuchsia, so provide a small mock
-# library that can be linked into the Dawn tests on these platforms. Otherwise,
-# use the real library from third_party/.
-if (dawn_supports_glfw_for_windowing) {
- group("dawn_glfw") {
- public_deps = [ "${dawn_root}/third_party/gn/glfw" ]
- }
-} else if (is_fuchsia) {
- # The mock implementation of GLFW on Fuchsia
- config("dawn_glfw_public_config") {
- # Allow inclusion of <GLFW/glfw3.h>
- include_dirs = [ "${dawn_glfw_dir}/include" ]
-
- # The GLFW/glfw3.h header includes <GL/gl.h> by default, but the latter
- # does not exist on Fuchsia. Defining GLFW_INCLUDE_NONE helps work around
- # the issue, but it needs to be defined for any file that includes the
- # header.
- defines = [
- "GLFW_INCLUDE_NONE",
- "GLFW_INCLUDE_VULKAN",
- ]
- }
-
- static_library("dawn_glfw") {
- sources = [
- # NOTE: The header below is required to pass "gn check".
- "${dawn_glfw_dir}/include/GLFW/glfw3.h",
- "Glfw3Fuchsia.cpp",
- ]
- public_configs = [ ":dawn_glfw_public_config" ]
- deps = [ "${dawn_root}/src/common" ]
- }
-} else {
- # Just skip GLFW on other systems
- group("dawn_glfw") {
- }
-}
-
-###############################################################################
-# Utils for tests and samples
-###############################################################################
-
-static_library("dawn_utils") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
-
- sources = [
- "ComboRenderBundleEncoderDescriptor.cpp",
- "ComboRenderBundleEncoderDescriptor.h",
- "ComboRenderPipelineDescriptor.cpp",
- "ComboRenderPipelineDescriptor.h",
- "PlatformDebugLogger.h",
- "ScopedAutoreleasePool.h",
- "SystemUtils.cpp",
- "SystemUtils.h",
- "TerribleCommandBuffer.cpp",
- "TerribleCommandBuffer.h",
- "TestUtils.cpp",
- "TestUtils.h",
- "TextureUtils.cpp",
- "TextureUtils.h",
- "Timer.h",
- "WGPUHelpers.cpp",
- "WGPUHelpers.h",
- "WireHelper.cpp",
- "WireHelper.h",
- ]
- deps = [
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn_native:dawn_native_headers",
- "${dawn_root}/src/dawn_wire",
- "${dawn_spirv_tools_dir}:spvtools_opt",
- ]
- libs = []
- frameworks = []
-
- if (is_win && !dawn_is_winuwp) {
- sources += [ "WindowsDebugLogger.cpp" ]
- } else {
- sources += [ "EmptyDebugLogger.cpp" ]
- }
-
- if (is_win) {
- sources += [ "WindowsTimer.cpp" ]
- } else if (is_mac) {
- sources += [
- "OSXTimer.cpp",
- "ObjCUtils.h",
- "ObjCUtils.mm",
- ]
- frameworks += [ "QuartzCore.framework" ]
- } else {
- sources += [ "PosixTimer.cpp" ]
- }
-
- if (is_mac) {
- sources += [ "ScopedAutoreleasePool.mm" ]
- } else {
- sources += [ "ScopedAutoreleasePool.cpp" ]
- }
-
- if (dawn_supports_glfw_for_windowing) {
- sources += [
- "GLFWUtils.cpp",
- "GLFWUtils.h",
- ]
- deps += [ ":dawn_glfw" ]
-
- if (dawn_enable_metal) {
- sources += [ "GLFWUtils_metal.mm" ]
- frameworks += [ "Metal.framework" ]
- }
- }
-
- public_deps = [ "${dawn_root}/src/dawn:dawncpp_headers" ]
-}
-
-###############################################################################
-# Dawn samples, only in standalone builds
-###############################################################################
-
-if (dawn_standalone) {
- # Library to handle the interaction of Dawn with GLFW windows in samples
- static_library("dawn_bindings") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
-
- sources = [
- "BackendBinding.cpp",
- "BackendBinding.h",
- ]
-
- public_deps = [ "${dawn_root}/src/dawn:dawn_headers" ]
-
- deps = [
- ":dawn_glfw",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn_native",
- ]
- libs = []
- frameworks = []
-
- if (dawn_enable_d3d12) {
- sources += [ "D3D12Binding.cpp" ]
- }
-
- if (dawn_enable_metal) {
- sources += [ "MetalBinding.mm" ]
- frameworks += [
- "Metal.framework",
- "QuartzCore.framework",
- ]
-
- # Suppress warnings that Metal isn't in the deployment target of Chrome
- if (is_mac) {
- cflags_objcc = [ "-Wno-unguarded-availability" ]
- }
- }
-
- if (dawn_enable_null) {
- sources += [ "NullBinding.cpp" ]
- }
-
- if (dawn_enable_opengl) {
- sources += [ "OpenGLBinding.cpp" ]
- }
-
- if (dawn_enable_vulkan) {
- sources += [ "VulkanBinding.cpp" ]
- }
- }
-}
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
deleted file mode 100644
index 15562de02e3..00000000000
--- a/chromium/third_party/dawn/src/utils/BackendBinding.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Compiler.h"
-
-#include "GLFW/glfw3.h"
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
-# include "dawn_native/OpenGLBackend.h"
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
-
-namespace utils {
-
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
-#endif
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
-#endif
-#if defined(DAWN_ENABLE_BACKEND_NULL)
- BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
-#endif
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
-#endif
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
-#endif
-
- BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
- : mWindow(window), mDevice(device) {
- }
-
- void DiscoverAdapter(dawn_native::Instance* instance,
- GLFWwindow* window,
- wgpu::BackendType type) {
- DAWN_UNUSED(type);
- DAWN_UNUSED(window);
-
- if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
-#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- glfwMakeContextCurrent(window);
- auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
- if (type == wgpu::BackendType::OpenGL) {
- dawn_native::opengl::AdapterDiscoveryOptions adapterOptions;
- adapterOptions.getProc = getProc;
- instance->DiscoverAdapters(&adapterOptions);
- } else {
- dawn_native::opengl::AdapterDiscoveryOptionsES adapterOptions;
- adapterOptions.getProc = getProc;
- instance->DiscoverAdapters(&adapterOptions);
- }
-#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
- } else {
- instance->DiscoverDefaultAdapters();
- }
- }
-
- BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
- switch (type) {
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
- case wgpu::BackendType::D3D12:
- return CreateD3D12Binding(window, device);
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
- case wgpu::BackendType::Metal:
- return CreateMetalBinding(window, device);
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_NULL)
- case wgpu::BackendType::Null:
- return CreateNullBinding(window, device);
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
- case wgpu::BackendType::OpenGL:
- return CreateOpenGLBinding(window, device);
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
- case wgpu::BackendType::OpenGLES:
- return CreateOpenGLBinding(window, device);
-#endif
-
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- case wgpu::BackendType::Vulkan:
- return CreateVulkanBinding(window, device);
-#endif
-
- default:
- return nullptr;
- }
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.h b/chromium/third_party/dawn/src/utils/BackendBinding.h
deleted file mode 100644
index ca1c91ffb9d..00000000000
--- a/chromium/third_party/dawn/src/utils/BackendBinding.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_BACKENDBINDING_H_
-#define UTILS_BACKENDBINDING_H_
-
-#include "dawn/webgpu_cpp.h"
-#include "dawn_native/DawnNative.h"
-
-struct GLFWwindow;
-
-namespace utils {
-
- class BackendBinding {
- public:
- virtual ~BackendBinding() = default;
-
- virtual uint64_t GetSwapChainImplementation() = 0;
- virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
-
- protected:
- BackendBinding(GLFWwindow* window, WGPUDevice device);
-
- GLFWwindow* mWindow = nullptr;
- WGPUDevice mDevice = nullptr;
- };
-
- void DiscoverAdapter(dawn_native::Instance* instance,
- GLFWwindow* window,
- wgpu::BackendType type);
- BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
-
-} // namespace utils
-
-#endif // UTILS_BACKENDBINDING_H_
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp
deleted file mode 100644
index 8b076e11d9c..00000000000
--- a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/ComboRenderBundleEncoderDescriptor.h"
-
-#include "utils/WGPUHelpers.h"
-
-namespace utils {
-
- ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
- wgpu::RenderBundleEncoderDescriptor* descriptor = this;
-
- descriptor->colorFormatsCount = 0;
- descriptor->colorFormats = &cColorFormats[0];
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h
deleted file mode 100644
index cd6044b59e9..00000000000
--- a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
-#define UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
-
-#include <dawn/webgpu_cpp.h>
-
-#include "common/Constants.h"
-
-#include <array>
-
-namespace utils {
-
- class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
- public:
- ComboRenderBundleEncoderDescriptor();
-
- std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
- };
-
-} // namespace utils
-
-#endif // UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
deleted file mode 100644
index 39068300560..00000000000
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/ComboRenderPipelineDescriptor.h"
-
-#include "utils/WGPUHelpers.h"
-
-namespace utils {
-
- ComboVertexState::ComboVertexState() {
- vertexBufferCount = 0;
-
- // Fill the default values for vertexBuffers and vertexAttributes in buffers.
- wgpu::VertexAttribute vertexAttribute;
- vertexAttribute.shaderLocation = 0;
- vertexAttribute.offset = 0;
- vertexAttribute.format = wgpu::VertexFormat::Float32;
- for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
- cAttributes[i] = vertexAttribute;
- }
- for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
- cVertexBuffers[i].arrayStride = 0;
- cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
- cVertexBuffers[i].attributeCount = 0;
- cVertexBuffers[i].attributes = nullptr;
- }
- // cVertexBuffers[i].attributes points to somewhere in cAttributes.
- // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
- // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
- // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
- // cVertexBuffers[2].attributes should point to &cAttributes[5].
- cVertexBuffers[0].attributes = &cAttributes[0];
- }
-
- ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
- wgpu::RenderPipelineDescriptor* descriptor = this;
-
- // Set defaults for the vertex state.
- {
- wgpu::VertexState* vertex = &descriptor->vertex;
- vertex->module = nullptr;
- vertex->entryPoint = "main";
- vertex->bufferCount = 0;
-
- // Fill the default values for vertexBuffers and vertexAttributes in buffers.
- for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
- cAttributes[i].shaderLocation = 0;
- cAttributes[i].offset = 0;
- cAttributes[i].format = wgpu::VertexFormat::Float32;
- }
- for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
- cBuffers[i].arrayStride = 0;
- cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
- cBuffers[i].attributeCount = 0;
- cBuffers[i].attributes = nullptr;
- }
- // cBuffers[i].attributes points to somewhere in cAttributes.
- // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
- // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
- // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
- // cBuffers[2].attributes should point to &cAttributes[5].
- cBuffers[0].attributes = &cAttributes[0];
- vertex->buffers = &cBuffers[0];
- }
-
- // Set the defaults for the primitive state
- {
- wgpu::PrimitiveState* primitive = &descriptor->primitive;
- primitive->topology = wgpu::PrimitiveTopology::TriangleList;
- primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
- primitive->frontFace = wgpu::FrontFace::CCW;
- primitive->cullMode = wgpu::CullMode::None;
- }
-
- // Set the defaults for the depth-stencil state
- {
- wgpu::StencilFaceState stencilFace;
- stencilFace.compare = wgpu::CompareFunction::Always;
- stencilFace.failOp = wgpu::StencilOperation::Keep;
- stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
- stencilFace.passOp = wgpu::StencilOperation::Keep;
-
- cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
- cDepthStencil.depthWriteEnabled = false;
- cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
- cDepthStencil.stencilBack = stencilFace;
- cDepthStencil.stencilFront = stencilFace;
- cDepthStencil.stencilReadMask = 0xff;
- cDepthStencil.stencilWriteMask = 0xff;
- cDepthStencil.depthBias = 0;
- cDepthStencil.depthBiasSlopeScale = 0.0;
- cDepthStencil.depthBiasClamp = 0.0;
- }
-
- // Set the defaults for the multisample state
- {
- wgpu::MultisampleState* multisample = &descriptor->multisample;
- multisample->count = 1;
- multisample->mask = 0xFFFFFFFF;
- multisample->alphaToCoverageEnabled = false;
- }
-
- // Set the defaults for the fragment state
- {
- cFragment.module = nullptr;
- cFragment.entryPoint = "main";
- cFragment.targetCount = 1;
- cFragment.targets = &cTargets[0];
- descriptor->fragment = &cFragment;
-
- wgpu::BlendComponent blendComponent;
- blendComponent.srcFactor = wgpu::BlendFactor::One;
- blendComponent.dstFactor = wgpu::BlendFactor::Zero;
- blendComponent.operation = wgpu::BlendOperation::Add;
-
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
- cTargets[i].blend = nullptr;
- cTargets[i].writeMask = wgpu::ColorWriteMask::All;
-
- cBlends[i].color = blendComponent;
- cBlends[i].alpha = blendComponent;
- }
- }
- }
-
- wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
- wgpu::TextureFormat format) {
- this->depthStencil = &cDepthStencil;
- cDepthStencil.format = format;
- return &cDepthStencil;
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
deleted file mode 100644
index 451c0036b58..00000000000
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
-#define UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
-
-#include <dawn/webgpu_cpp.h>
-
-#include "common/Constants.h"
-
-#include <array>
-
-namespace utils {
-
- // Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
- class ComboVertexState {
- public:
- ComboVertexState();
-
- ComboVertexState(const ComboVertexState&) = delete;
- ComboVertexState& operator=(const ComboVertexState&) = delete;
- ComboVertexState(ComboVertexState&&) = delete;
- ComboVertexState& operator=(ComboVertexState&&) = delete;
-
- uint32_t vertexBufferCount;
- std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
- std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
- };
-
- class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
- public:
- ComboRenderPipelineDescriptor();
-
- ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
- ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
- ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
- ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
-
- wgpu::DepthStencilState* EnableDepthStencil(
- wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
-
- std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
- std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
- std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
- std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
-
- wgpu::FragmentState cFragment;
- wgpu::DepthStencilState cDepthStencil;
- };
-
-} // namespace utils
-
-#endif // UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
diff --git a/chromium/third_party/dawn/src/utils/D3D12Binding.cpp b/chromium/third_party/dawn/src/utils/D3D12Binding.cpp
deleted file mode 100644
index 1708b147824..00000000000
--- a/chromium/third_party/dawn/src/utils/D3D12Binding.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Assert.h"
-#include "dawn_native/D3D12Backend.h"
-
-#include "GLFW/glfw3.h"
-#define GLFW_EXPOSE_NATIVE_WIN32
-#include "GLFW/glfw3native.h"
-
-#include <memory>
-
-namespace utils {
-
- class D3D12Binding : public BackendBinding {
- public:
- D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- HWND win32Window = glfwGetWin32Window(mWindow);
- mSwapchainImpl =
- dawn_native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
-
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- ASSERT(mSwapchainImpl.userData != nullptr);
- return dawn_native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
- }
-
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
-
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
- return new D3D12Binding(window, device);
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp b/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp
deleted file mode 100644
index ed0ad7ff532..00000000000
--- a/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/PlatformDebugLogger.h"
-
-namespace utils {
-
- class EmptyDebugLogger : public PlatformDebugLogger {
- public:
- EmptyDebugLogger() = default;
- ~EmptyDebugLogger() override = default;
- };
-
- PlatformDebugLogger* CreatePlatformDebugLogger() {
- return new EmptyDebugLogger();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils.cpp b/chromium/third_party/dawn/src/utils/GLFWUtils.cpp
deleted file mode 100644
index 3572ce8fc54..00000000000
--- a/chromium/third_party/dawn/src/utils/GLFWUtils.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/GLFWUtils.h"
-
-#include "GLFW/glfw3.h"
-#include "common/Platform.h"
-
-#include <cstdlib>
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# define GLFW_EXPOSE_NATIVE_WIN32
-#elif defined(DAWN_USE_X11)
-# define GLFW_EXPOSE_NATIVE_X11
-#endif
-#include "GLFW/glfw3native.h"
-
-namespace utils {
-
- void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
- if (type == wgpu::BackendType::OpenGL) {
- // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
- // texture views.
- glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
- glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
- glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
- glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
- } else if (type == wgpu::BackendType::OpenGLES) {
- glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
- glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
- glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
- glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
- } else {
- // Without this GLFW will initialize a GL context on the window, which prevents using
- // the window with other APIs (by crashing in weird ways).
- glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
- }
- }
-
- wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window) {
- std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
- SetupWindowAndGetSurfaceDescriptorForTesting(window);
-
- wgpu::SurfaceDescriptor descriptor;
- descriptor.nextInChain = chainedDescriptor.get();
- wgpu::Surface surface = instance.CreateSurface(&descriptor);
-
- return surface;
- }
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
- GLFWwindow* window) {
- std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
- desc->hwnd = glfwGetWin32Window(window);
- desc->hinstance = GetModuleHandle(nullptr);
- return std::move(desc);
- }
-#elif defined(DAWN_USE_X11)
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
- GLFWwindow* window) {
- std::unique_ptr<wgpu::SurfaceDescriptorFromXlib> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromXlib>();
- desc->display = glfwGetX11Display();
- desc->window = glfwGetX11Window(window);
- return std::move(desc);
- }
-#elif defined(DAWN_ENABLE_BACKEND_METAL)
- // SetupWindowAndGetSurfaceDescriptorForTesting defined in GLFWUtils_metal.mm
-#else
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(GLFWwindow*) {
- return nullptr;
- }
-#endif
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm b/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm
deleted file mode 100644
index a920ec07d32..00000000000
--- a/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#if !defined(DAWN_ENABLE_BACKEND_METAL)
-# error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
-#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
-
-#include "utils/GLFWUtils.h"
-
-#import <QuartzCore/CAMetalLayer.h>
-#include "GLFW/glfw3.h"
-
-#include <cstdlib>
-
-#define GLFW_EXPOSE_NATIVE_COCOA
-#include "GLFW/glfw3native.h"
-
-namespace utils {
-
- std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
- GLFWwindow* window) {
- if (@available(macOS 10.11, *)) {
- NSWindow* nsWindow = glfwGetCocoaWindow(window);
- NSView* view = [nsWindow contentView];
-
- // Create a CAMetalLayer that covers the whole window that will be passed to
- // CreateSurface.
- [view setWantsLayer:YES];
- [view setLayer:[CAMetalLayer layer]];
-
- // Use retina if the window was created with retina support.
- [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
-
- std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
- std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
- desc->layer = [view layer];
- return std::move(desc);
- }
-
- return nullptr;
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp b/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp
deleted file mode 100644
index cc8ed3ba03c..00000000000
--- a/chromium/third_party/dawn/src/utils/Glfw3Fuchsia.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A mock GLFW implementation that supports Fuchsia, but only implements
-// the functions called from Dawn.
-
-// NOTE: This must be included before GLFW/glfw3.h because the latter will
-// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
-// the first header to do so for sanity reasons (e.g. undefining weird
-// macros on Windows and Linux).
-// clang-format off
-#include "common/vulkan_platform.h"
-#include "common/Assert.h"
-#include <GLFW/glfw3.h>
-// clang-format on
-
-#include <dlfcn.h>
-
-int glfwInit(void) {
- return GLFW_TRUE;
-}
-
-void glfwDefaultWindowHints(void) {
-}
-
-void glfwWindowHint(int hint, int value) {
- DAWN_UNUSED(hint);
- DAWN_UNUSED(value);
-}
-
-struct GLFWwindow {
- PFN_vkGetInstanceProcAddr GetInstanceProcAddress = nullptr;
- void* vulkan_loader = nullptr;
-
- GLFWwindow() {
- vulkan_loader = ::dlopen("libvulkan.so", RTLD_NOW);
- ASSERT(vulkan_loader != nullptr);
- GetInstanceProcAddress = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
- dlsym(vulkan_loader, "vkGetInstanceProcAddr"));
- ASSERT(GetInstanceProcAddress != nullptr);
- }
-
- ~GLFWwindow() {
- if (vulkan_loader) {
- ::dlclose(vulkan_loader);
- }
- vulkan_loader = nullptr;
- }
-};
-
-GLFWwindow* glfwCreateWindow(int width,
- int height,
- const char* title,
- GLFWmonitor* monitor,
- GLFWwindow* share) {
- ASSERT(monitor == nullptr);
- ASSERT(share == nullptr);
- DAWN_UNUSED(width);
- DAWN_UNUSED(height);
- DAWN_UNUSED(title);
- return new GLFWwindow();
-}
-
-VkResult glfwCreateWindowSurface(VkInstance instance,
- GLFWwindow* window,
- const VkAllocationCallbacks* allocator,
- VkSurfaceKHR* surface) {
- // IMPORTANT: This assumes that the VkInstance was created with a Fuchsia
- // swapchain layer enabled, as well as the corresponding extension that
- // is queried here to perform the surface creation. Dawn should do all
- // required steps in VulkanInfo.cpp, VulkanFunctions.cpp and BackendVk.cpp.
-
- auto vkCreateImagePipeSurfaceFUCHSIA = reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
- window->GetInstanceProcAddress(instance, "vkCreateImagePipeSurfaceFUCHSIA"));
- ASSERT(vkCreateImagePipeSurfaceFUCHSIA != nullptr);
- if (!vkCreateImagePipeSurfaceFUCHSIA) {
- *surface = VK_NULL_HANDLE;
- return VK_ERROR_FEATURE_NOT_PRESENT;
- }
-
- const struct VkImagePipeSurfaceCreateInfoFUCHSIA create_info = {
- VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA,
- nullptr, // pNext
- 0, // flags, ignored for now
- ZX_HANDLE_INVALID, // imagePipeHandle, a null handle matches the framebuffer.
- };
-
- return vkCreateImagePipeSurfaceFUCHSIA(instance, &create_info, nullptr, surface);
-}
diff --git a/chromium/third_party/dawn/src/utils/MetalBinding.mm b/chromium/third_party/dawn/src/utils/MetalBinding.mm
deleted file mode 100644
index 1abeb3192a4..00000000000
--- a/chromium/third_party/dawn/src/utils/MetalBinding.mm
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Assert.h"
-#include "common/SwapChainUtils.h"
-#include "dawn_native/MetalBackend.h"
-
-#define GLFW_EXPOSE_NATIVE_COCOA
-#include "GLFW/glfw3.h"
-#include "GLFW/glfw3native.h"
-
-#import <QuartzCore/CAMetalLayer.h>
-
-namespace utils {
- class SwapChainImplMTL {
- public:
- using WSIContext = DawnWSIContextMetal;
-
- SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {
- }
-
- ~SwapChainImplMTL() {
- [mCurrentTexture release];
- [mCurrentDrawable release];
- }
-
- void Init(DawnWSIContextMetal* ctx) {
- mMtlDevice = ctx->device;
- mCommandQueue = ctx->queue;
- }
-
- DawnSwapChainError Configure(WGPUTextureFormat format,
- WGPUTextureUsage usage,
- uint32_t width,
- uint32_t height) {
- if (format != WGPUTextureFormat_BGRA8Unorm) {
- return "unsupported format";
- }
- ASSERT(width > 0);
- ASSERT(height > 0);
-
- NSView* contentView = [mNsWindow contentView];
- [contentView setWantsLayer:YES];
-
- CGSize size = {};
- size.width = width;
- size.height = height;
-
- mLayer = [CAMetalLayer layer];
- [mLayer setDevice:mMtlDevice];
- [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
- [mLayer setDrawableSize:size];
-
- constexpr uint32_t kFramebufferOnlyTextureUsages =
- WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
- bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
- if (hasOnlyFramebufferUsages) {
- [mLayer setFramebufferOnly:YES];
- }
-
- [contentView setLayer:mLayer];
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
- [mCurrentDrawable release];
- mCurrentDrawable = [mLayer nextDrawable];
- [mCurrentDrawable retain];
-
- [mCurrentTexture release];
- mCurrentTexture = mCurrentDrawable.texture;
- [mCurrentTexture retain];
-
- nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- DawnSwapChainError Present() {
- id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
- [commandBuffer presentDrawable:mCurrentDrawable];
- [commandBuffer commit];
-
- return DAWN_SWAP_CHAIN_NO_ERROR;
- }
-
- private:
- id mNsWindow = nil;
- id<MTLDevice> mMtlDevice = nil;
- id<MTLCommandQueue> mCommandQueue = nil;
-
- CAMetalLayer* mLayer = nullptr;
- id<CAMetalDrawable> mCurrentDrawable = nil;
- id<MTLTexture> mCurrentTexture = nil;
- };
-
- class MetalBinding : public BackendBinding {
- public:
- MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = CreateSwapChainImplementation(
- new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
-
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return WGPUTextureFormat_BGRA8Unorm;
- }
-
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
-
- BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
- return new MetalBinding(window, device);
- }
-}
diff --git a/chromium/third_party/dawn/src/utils/NullBinding.cpp b/chromium/third_party/dawn/src/utils/NullBinding.cpp
deleted file mode 100644
index f47b81c6745..00000000000
--- a/chromium/third_party/dawn/src/utils/NullBinding.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Assert.h"
-#include "dawn_native/NullBackend.h"
-
-#include <memory>
-
-namespace utils {
-
- class NullBinding : public BackendBinding {
- public:
- NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = dawn_native::null::CreateNativeSwapChainImpl();
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return WGPUTextureFormat_RGBA8Unorm;
- }
-
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
-
- BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
- return new NullBinding(window, device);
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/OSXTimer.cpp b/chromium/third_party/dawn/src/utils/OSXTimer.cpp
deleted file mode 100644
index da413759d28..00000000000
--- a/chromium/third_party/dawn/src/utils/OSXTimer.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/Timer.h"
-
-#include <CoreServices/CoreServices.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-
-namespace utils {
-
- class OSXTimer : public Timer {
- public:
- OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
- }
-
- ~OSXTimer() override = default;
-
- void Start() override {
- mStartTime = mach_absolute_time();
- // Cache secondCoeff
- GetSecondCoeff();
- mRunning = true;
- }
-
- void Stop() override {
- mStopTime = mach_absolute_time();
- mRunning = false;
- }
-
- double GetElapsedTime() const override {
- if (mRunning) {
- return mSecondCoeff * (mach_absolute_time() - mStartTime);
- } else {
- return mSecondCoeff * (mStopTime - mStartTime);
- }
- }
-
- double GetAbsoluteTime() override {
- return GetSecondCoeff() * mach_absolute_time();
- }
-
- private:
- double GetSecondCoeff() {
- // If this is the first time we've run, get the timebase.
- if (mSecondCoeff == 0.0) {
- mach_timebase_info_data_t timebaseInfo;
- mach_timebase_info(&timebaseInfo);
-
- mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
- }
-
- return mSecondCoeff;
- }
-
- bool mRunning;
- uint64_t mStartTime;
- uint64_t mStopTime;
- double mSecondCoeff;
- };
-
- Timer* CreateTimer() {
- return new OSXTimer();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ObjCUtils.mm b/chromium/third_party/dawn/src/utils/ObjCUtils.mm
deleted file mode 100644
index 5eba147cb60..00000000000
--- a/chromium/third_party/dawn/src/utils/ObjCUtils.mm
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/ObjCUtils.h"
-
-#include <QuartzCore/CALayer.h>
-
-namespace utils {
-
- void* CreateDummyCALayer() {
- return [CALayer layer];
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp b/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp
deleted file mode 100644
index f48f426c7cd..00000000000
--- a/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Assert.h"
-#include "common/Platform.h"
-#include "common/SwapChainUtils.h"
-#include "dawn/dawn_wsi.h"
-#include "dawn_native/OpenGLBackend.h"
-
-#include <cstdio>
-#include "GLFW/glfw3.h"
-
-namespace utils {
-
- class OpenGLBinding : public BackendBinding {
- public:
- OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- mSwapchainImpl = dawn_native::opengl::CreateNativeSwapChainImpl(
- mDevice,
- [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
- mWindow);
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
-
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- return dawn_native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
- }
-
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
-
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
- return new OpenGLBinding(window, device);
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/PosixTimer.cpp b/chromium/third_party/dawn/src/utils/PosixTimer.cpp
deleted file mode 100644
index a79e7b15211..00000000000
--- a/chromium/third_party/dawn/src/utils/PosixTimer.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/Timer.h"
-
-#include <stdint.h>
-#include <time.h>
-
-namespace utils {
-
- namespace {
-
- uint64_t GetCurrentTimeNs() {
- struct timespec currentTime;
- clock_gettime(CLOCK_MONOTONIC, &currentTime);
- return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
- }
-
- } // anonymous namespace
-
- class PosixTimer : public Timer {
- public:
- PosixTimer() : Timer(), mRunning(false) {
- }
-
- ~PosixTimer() override = default;
-
- void Start() override {
- mStartTimeNs = GetCurrentTimeNs();
- mRunning = true;
- }
-
- void Stop() override {
- mStopTimeNs = GetCurrentTimeNs();
- mRunning = false;
- }
-
- double GetElapsedTime() const override {
- uint64_t endTimeNs;
- if (mRunning) {
- endTimeNs = GetCurrentTimeNs();
- } else {
- endTimeNs = mStopTimeNs;
- }
-
- return (endTimeNs - mStartTimeNs) * 1e-9;
- }
-
- double GetAbsoluteTime() override {
- return GetCurrentTimeNs() * 1e-9;
- }
-
- private:
- bool mRunning;
- uint64_t mStartTimeNs;
- uint64_t mStopTimeNs;
- };
-
- Timer* CreateTimer() {
- return new PosixTimer();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.cpp b/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.cpp
deleted file mode 100644
index 0cee799da11..00000000000
--- a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.cpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/ScopedAutoreleasePool.h"
-
-#include "common/Compiler.h"
-
-namespace utils {
-
- ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
- DAWN_UNUSED(mPool);
- }
-
- ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
-
- ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
- }
-
- ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
- return *this;
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.h b/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.h
deleted file mode 100644
index e9c945dd2e4..00000000000
--- a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_SCOPEDAUTORELEASEPOOL_H_
-#define UTILS_SCOPEDAUTORELEASEPOOL_H_
-
-#include "common/Compiler.h"
-
-#include <cstddef>
-
-namespace utils {
-
- /**
- * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
- * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
- * is a no-op.
- *
- * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
- * expects a pool to always be available in each thread. If a pool is not available, then
- * autoreleased objects will never be released and will leak.
- *
- * In long-running blocks of code or loops, it is important to periodically create and drain
- * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
- * per-test. In graphics applications it's advised to create an autoreleasepool around the
- * frame loop. Ex.)
- * void frame() {
- * // Any protocol objects will be reclaimed when this object falls out of scope.
- * utils::ScopedAutoreleasePool pool;
- *
- * // do rendering ...
- * }
- */
- class DAWN_NO_DISCARD ScopedAutoreleasePool {
- public:
- ScopedAutoreleasePool();
- ~ScopedAutoreleasePool();
-
- ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
- ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
-
- ScopedAutoreleasePool(ScopedAutoreleasePool&&);
- ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
-
- private:
- void* mPool = nullptr;
- };
-
-} // namespace utils
-
-#endif // UTILS_SCOPEDAUTORELEASEPOOL_H_
diff --git a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.mm b/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.mm
deleted file mode 100644
index f649f3e6e94..00000000000
--- a/chromium/third_party/dawn/src/utils/ScopedAutoreleasePool.mm
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/ScopedAutoreleasePool.h"
-
-#import <Foundation/Foundation.h>
-
-namespace utils {
-
- ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {
- }
-
- ScopedAutoreleasePool::~ScopedAutoreleasePool() {
- if (mPool != nullptr) {
- [static_cast<NSAutoreleasePool*>(mPool) release];
- mPool = nullptr;
- }
- }
-
- ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
- mPool = rhs.mPool;
- rhs.mPool = nullptr;
- }
-
- ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
- if (&rhs != this) {
- mPool = rhs.mPool;
- rhs.mPool = nullptr;
- }
- return *this;
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/SystemUtils.cpp b/chromium/third_party/dawn/src/utils/SystemUtils.cpp
deleted file mode 100644
index b72a2ca0584..00000000000
--- a/chromium/third_party/dawn/src/utils/SystemUtils.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "common/Platform.h"
-
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include <Windows.h>
-#elif defined(DAWN_PLATFORM_POSIX)
-# include <unistd.h>
-#else
-# error "Unsupported platform."
-#endif
-
-namespace utils {
-
-#if defined(DAWN_PLATFORM_WINDOWS)
- void USleep(unsigned int usecs) {
- Sleep(static_cast<DWORD>(usecs / 1000));
- }
-#elif defined(DAWN_PLATFORM_POSIX)
- void USleep(unsigned int usecs) {
- usleep(usecs);
- }
-#else
-# error "Implement USleep for your platform."
-#endif
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp
deleted file mode 100644
index 0c06ab10364..00000000000
--- a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/TerribleCommandBuffer.h"
-
-#include "common/Assert.h"
-
-namespace utils {
-
- TerribleCommandBuffer::TerribleCommandBuffer() {
- }
-
- TerribleCommandBuffer::TerribleCommandBuffer(dawn_wire::CommandHandler* handler)
- : mHandler(handler) {
- }
-
- void TerribleCommandBuffer::SetHandler(dawn_wire::CommandHandler* handler) {
- mHandler = handler;
- }
-
- size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
- return sizeof(mBuffer);
- }
-
- void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
- // Note: This returns non-null even if size is zero.
- if (size > sizeof(mBuffer)) {
- return nullptr;
- }
- char* result = &mBuffer[mOffset];
- if (sizeof(mBuffer) - size < mOffset) {
- if (!Flush()) {
- return nullptr;
- }
- return GetCmdSpace(size);
- }
-
- mOffset += size;
- return result;
- }
-
- bool TerribleCommandBuffer::Flush() {
- bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
- mOffset = 0;
- return success;
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h
deleted file mode 100644
index 45aec93ff33..00000000000
--- a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_TERRIBLE_COMMAND_BUFFER_H_
-#define UTILS_TERRIBLE_COMMAND_BUFFER_H_
-
-#include "dawn_wire/Wire.h"
-
-namespace utils {
-
- class TerribleCommandBuffer : public dawn_wire::CommandSerializer {
- public:
- TerribleCommandBuffer();
- TerribleCommandBuffer(dawn_wire::CommandHandler* handler);
-
- void SetHandler(dawn_wire::CommandHandler* handler);
-
- size_t GetMaximumAllocationSize() const override;
-
- void* GetCmdSpace(size_t size) override;
- bool Flush() override;
-
- private:
- dawn_wire::CommandHandler* mHandler = nullptr;
- size_t mOffset = 0;
- char mBuffer[1000000];
- };
-
-} // namespace utils
-
-#endif // UTILS_TERRIBLE_COMMAND_BUFFER_H_
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.cpp b/chromium/third_party/dawn/src/utils/TestUtils.cpp
deleted file mode 100644
index d55a8c2f601..00000000000
--- a/chromium/third_party/dawn/src/utils/TestUtils.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/TestUtils.h"
-
-#include "common/Assert.h"
-#include "common/Constants.h"
-#include "common/Math.h"
-#include "utils/TextureUtils.h"
-#include "utils/WGPUHelpers.h"
-
-#include <vector>
-
-namespace utils {
-
- uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
- const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
- const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
- ASSERT(width % blockWidth == 0);
- return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
- }
-
- TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
- wgpu::TextureFormat format,
- wgpu::Extent3D textureSizeAtLevel0,
- uint32_t mipmapLevel,
- wgpu::TextureDimension dimension,
- uint32_t rowsPerImage) {
- // Compressed texture formats not supported in this function yet.
- ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
-
- TextureDataCopyLayout layout;
-
- layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
- std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
- textureSizeAtLevel0.depthOrArrayLayers};
-
- if (dimension == wgpu::TextureDimension::e3D) {
- layout.mipSize.depthOrArrayLayers =
- std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
- }
-
- layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
-
- if (rowsPerImage == wgpu::kCopyStrideUndefined) {
- rowsPerImage = layout.mipSize.height;
- }
- layout.rowsPerImage = rowsPerImage;
-
- uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
- layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
-
- layout.byteLength =
- RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
-
- const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
- layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
- layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
- layout.texelBlockCount = layout.byteLength / bytesPerTexel;
-
- return layout;
- }
-
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat) {
- uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
- uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
- uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
- ASSERT(copyExtent.width % blockWidth == 0);
- uint32_t widthInBlocks = copyExtent.width / blockWidth;
- ASSERT(copyExtent.height % blockHeight == 0);
- uint32_t heightInBlocks = copyExtent.height / blockHeight;
- return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
- copyExtent.depthOrArrayLayers, blockSize);
- }
-
- uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- uint64_t widthInBlocks,
- uint64_t heightInBlocks,
- uint64_t depth,
- uint64_t bytesPerBlock) {
- if (depth == 0) {
- return 0;
- }
-
- uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
- uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
- if (heightInBlocks != 0) {
- uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
- uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
- requiredBytesInCopy += lastImageBytes;
- }
- return requiredBytesInCopy;
- }
-
- uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
- uint64_t rowsPerImage,
- wgpu::Extent3D copyExtent,
- wgpu::TextureFormat textureFormat) {
- return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
- utils::GetTexelBlockSizeInBytes(textureFormat);
- }
-
- void UnalignDynamicUploader(wgpu::Device device) {
- std::vector<uint8_t> data = {1};
-
- wgpu::TextureDescriptor descriptor = {};
- descriptor.size = {1, 1, 1};
- descriptor.format = wgpu::TextureFormat::R8Unorm;
- descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
- wgpu::Texture texture = device.CreateTexture(&descriptor);
-
- wgpu::ImageCopyTexture imageCopyTexture =
- utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
- wgpu::TextureDataLayout textureDataLayout =
- utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
- wgpu::Extent3D copyExtent = {1, 1, 1};
-
- // WriteTexture with exactly 1 byte of data.
- device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
- &copyExtent);
- }
-
- uint32_t VertexFormatSize(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::Uint8x2:
- case wgpu::VertexFormat::Sint8x2:
- case wgpu::VertexFormat::Unorm8x2:
- case wgpu::VertexFormat::Snorm8x2:
- return 2;
- case wgpu::VertexFormat::Uint8x4:
- case wgpu::VertexFormat::Sint8x4:
- case wgpu::VertexFormat::Unorm8x4:
- case wgpu::VertexFormat::Snorm8x4:
- case wgpu::VertexFormat::Uint16x2:
- case wgpu::VertexFormat::Sint16x2:
- case wgpu::VertexFormat::Unorm16x2:
- case wgpu::VertexFormat::Snorm16x2:
- case wgpu::VertexFormat::Float16x2:
- case wgpu::VertexFormat::Float32:
- case wgpu::VertexFormat::Uint32:
- case wgpu::VertexFormat::Sint32:
- return 4;
- case wgpu::VertexFormat::Uint16x4:
- case wgpu::VertexFormat::Sint16x4:
- case wgpu::VertexFormat::Unorm16x4:
- case wgpu::VertexFormat::Snorm16x4:
- case wgpu::VertexFormat::Float16x4:
- case wgpu::VertexFormat::Float32x2:
- case wgpu::VertexFormat::Uint32x2:
- case wgpu::VertexFormat::Sint32x2:
- return 8;
- case wgpu::VertexFormat::Float32x3:
- case wgpu::VertexFormat::Uint32x3:
- case wgpu::VertexFormat::Sint32x3:
- return 12;
- case wgpu::VertexFormat::Float32x4:
- case wgpu::VertexFormat::Uint32x4:
- case wgpu::VertexFormat::Sint32x4:
- return 16;
- case wgpu::VertexFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TextureUtils.cpp b/chromium/third_party/dawn/src/utils/TextureUtils.cpp
deleted file mode 100644
index 9f5277a9f05..00000000000
--- a/chromium/third_party/dawn/src/utils/TextureUtils.cpp
+++ /dev/null
@@ -1,684 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "TextureUtils.h"
-
-namespace utils {
- bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
- switch (format) {
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::RGBA32Float:
- return true;
-
- default:
- return false;
- }
- }
-
- bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return true;
-
- default:
- return false;
- }
- }
-
- bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return true;
-
- default:
- return false;
- }
- }
-
- bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return true;
-
- default:
- return false;
- }
- }
-
- bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth32Float:
- return true;
- default:
- return false;
- }
- }
-
- uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- return 1u;
-
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- return 2u;
-
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- return 4u;
-
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- return 8u;
-
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- return 16u;
-
- case wgpu::TextureFormat::Depth16Unorm:
- return 2u;
-
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32Float:
- return 4u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- return 8u;
-
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- return 16u;
-
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- return 8u;
-
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 16u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 16u;
-
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return 1u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 4u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- return 4u;
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- return 5u;
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- return 6u;
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- return 8u;
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- return 10u;
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 12u;
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- case wgpu::TextureFormat::Depth32Float:
- case wgpu::TextureFormat::Depth24Plus:
- case wgpu::TextureFormat::Depth24PlusStencil8:
- case wgpu::TextureFormat::Depth16Unorm:
- case wgpu::TextureFormat::Depth24UnormStencil8:
- case wgpu::TextureFormat::Depth32FloatStencil8:
- return 1u;
-
- case wgpu::TextureFormat::BC1RGBAUnorm:
- case wgpu::TextureFormat::BC1RGBAUnormSrgb:
- case wgpu::TextureFormat::BC4RUnorm:
- case wgpu::TextureFormat::BC4RSnorm:
- case wgpu::TextureFormat::BC2RGBAUnorm:
- case wgpu::TextureFormat::BC2RGBAUnormSrgb:
- case wgpu::TextureFormat::BC3RGBAUnorm:
- case wgpu::TextureFormat::BC3RGBAUnormSrgb:
- case wgpu::TextureFormat::BC5RGUnorm:
- case wgpu::TextureFormat::BC5RGSnorm:
- case wgpu::TextureFormat::BC6HRGBUfloat:
- case wgpu::TextureFormat::BC6HRGBFloat:
- case wgpu::TextureFormat::BC7RGBAUnorm:
- case wgpu::TextureFormat::BC7RGBAUnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8Unorm:
- case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
- case wgpu::TextureFormat::ETC2RGB8A1Unorm:
- case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
- case wgpu::TextureFormat::ETC2RGBA8Unorm:
- case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
- case wgpu::TextureFormat::EACR11Unorm:
- case wgpu::TextureFormat::EACR11Snorm:
- case wgpu::TextureFormat::EACRG11Unorm:
- case wgpu::TextureFormat::EACRG11Snorm:
- return 4u;
-
- case wgpu::TextureFormat::ASTC4x4Unorm:
- case wgpu::TextureFormat::ASTC4x4UnormSrgb:
- case wgpu::TextureFormat::ASTC5x4Unorm:
- case wgpu::TextureFormat::ASTC5x4UnormSrgb:
- return 4u;
- case wgpu::TextureFormat::ASTC5x5Unorm:
- case wgpu::TextureFormat::ASTC5x5UnormSrgb:
- case wgpu::TextureFormat::ASTC6x5Unorm:
- case wgpu::TextureFormat::ASTC6x5UnormSrgb:
- case wgpu::TextureFormat::ASTC8x5Unorm:
- case wgpu::TextureFormat::ASTC8x5UnormSrgb:
- case wgpu::TextureFormat::ASTC10x5Unorm:
- case wgpu::TextureFormat::ASTC10x5UnormSrgb:
- return 5u;
- case wgpu::TextureFormat::ASTC6x6Unorm:
- case wgpu::TextureFormat::ASTC6x6UnormSrgb:
- case wgpu::TextureFormat::ASTC8x6Unorm:
- case wgpu::TextureFormat::ASTC8x6UnormSrgb:
- case wgpu::TextureFormat::ASTC10x6Unorm:
- case wgpu::TextureFormat::ASTC10x6UnormSrgb:
- return 6u;
- case wgpu::TextureFormat::ASTC8x8Unorm:
- case wgpu::TextureFormat::ASTC8x8UnormSrgb:
- case wgpu::TextureFormat::ASTC10x8Unorm:
- case wgpu::TextureFormat::ASTC10x8UnormSrgb:
- return 8u;
- case wgpu::TextureFormat::ASTC10x10Unorm:
- case wgpu::TextureFormat::ASTC10x10UnormSrgb:
- case wgpu::TextureFormat::ASTC12x10Unorm:
- case wgpu::TextureFormat::ASTC12x10UnormSrgb:
- return 10u;
- case wgpu::TextureFormat::ASTC12x12Unorm:
- case wgpu::TextureFormat::ASTC12x12UnormSrgb:
- return 12u;
-
- // Block size of a multi-planar format depends on aspect.
- case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-
- // TODO(dawn:666): implement stencil8
- case wgpu::TextureFormat::Stencil8:
- case wgpu::TextureFormat::Undefined:
- break;
- }
- UNREACHABLE();
- }
-
- const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Snorm:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Snorm:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8Snorm:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Ufloat:
- case wgpu::TextureFormat::RGB9E5Ufloat:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- return "f32";
-
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA32Uint:
- return "u32";
-
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::R32Sint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::RG32Sint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA32Sint:
- return "i32";
-
- default:
- UNREACHABLE();
- }
- }
-
- uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::R8Unorm:
- case wgpu::TextureFormat::R8Uint:
- case wgpu::TextureFormat::R8Sint:
- case wgpu::TextureFormat::R16Uint:
- case wgpu::TextureFormat::R16Sint:
- case wgpu::TextureFormat::R16Float:
- case wgpu::TextureFormat::R32Float:
- case wgpu::TextureFormat::R32Uint:
- case wgpu::TextureFormat::R32Sint:
- return 1u;
- case wgpu::TextureFormat::RG8Unorm:
- case wgpu::TextureFormat::RG8Uint:
- case wgpu::TextureFormat::RG8Sint:
- case wgpu::TextureFormat::RG16Uint:
- case wgpu::TextureFormat::RG16Sint:
- case wgpu::TextureFormat::RG16Float:
- case wgpu::TextureFormat::RG32Float:
- case wgpu::TextureFormat::RG32Uint:
- case wgpu::TextureFormat::RG32Sint:
- return 2u;
- case wgpu::TextureFormat::RGBA8Unorm:
- case wgpu::TextureFormat::RGBA8UnormSrgb:
- case wgpu::TextureFormat::RGBA8Uint:
- case wgpu::TextureFormat::RGBA8Sint:
- case wgpu::TextureFormat::BGRA8Unorm:
- case wgpu::TextureFormat::BGRA8UnormSrgb:
- case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RGBA16Uint:
- case wgpu::TextureFormat::RGBA16Sint:
- case wgpu::TextureFormat::RGBA16Float:
- case wgpu::TextureFormat::RGBA32Float:
- case wgpu::TextureFormat::RGBA32Uint:
- case wgpu::TextureFormat::RGBA32Sint:
- return 4u;
- default:
- UNREACHABLE();
- }
- }
-
- const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
- switch (textureFormat) {
- case wgpu::TextureFormat::RGBA8Unorm:
- return "rgba8unorm";
- case wgpu::TextureFormat::RGBA8Snorm:
- return "rgba8snorm";
- case wgpu::TextureFormat::RGBA8Uint:
- return "rgba8uint";
- case wgpu::TextureFormat::RGBA8Sint:
- return "rgba8sint";
- case wgpu::TextureFormat::RGBA16Uint:
- return "rgba16uint";
- case wgpu::TextureFormat::RGBA16Sint:
- return "rgba16sint";
- case wgpu::TextureFormat::RGBA16Float:
- return "rgba16float";
- case wgpu::TextureFormat::R32Uint:
- return "r32uint";
- case wgpu::TextureFormat::R32Sint:
- return "r32sint";
- case wgpu::TextureFormat::R32Float:
- return "r32float";
- case wgpu::TextureFormat::RG32Uint:
- return "rg32uint";
- case wgpu::TextureFormat::RG32Sint:
- return "rg32sint";
- case wgpu::TextureFormat::RG32Float:
- return "rg32float";
- case wgpu::TextureFormat::RGBA32Uint:
- return "rgba32uint";
- case wgpu::TextureFormat::RGBA32Sint:
- return "rgba32sint";
- case wgpu::TextureFormat::RGBA32Float:
- return "rgba32float";
-
- // The below do not currently exist in the WGSL spec, but are used
- // for tests that expect compilation failure.
- case wgpu::TextureFormat::R8Unorm:
- return "r8unorm";
- case wgpu::TextureFormat::R8Snorm:
- return "r8snorm";
- case wgpu::TextureFormat::R8Uint:
- return "r8uint";
- case wgpu::TextureFormat::R8Sint:
- return "r8sint";
- case wgpu::TextureFormat::R16Uint:
- return "r16uint";
- case wgpu::TextureFormat::R16Sint:
- return "r16sint";
- case wgpu::TextureFormat::R16Float:
- return "r16float";
- case wgpu::TextureFormat::RG8Unorm:
- return "rg8unorm";
- case wgpu::TextureFormat::RG8Snorm:
- return "rg8snorm";
- case wgpu::TextureFormat::RG8Uint:
- return "rg8uint";
- case wgpu::TextureFormat::RG8Sint:
- return "rg8sint";
- case wgpu::TextureFormat::RG16Uint:
- return "rg16uint";
- case wgpu::TextureFormat::RG16Sint:
- return "rg16sint";
- case wgpu::TextureFormat::RG16Float:
- return "rg16float";
- case wgpu::TextureFormat::RGB10A2Unorm:
- return "rgb10a2unorm";
- case wgpu::TextureFormat::RG11B10Ufloat:
- return "rg11b10ufloat";
-
- default:
- UNREACHABLE();
- }
- }
-
- wgpu::TextureDimension ViewDimensionToTextureDimension(
- const wgpu::TextureViewDimension dimension) {
- switch (dimension) {
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return wgpu::TextureDimension::e2D;
- case wgpu::TextureViewDimension::e3D:
- return wgpu::TextureDimension::e3D;
- // TODO(crbug.com/dawn/814): Implement for 1D texture.
- case wgpu::TextureViewDimension::e1D:
- default:
- UNREACHABLE();
- break;
- }
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TextureUtils.h b/chromium/third_party/dawn/src/utils/TextureUtils.h
deleted file mode 100644
index fac5552c589..00000000000
--- a/chromium/third_party/dawn/src/utils/TextureUtils.h
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_TEXTURE_UTILS_H_
-#define UTILS_TEXTURE_UTILS_H_
-
-#include <array>
-
-#include <dawn/webgpu_cpp.h>
-
-#include "common/Assert.h"
-
-namespace utils {
- // TODO(dawn:666): Add Stencil8 format when it's implemented.
- static constexpr std::array<wgpu::TextureFormat, 94> kAllTextureFormats = {
- wgpu::TextureFormat::R8Unorm,
- wgpu::TextureFormat::R8Snorm,
- wgpu::TextureFormat::R8Uint,
- wgpu::TextureFormat::R8Sint,
- wgpu::TextureFormat::R16Uint,
- wgpu::TextureFormat::R16Sint,
- wgpu::TextureFormat::R16Float,
- wgpu::TextureFormat::RG8Unorm,
- wgpu::TextureFormat::RG8Snorm,
- wgpu::TextureFormat::RG8Uint,
- wgpu::TextureFormat::RG8Sint,
- wgpu::TextureFormat::R32Float,
- wgpu::TextureFormat::R32Uint,
- wgpu::TextureFormat::R32Sint,
- wgpu::TextureFormat::RG16Uint,
- wgpu::TextureFormat::RG16Sint,
- wgpu::TextureFormat::RG16Float,
- wgpu::TextureFormat::RGBA8Unorm,
- wgpu::TextureFormat::RGBA8UnormSrgb,
- wgpu::TextureFormat::RGBA8Snorm,
- wgpu::TextureFormat::RGBA8Uint,
- wgpu::TextureFormat::RGBA8Sint,
- wgpu::TextureFormat::BGRA8Unorm,
- wgpu::TextureFormat::BGRA8UnormSrgb,
- wgpu::TextureFormat::RGB10A2Unorm,
- wgpu::TextureFormat::RG11B10Ufloat,
- wgpu::TextureFormat::RGB9E5Ufloat,
- wgpu::TextureFormat::RG32Float,
- wgpu::TextureFormat::RG32Uint,
- wgpu::TextureFormat::RG32Sint,
- wgpu::TextureFormat::RGBA16Uint,
- wgpu::TextureFormat::RGBA16Sint,
- wgpu::TextureFormat::RGBA16Float,
- wgpu::TextureFormat::RGBA32Float,
- wgpu::TextureFormat::RGBA32Uint,
- wgpu::TextureFormat::RGBA32Sint,
- wgpu::TextureFormat::Depth16Unorm,
- wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus,
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- wgpu::TextureFormat::BC1RGBAUnorm,
- wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm,
- wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm,
- wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm,
- wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm,
- wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat,
- wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm,
- wgpu::TextureFormat::BC7RGBAUnormSrgb,
- wgpu::TextureFormat::ETC2RGB8Unorm,
- wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm,
- wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm,
- wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm,
- wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm,
- wgpu::TextureFormat::EACRG11Snorm,
- wgpu::TextureFormat::ASTC4x4Unorm,
- wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm,
- wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm,
- wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm,
- wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm,
- wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm,
- wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm,
- wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm,
- wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm,
- wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm,
- wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm,
- wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm,
- wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm,
- wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm,
- wgpu::TextureFormat::ASTC12x12UnormSrgb};
-
- static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
- wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
-
- static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
- wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm};
-
- static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
- wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
- };
-
- static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
- wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
- wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
- wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
- wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
- wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
- wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
- wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm,
- wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
- wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
- wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
- wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
- wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
- wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
- wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
- wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
- wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb};
- static_assert(kCompressedFormats.size() ==
- kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
- "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
-
- // TODO(dawn:666): Add Stencil8 format when it's implemented.
- static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
- wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
- };
- static constexpr std::array<wgpu::TextureFormat, 3> kStencilFormats = {
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- };
- static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
- wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::Depth24UnormStencil8,
- wgpu::TextureFormat::Depth32FloatStencil8,
- };
-
- bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
-
- bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
- bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
- bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
-
- bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
-
- uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
- uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
- uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
-
- const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
- const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
- uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
-
- wgpu::TextureDimension ViewDimensionToTextureDimension(
- const wgpu::TextureViewDimension dimension);
-} // namespace utils
-
-#endif
diff --git a/chromium/third_party/dawn/src/utils/VulkanBinding.cpp b/chromium/third_party/dawn/src/utils/VulkanBinding.cpp
deleted file mode 100644
index 577c3bce6b4..00000000000
--- a/chromium/third_party/dawn/src/utils/VulkanBinding.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/BackendBinding.h"
-
-#include "common/Assert.h"
-#include "dawn_native/VulkanBackend.h"
-
-// Include GLFW after VulkanBackend so that it declares the Vulkan-specific functions
-#include "GLFW/glfw3.h"
-
-#include <memory>
-
-namespace utils {
-
- class VulkanBinding : public BackendBinding {
- public:
- VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
- }
-
- uint64_t GetSwapChainImplementation() override {
- if (mSwapchainImpl.userData == nullptr) {
- VkSurfaceKHR surface = VK_NULL_HANDLE;
- if (glfwCreateWindowSurface(dawn_native::vulkan::GetInstance(mDevice), mWindow,
- nullptr, &surface) != VK_SUCCESS) {
- ASSERT(false);
- }
-
- mSwapchainImpl = dawn_native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
- }
- return reinterpret_cast<uint64_t>(&mSwapchainImpl);
- }
- WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
- ASSERT(mSwapchainImpl.userData != nullptr);
- return dawn_native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
- }
-
- private:
- DawnSwapChainImplementation mSwapchainImpl = {};
- };
-
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
- return new VulkanBinding(window, device);
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
deleted file mode 100644
index c385215c648..00000000000
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/WGPUHelpers.h"
-
-#include "common/Constants.h"
-#include "common/Log.h"
-
-#include "spirv-tools/optimizer.hpp"
-
-#include <cstring>
-#include <iomanip>
-#include <limits>
-#include <mutex>
-#include <sstream>
-
-namespace utils {
- wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
- // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
- // aren't RAII, we don't return directly on success and instead always go through the code
- // path that destroys the SPIRV-Tools objects.
- wgpu::ShaderModule result = nullptr;
-
- spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
- ASSERT(context != nullptr);
-
- spv_binary spirv = nullptr;
- spv_diagnostic diagnostic = nullptr;
- if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
- ASSERT(spirv != nullptr);
- ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
-
- wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
- spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
- spirvDesc.code = spirv->code;
-
- wgpu::ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &spirvDesc;
- result = device.CreateShaderModule(&descriptor);
- } else {
- ASSERT(diagnostic != nullptr);
- dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
- << diagnostic->position.line + 1 << ":"
- << diagnostic->position.column + 1 << ": " << diagnostic->error;
- }
-
- spvDiagnosticDestroy(diagnostic);
- spvBinaryDestroy(spirv);
- spvContextDestroy(context);
-
- return result;
- }
-
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
- wgpu::ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = source;
- wgpu::ShaderModuleDescriptor descriptor;
- descriptor.nextInChain = &wgslDesc;
- return device.CreateShaderModule(&descriptor);
- }
-
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- const void* data,
- uint64_t size,
- wgpu::BufferUsage usage) {
- wgpu::BufferDescriptor descriptor;
- descriptor.size = size;
- descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
- wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-
- device.GetQueue().WriteBuffer(buffer, 0, data, size);
- return buffer;
- }
-
- ComboRenderPassDescriptor::ComboRenderPassDescriptor(
- std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
- wgpu::TextureView depthStencil) {
- for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
- cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
- cColorAttachments[i].clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
- }
-
- cDepthStencilAttachmentInfo.clearDepth = 1.0f;
- cDepthStencilAttachmentInfo.clearStencil = 0;
- cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
- cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-
- colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
- uint32_t colorAttachmentIndex = 0;
- for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
- if (colorAttachment.Get() != nullptr) {
- cColorAttachments[colorAttachmentIndex].view = colorAttachment;
- }
- ++colorAttachmentIndex;
- }
- colorAttachments = cColorAttachments.data();
-
- if (depthStencil.Get() != nullptr) {
- cDepthStencilAttachmentInfo.view = depthStencil;
- depthStencilAttachment = &cDepthStencilAttachmentInfo;
- } else {
- depthStencilAttachment = nullptr;
- }
- }
-
- ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
- *this = other;
- }
-
- const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
- const ComboRenderPassDescriptor& otherRenderPass) {
- cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
- cColorAttachments = otherRenderPass.cColorAttachments;
- colorAttachmentCount = otherRenderPass.colorAttachmentCount;
-
- colorAttachments = cColorAttachments.data();
-
- if (otherRenderPass.depthStencilAttachment != nullptr) {
- // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
- depthStencilAttachment = &cDepthStencilAttachmentInfo;
- } else {
- depthStencilAttachment = nullptr;
- }
-
- return *this;
- }
-
- BasicRenderPass::BasicRenderPass()
- : width(0),
- height(0),
- color(nullptr),
- colorFormat(wgpu::TextureFormat::RGBA8Unorm),
- renderPassInfo({}) {
- }
-
- BasicRenderPass::BasicRenderPass(uint32_t texWidth,
- uint32_t texHeight,
- wgpu::Texture colorAttachment,
- wgpu::TextureFormat textureFormat)
- : width(texWidth),
- height(texHeight),
- color(colorAttachment),
- colorFormat(textureFormat),
- renderPassInfo({colorAttachment.CreateView()}) {
- }
-
- BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
- uint32_t width,
- uint32_t height,
- wgpu::TextureFormat format) {
- DAWN_ASSERT(width > 0 && height > 0);
-
- wgpu::TextureDescriptor descriptor;
- descriptor.dimension = wgpu::TextureDimension::e2D;
- descriptor.size.width = width;
- descriptor.size.height = height;
- descriptor.size.depthOrArrayLayers = 1;
- descriptor.sampleCount = 1;
- descriptor.format = format;
- descriptor.mipLevelCount = 1;
- descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
- wgpu::Texture color = device.CreateTexture(&descriptor);
-
- return BasicRenderPass(width, height, color);
- }
-
- wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- wgpu::ImageCopyBuffer imageCopyBuffer = {};
- imageCopyBuffer.buffer = buffer;
- imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
-
- return imageCopyBuffer;
- }
-
- wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
- uint32_t mipLevel,
- wgpu::Origin3D origin,
- wgpu::TextureAspect aspect) {
- wgpu::ImageCopyTexture imageCopyTexture;
- imageCopyTexture.texture = texture;
- imageCopyTexture.mipLevel = mipLevel;
- imageCopyTexture.origin = origin;
- imageCopyTexture.aspect = aspect;
-
- return imageCopyTexture;
- }
-
- wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- wgpu::TextureDataLayout textureDataLayout;
- textureDataLayout.offset = offset;
- textureDataLayout.bytesPerRow = bytesPerRow;
- textureDataLayout.rowsPerImage = rowsPerImage;
-
- return textureDataLayout;
- }
-
- wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
- const wgpu::BindGroupLayout* bindGroupLayout) {
- wgpu::PipelineLayoutDescriptor descriptor;
- if (bindGroupLayout != nullptr) {
- descriptor.bindGroupLayoutCount = 1;
- descriptor.bindGroupLayouts = bindGroupLayout;
- } else {
- descriptor.bindGroupLayoutCount = 0;
- descriptor.bindGroupLayouts = nullptr;
- }
- return device.CreatePipelineLayout(&descriptor);
- }
-
- wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
- std::vector<wgpu::BindGroupLayout> bgls) {
- wgpu::PipelineLayoutDescriptor descriptor;
- descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
- descriptor.bindGroupLayouts = bgls.data();
- return device.CreatePipelineLayout(&descriptor);
- }
-
- wgpu::BindGroupLayout MakeBindGroupLayout(
- const wgpu::Device& device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
- std::vector<wgpu::BindGroupLayoutEntry> entries;
- for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
- entries.push_back(entry);
- }
-
- wgpu::BindGroupLayoutDescriptor descriptor;
- descriptor.entryCount = static_cast<uint32_t>(entries.size());
- descriptor.entries = entries.data();
- return device.CreateBindGroupLayout(&descriptor);
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset,
- uint64_t bufferMinBindingSize) {
- binding = entryBinding;
- visibility = entryVisibility;
- buffer.type = bufferType;
- buffer.hasDynamicOffset = bufferHasDynamicOffset;
- buffer.minBindingSize = bufferMinBindingSize;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType) {
- binding = entryBinding;
- visibility = entryVisibility;
- sampler.type = samplerType;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension textureViewDimension,
- bool textureMultisampled) {
- binding = entryBinding;
- visibility = entryVisibility;
- texture.sampleType = textureSampleType;
- texture.viewDimension = textureViewDimension;
- texture.multisampled = textureMultisampled;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension textureViewDimension) {
- binding = entryBinding;
- visibility = entryVisibility;
- storageTexture.access = storageTextureAccess;
- storageTexture.format = format;
- storageTexture.viewDimension = textureViewDimension;
- }
-
- // ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
- // of declaring a new one every time it's needed.
- wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::ExternalTextureBindingLayout* bindingLayout) {
- binding = entryBinding;
- visibility = entryVisibility;
- nextInChain = bindingLayout;
- }
-
- BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
- const wgpu::BindGroupLayoutEntry& entry)
- : wgpu::BindGroupLayoutEntry(entry) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::Sampler& sampler)
- : binding(binding), sampler(sampler) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::TextureView& textureView)
- : binding(binding), textureView(textureView) {
- }
-
- BindingInitializationHelper::BindingInitializationHelper(
- uint32_t binding,
- const wgpu::ExternalTexture& externalTexture)
- : binding(binding) {
- externalTextureBindingEntry.externalTexture = externalTexture;
- }
-
- BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const wgpu::Buffer& buffer,
- uint64_t offset,
- uint64_t size)
- : binding(binding), buffer(buffer), offset(offset), size(size) {
- }
-
- wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
- wgpu::BindGroupEntry result;
-
- result.binding = binding;
- result.sampler = sampler;
- result.textureView = textureView;
- result.buffer = buffer;
- result.offset = offset;
- result.size = size;
- if (externalTextureBindingEntry.externalTexture != nullptr) {
- result.nextInChain = &externalTextureBindingEntry;
- }
-
- return result;
- }
-
- wgpu::BindGroup MakeBindGroup(
- const wgpu::Device& device,
- const wgpu::BindGroupLayout& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer) {
- std::vector<wgpu::BindGroupEntry> entries;
- for (const BindingInitializationHelper& helper : entriesInitializer) {
- entries.push_back(helper.GetAsBinding());
- }
-
- wgpu::BindGroupDescriptor descriptor;
- descriptor.layout = layout;
- descriptor.entryCount = entries.size();
- descriptor.entries = entries.data();
-
- return device.CreateBindGroup(&descriptor);
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
deleted file mode 100644
index 0dff1781360..00000000000
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef UTILS_DAWNHELPERS_H_
-#define UTILS_DAWNHELPERS_H_
-
-#include <dawn/webgpu_cpp.h>
-
-#include <array>
-#include <initializer_list>
-#include <vector>
-
-#include "common/Constants.h"
-#include "utils/TextureUtils.h"
-
-namespace utils {
-
- enum Expectation { Success, Failure };
-
- wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
-
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- const void* data,
- uint64_t size,
- wgpu::BufferUsage usage);
-
- template <typename T>
- wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
- wgpu::BufferUsage usage,
- std::initializer_list<T> data) {
- return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
- }
-
- wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
- wgpu::ImageCopyTexture CreateImageCopyTexture(
- wgpu::Texture texture,
- uint32_t level,
- wgpu::Origin3D origin,
- wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
- wgpu::TextureDataLayout CreateTextureDataLayout(
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
-
- struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
- public:
- ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
- wgpu::TextureView depthStencil = wgpu::TextureView());
-
- ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
- const ComboRenderPassDescriptor& operator=(
- const ComboRenderPassDescriptor& otherRenderPass);
-
- std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
- wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
- };
-
- struct BasicRenderPass {
- public:
- BasicRenderPass();
- BasicRenderPass(uint32_t width,
- uint32_t height,
- wgpu::Texture color,
- wgpu::TextureFormat texture = kDefaultColorFormat);
-
- static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
- uint32_t width;
- uint32_t height;
- wgpu::Texture color;
- wgpu::TextureFormat colorFormat;
- utils::ComboRenderPassDescriptor renderPassInfo;
- };
- BasicRenderPass CreateBasicRenderPass(
- const wgpu::Device& device,
- uint32_t width,
- uint32_t height,
- wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
-
- wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
- const wgpu::BindGroupLayout* bindGroupLayout);
-
- wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
- std::vector<wgpu::BindGroupLayout> bgls);
-
- extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
-
- // Helpers to make creating bind group layouts look nicer:
- //
- // utils::MakeBindGroupLayout(device, {
- // {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
- // {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
- // {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
- // });
-
- struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::BufferBindingType bufferType,
- bool bufferHasDynamicOffset = false,
- uint64_t bufferMinBindingSize = 0);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::SamplerBindingType samplerType);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::TextureSampleType textureSampleType,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
- bool textureMultisampled = false);
- BindingLayoutEntryInitializationHelper(
- uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::StorageTextureAccess storageTextureAccess,
- wgpu::TextureFormat format,
- wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
- BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
- wgpu::ShaderStage entryVisibility,
- wgpu::ExternalTextureBindingLayout* bindingLayout);
-
- BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
- };
-
- wgpu::BindGroupLayout MakeBindGroupLayout(
- const wgpu::Device& device,
- std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
-
- // Helpers to make creating bind groups look nicer:
- //
- // utils::MakeBindGroup(device, layout, {
- // {0, mySampler},
- // {1, myBuffer, offset, size},
- // {3, myTextureView}
- // });
-
- // Structure with one constructor per-type of bindings, so that the initializer_list accepts
- // bindings with the right type and no extra information.
- struct BindingInitializationHelper {
- BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
- BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
- BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
- BindingInitializationHelper(uint32_t binding,
- const wgpu::Buffer& buffer,
- uint64_t offset = 0,
- uint64_t size = wgpu::kWholeSize);
-
- wgpu::BindGroupEntry GetAsBinding() const;
-
- uint32_t binding;
- wgpu::Sampler sampler;
- wgpu::TextureView textureView;
- wgpu::Buffer buffer;
- wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
- uint64_t offset = 0;
- uint64_t size = 0;
- };
-
- wgpu::BindGroup MakeBindGroup(
- const wgpu::Device& device,
- const wgpu::BindGroupLayout& layout,
- std::initializer_list<BindingInitializationHelper> entriesInitializer);
-
-} // namespace utils
-
-#endif // UTILS_DAWNHELPERS_H_
diff --git a/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp b/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp
deleted file mode 100644
index bcf967038a4..00000000000
--- a/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2020 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/PlatformDebugLogger.h"
-
-#include "common/Assert.h"
-#include "common/windows_with_undefs.h"
-
-#include <array>
-#include <thread>
-
-namespace utils {
-
- class WindowsDebugLogger : public PlatformDebugLogger {
- public:
- WindowsDebugLogger() : PlatformDebugLogger() {
- if (IsDebuggerPresent()) {
- // This condition is true when running inside Visual Studio or some other debugger.
- // Messages are already printed there so we don't need to do anything.
- return;
- }
-
- mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
- ASSERT(mShouldExitHandle != nullptr);
-
- mThread = std::thread(
- [](HANDLE shouldExit) {
- // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
- // for the layout of this struct.
- struct {
- DWORD process_id;
- char data[4096 - sizeof(DWORD)];
- }* dbWinBuffer = nullptr;
-
- HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
- 0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
- ASSERT(file != nullptr);
- ASSERT(file != INVALID_HANDLE_VALUE);
-
- dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
- MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
- ASSERT(dbWinBuffer != nullptr);
-
- HANDLE dbWinBufferReady =
- CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
- ASSERT(dbWinBufferReady != nullptr);
-
- HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
- ASSERT(dbWinDataReady != nullptr);
-
- std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
- while (true) {
- SetEvent(dbWinBufferReady);
- DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
- FALSE, INFINITE);
- if (wait == WAIT_OBJECT_0) {
- break;
- }
- ASSERT(wait == WAIT_OBJECT_0 + 1);
- fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
- dbWinBuffer->data);
- fflush(stderr);
- }
-
- CloseHandle(dbWinDataReady);
- CloseHandle(dbWinBufferReady);
- UnmapViewOfFile(dbWinBuffer);
- CloseHandle(file);
- },
- mShouldExitHandle);
- }
-
- ~WindowsDebugLogger() override {
- if (IsDebuggerPresent()) {
- // This condition is true when running inside Visual Studio or some other debugger.
- // Messages are already printed there so we don't need to do anything.
- return;
- }
-
- if (mShouldExitHandle != nullptr) {
- BOOL result = SetEvent(mShouldExitHandle);
- ASSERT(result != 0);
- CloseHandle(mShouldExitHandle);
- }
-
- if (mThread.joinable()) {
- mThread.join();
- }
- }
-
- private:
- std::thread mThread;
- HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
- };
-
- PlatformDebugLogger* CreatePlatformDebugLogger() {
- return new WindowsDebugLogger();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/WindowsTimer.cpp b/chromium/third_party/dawn/src/utils/WindowsTimer.cpp
deleted file mode 100644
index 95996a11067..00000000000
--- a/chromium/third_party/dawn/src/utils/WindowsTimer.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/Timer.h"
-
-#include <windows.h>
-
-namespace utils {
-
- class WindowsTimer : public Timer {
- public:
- WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
- }
-
- ~WindowsTimer() override = default;
-
- void Start() override {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- mStartTime = curTime.QuadPart;
-
- // Cache the frequency
- GetFrequency();
-
- mRunning = true;
- }
-
- void Stop() override {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- mStopTime = curTime.QuadPart;
-
- mRunning = false;
- }
-
- double GetElapsedTime() const override {
- LONGLONG endTime;
- if (mRunning) {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
- endTime = curTime.QuadPart;
- } else {
- endTime = mStopTime;
- }
-
- return static_cast<double>(endTime - mStartTime) / mFrequency;
- }
-
- double GetAbsoluteTime() override {
- LARGE_INTEGER curTime;
- QueryPerformanceCounter(&curTime);
-
- return static_cast<double>(curTime.QuadPart) / GetFrequency();
- }
-
- private:
- LONGLONG GetFrequency() {
- if (mFrequency == 0) {
- LARGE_INTEGER frequency = {};
- QueryPerformanceFrequency(&frequency);
-
- mFrequency = frequency.QuadPart;
- }
-
- return mFrequency;
- }
-
- bool mRunning;
- LONGLONG mStartTime;
- LONGLONG mStopTime;
- LONGLONG mFrequency;
- };
-
- Timer* CreateTimer() {
- return new WindowsTimer();
- }
-
-} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/WireHelper.cpp b/chromium/third_party/dawn/src/utils/WireHelper.cpp
deleted file mode 100644
index 32be5cff7cd..00000000000
--- a/chromium/third_party/dawn/src/utils/WireHelper.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2021 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "utils/WireHelper.h"
-
-#include "common/Assert.h"
-#include "common/Log.h"
-#include "common/SystemUtils.h"
-#include "dawn/dawn_proc.h"
-#include "dawn_native/DawnNative.h"
-#include "dawn_wire/WireClient.h"
-#include "dawn_wire/WireServer.h"
-#include "utils/TerribleCommandBuffer.h"
-
-#include <algorithm>
-#include <cstring>
-#include <fstream>
-#include <iomanip>
-#include <set>
-#include <sstream>
-
-namespace utils {
-
- namespace {
-
- class WireServerTraceLayer : public dawn_wire::CommandHandler {
- public:
- WireServerTraceLayer(const char* dir, dawn_wire::CommandHandler* handler)
- : dawn_wire::CommandHandler(), mDir(dir), mHandler(handler) {
- const char* sep = GetPathSeparator();
- if (mDir.size() > 0 && mDir.back() != *sep) {
- mDir += sep;
- }
- }
-
- void BeginWireTrace(const char* name) {
- std::string filename = name;
- // Replace slashes in gtest names with underscores so everything is in one
- // directory.
- std::replace(filename.begin(), filename.end(), '/', '_');
- std::replace(filename.begin(), filename.end(), '\\', '_');
-
- // Prepend the filename with the directory.
- filename = mDir + filename;
-
- ASSERT(!mFile.is_open());
- mFile.open(filename,
- std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
-
- // Write the initial 8 bytes. This means the fuzzer should never inject an
- // error.
- const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
- mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex),
- sizeof(injectedErrorIndex));
- }
-
- const volatile char* HandleCommands(const volatile char* commands,
- size_t size) override {
- if (mFile.is_open()) {
- mFile.write(const_cast<const char*>(commands), size);
- }
- return mHandler->HandleCommands(commands, size);
- }
-
- private:
- std::string mDir;
- dawn_wire::CommandHandler* mHandler;
- std::ofstream mFile;
- };
-
- class WireHelperDirect : public WireHelper {
- public:
- WireHelperDirect() {
- dawnProcSetProcs(&dawn_native::GetProcs());
- }
-
- std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
- ASSERT(backendDevice != nullptr);
- return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
- }
-
- void BeginWireTrace(const char* name) override {
- }
-
- bool FlushClient() override {
- return true;
- }
-
- bool FlushServer() override {
- return true;
- }
- };
-
- class WireHelperProxy : public WireHelper {
- public:
- explicit WireHelperProxy(const char* wireTraceDir) {
- mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
- mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
-
- dawn_wire::WireServerDescriptor serverDesc = {};
- serverDesc.procs = &dawn_native::GetProcs();
- serverDesc.serializer = mS2cBuf.get();
-
- mWireServer.reset(new dawn_wire::WireServer(serverDesc));
- mC2sBuf->SetHandler(mWireServer.get());
-
- if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
- mWireServerTraceLayer.reset(
- new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
- mC2sBuf->SetHandler(mWireServerTraceLayer.get());
- }
-
- dawn_wire::WireClientDescriptor clientDesc = {};
- clientDesc.serializer = mC2sBuf.get();
-
- mWireClient.reset(new dawn_wire::WireClient(clientDesc));
- mS2cBuf->SetHandler(mWireClient.get());
- dawnProcSetProcs(&dawn_wire::client::GetProcs());
- }
-
- std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
- ASSERT(backendDevice != nullptr);
-
- auto reservation = mWireClient->ReserveDevice();
- mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
- dawn_native::GetProcs().deviceRelease(backendDevice);
-
- return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
- }
-
- void BeginWireTrace(const char* name) override {
- if (mWireServerTraceLayer) {
- return mWireServerTraceLayer->BeginWireTrace(name);
- }
- }
-
- bool FlushClient() override {
- return mC2sBuf->Flush();
- }
-
- bool FlushServer() override {
- return mS2cBuf->Flush();
- }
-
- private:
- std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
- std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
- std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
- std::unique_ptr<dawn_wire::WireServer> mWireServer;
- std::unique_ptr<dawn_wire::WireClient> mWireClient;
- };
-
- } // anonymous namespace
-
- std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
- if (useWire) {
- return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
- } else {
- return std::unique_ptr<WireHelper>(new WireHelperDirect());
- }
- }
-
- WireHelper::~WireHelper() {
- dawnProcSetProcs(nullptr);
- }
-
-} // namespace utils