author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2022-02-02 12:21:57 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2022-02-12 08:13:00 +0000
commit     606d85f2a5386472314d39923da28c70c60dc8e7 (patch)
tree       a8f4d7bf997f349f45605e6058259fba0630e4d7 /chromium/third_party/dawn/src
parent     5786336dda477d04fb98483dca1a5426eebde2d7 (diff)
download   qtwebengine-chromium-606d85f2a5386472314d39923da28c70c60dc8e7.tar.gz
BASELINE: Update Chromium to 96.0.4664.181
Change-Id: I762cd1da89d73aa6313b4a753fe126c34833f046
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r--  chromium/third_party/dawn/src/common/BUILD.gn  3
-rw-r--r--  chromium/third_party/dawn/src/common/CMakeLists.txt  4
-rw-r--r--  chromium/third_party/dawn/src/common/Constants.h  2
-rw-r--r--  chromium/third_party/dawn/src/common/NonCopyable.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn/BUILD.gn  28
-rw-r--r--  chromium/third_party/dawn/src/dawn/CMakeLists.txt  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Adapter.cpp  91
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Adapter.h  20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BUILD.gn  37
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroup.cpp  204
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroup.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp  59
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h  20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.cpp  123
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BufferLocation.cpp  54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BufferLocation.h  49
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CMakeLists.txt  32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CachedObject.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp  74
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandAllocator.h  32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp  18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBuffer.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp  120
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp  967
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.h  14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp  314
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.h  12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.cpp  20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.h  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp  213
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp  29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePipeline.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp  76
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp  57
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h  37
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DawnNative.cpp  47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.cpp  458
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.h  109
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp  64
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EncodingContext.h  57
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Error.h  44
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorData.cpp  42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorData.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Extensions.cpp  152
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Extensions.h  72
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp  54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ExternalTexture.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Features.cpp  163
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Features.h  74
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.cpp  74
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Forward.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp  193
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h  112
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp  397
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h  39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.cpp  16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.h  15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/IntegerTypes.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp  38
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h  21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Limits.cpp  212
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Limits.h  43
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp  36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ObjectBase.h  34
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.cpp  75
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.h  10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp  40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PipelineLayout.h  13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp  79
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.cpp  81
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.cpp  49
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.h  10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp  17
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundle.h  11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp  47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp  461
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp  363
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp  522
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.h  10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Sampler.cpp  48
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Sampler.h  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp  47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h  55
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp  122
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.h  82
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Subresource.cpp  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Surface.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.cpp  9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.cpp  324
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.h  11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.cpp  16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp  32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp  163
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h  59
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp  28
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp  42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp  21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp  164
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h  89
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp  30
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp  138
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h  17
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp  81
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp  53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h  14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp  706
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h  14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp  156
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h  24
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp  18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/dawn_platform.h  21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm  85
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm  13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm  47
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm  20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm  40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm  85
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm  33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm  63
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h  18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp  39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp  11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp  17
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp  22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp  126
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp  13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp  12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp  9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp  62
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp  39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp  58
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp  34
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h  12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp  201
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h  14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp  14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp  122
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h  5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp  96
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h  30
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp  13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/CMakeLists.txt  124
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/Module.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp  38
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/README.md  70
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp  55
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h  76
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt  86
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp  1141
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/Converter.h  362
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp  179
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/Errors.h  60
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp  123
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPU.h  41
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp  138
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h  45
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h  45
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp  36
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h  46
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp  167
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h  86
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp  40
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h  47
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp  196
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h  80
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp  135
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h  76
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp  45
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h  48
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp  518
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h  113
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h  46
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp  39
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h  46
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp  132
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h  61
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp  39
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h  46
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp  192
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h  86
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp  262
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h  115
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp  45
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h  48
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp  36
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h  44
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp  125
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h  50
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp  131
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h  59
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp  64
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h  49
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp  35
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h  46
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/Browser.idl  84
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt  68
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/Core.cpp  160
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/Core.h  662
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl  393
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl  282
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl  126
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/tools/go.mod  9
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/tools/go.sum  33
-rwxr-xr-x  chromium/third_party/dawn/src/dawn_node/tools/run-cts  33
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go  635
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go  499
-rw-r--r--  chromium/third_party/dawn/src/dawn_node/utils/Debug.h  146
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/BUILD.gn  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp  59
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.h  1
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp  6
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.cpp  176
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.h  13
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp  24
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.h  7
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h  82
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp  36
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h  8
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.cpp  4
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.h  2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp  5
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h  8
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/DawnNative.h  23
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h  6
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/Wire.h  10
-rw-r--r--  chromium/third_party/dawn/src/tests/BUILD.gn  12
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp  6
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h  16
-rw-r--r--  chromium/third_party/dawn/src/utils/TestUtils.cpp  3
-rw-r--r--  chromium/third_party/dawn/src/utils/TextureUtils.cpp  231
-rw-r--r--  chromium/third_party/dawn/src/utils/TextureUtils.h  100
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.h  5
292 files changed, 17591 insertions, 3974 deletions
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index afaa18f2d5b..b4ae07731d7 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -94,6 +94,9 @@ config("dawn_internal") {
visibility = [ "../*" ]
cflags = []
+ if (is_clang) {
+ cflags += [ "-Wno-shadow" ]
+ }
# Enable more warnings that were found when using Dawn in other projects.
# Add them only when building in standalone because we control which clang
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
index 4bdeec4f60d..fe90b29a9a0 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -69,6 +69,10 @@ if (WIN32)
"WindowsUtils.h"
"windows_with_undefs.h"
)
+elseif(APPLE)
+ target_sources(dawn_common PRIVATE
+ "SystemUtils_mac.mm"
+ )
endif()
target_link_libraries(dawn_common PUBLIC dawncpp_headers PRIVATE dawn_internal_config)
diff --git a/chromium/third_party/dawn/src/common/Constants.h b/chromium/third_party/dawn/src/common/Constants.h
index d6f42e71c2b..e34aaed8403 100644
--- a/chromium/third_party/dawn/src/common/Constants.h
+++ b/chromium/third_party/dawn/src/common/Constants.h
@@ -65,7 +65,7 @@ static constexpr float kLodMax = 1000.0;
static constexpr uint32_t kMaxTextureDimension1D = 8192u;
static constexpr uint32_t kMaxTextureDimension2D = 8192u;
static constexpr uint32_t kMaxTextureDimension3D = 2048u;
-static constexpr uint32_t kMaxTextureArrayLayers = 2048u;
+static constexpr uint32_t kMaxTextureArrayLayers = 256u;
static constexpr uint32_t kMaxTexture2DMipLevels = 14u;
static_assert(1 << (kMaxTexture2DMipLevels - 1) == kMaxTextureDimension2D,
"kMaxTexture2DMipLevels and kMaxTextureDimension2D size mismatch");
diff --git a/chromium/third_party/dawn/src/common/NonCopyable.h b/chromium/third_party/dawn/src/common/NonCopyable.h
index 61f15cabcff..2d217dfbad3 100644
--- a/chromium/third_party/dawn/src/common/NonCopyable.h
+++ b/chromium/third_party/dawn/src/common/NonCopyable.h
@@ -21,6 +21,9 @@ class NonCopyable {
constexpr NonCopyable() = default;
~NonCopyable() = default;
+ NonCopyable(NonCopyable&&) = default;
+ NonCopyable& operator=(NonCopyable&&) = default;
+
private:
NonCopyable(const NonCopyable&) = delete;
void operator=(const NonCopyable&) = delete;
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index 543c51e599d..feddfeecf14 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -29,14 +29,6 @@ dawn_json_generator("dawn_headers_gen") {
]
}
-dawn_json_generator("emscripten_bits_gen") {
- target = "emscripten_bits"
- outputs = [
- "src/dawn/webgpu_struct_info.json",
- "src/dawn/library_webgpu_enum_tables.js",
- ]
-}
-
source_set("dawn_headers") {
all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
public_deps = [ ":dawn_headers_gen" ]
@@ -107,3 +99,23 @@ dawn_component("dawn_proc") {
"${dawn_root}/src/include/dawn/dawn_thread_dispatch_proc.h",
]
}
+
+###############################################################################
+# Other generated files (upstream header, emscripten header, emscripten bits)
+###############################################################################
+
+dawn_json_generator("webgpu_headers_gen") {
+ target = "webgpu_headers"
+ outputs = [ "webgpu-headers/webgpu.h" ]
+}
+
+dawn_json_generator("emscripten_bits_gen") {
+ target = "emscripten_bits"
+ outputs = [
+ "emscripten-bits/webgpu.h",
+ "emscripten-bits/webgpu_cpp.h",
+ "emscripten-bits/webgpu_cpp.cpp",
+ "emscripten-bits/webgpu_struct_info.json",
+ "emscripten-bits/library_webgpu_enum_tables.js",
+ ]
+}
diff --git a/chromium/third_party/dawn/src/dawn/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
index ff7d278f9df..7d6a4f67645 100644
--- a/chromium/third_party/dawn/src/dawn/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
@@ -87,3 +87,25 @@ if(BUILD_SHARED_LIBS)
endif()
target_sources(dawn_proc PRIVATE ${DAWNPROC_GEN_SOURCES})
target_link_libraries(dawn_proc PUBLIC dawn_headers)
+
+###############################################################################
+# Other generated files (upstream header, emscripten header, emscripten bits)
+###############################################################################
+
+DawnJSONGenerator(
+ TARGET "webgpu_headers"
+ PRINT_NAME "WebGPU headers"
+ RESULT_VARIABLE "WEBGPU_HEADERS_GEN_SOURCES"
+)
+add_custom_target(webgpu_headers_gen
+ DEPENDS ${WEBGPU_HEADERS_GEN_SOURCES}
+)
+
+DawnJSONGenerator(
+ TARGET "emscripten_bits"
+ PRINT_NAME "Emscripten WebGPU bits"
+ RESULT_VARIABLE "EMSCRIPTEN_BITS_GEN_SOURCES"
+)
+add_custom_target(emscripten_bits_gen
+ DEPENDS ${EMSCRIPTEN_BITS_GEN_SOURCES}
+)
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
index 5f843069aac..8690f764da5 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
@@ -20,7 +20,8 @@ namespace dawn_native {
AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
: mInstance(instance), mBackend(backend) {
- mSupportedExtensions.EnableExtension(Extension::DawnInternalUsages);
+ GetDefaultLimits(&mLimits.v1);
+ mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
}
wgpu::BackendType AdapterBase::GetBackendType() const {
@@ -43,18 +44,18 @@ namespace dawn_native {
return mInstance;
}
- ExtensionsSet AdapterBase::GetSupportedExtensions() const {
- return mSupportedExtensions;
+ FeaturesSet AdapterBase::GetSupportedFeatures() const {
+ return mSupportedFeatures;
}
- bool AdapterBase::SupportsAllRequestedExtensions(
- const std::vector<const char*>& requestedExtensions) const {
- for (const char* extensionStr : requestedExtensions) {
- Extension extensionEnum = mInstance->ExtensionNameToEnum(extensionStr);
- if (extensionEnum == Extension::InvalidEnum) {
+ bool AdapterBase::SupportsAllRequestedFeatures(
+ const std::vector<const char*>& requestedFeatures) const {
+ for (const char* featureStr : requestedFeatures) {
+ Feature featureEnum = mInstance->FeatureNameToEnum(featureStr);
+ if (featureEnum == Feature::InvalidEnum) {
return false;
}
- if (!mSupportedExtensions.IsEnabled(extensionEnum)) {
+ if (!mSupportedFeatures.IsEnabled(featureEnum)) {
return false;
}
}
@@ -63,11 +64,32 @@ namespace dawn_native {
WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
WGPUDeviceProperties adapterProperties = {};
-
- mSupportedExtensions.InitializeDeviceProperties(&adapterProperties);
+ adapterProperties.deviceID = mPCIInfo.deviceId;
+ adapterProperties.vendorID = mPCIInfo.vendorId;
+
+ mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+ // This is OK for now because there are no limit feature structs.
+ // If we add additional structs, the caller will need to provide memory
+ // to store them (ex. by calling GetLimits directly instead). Currently,
+ // we keep this function as it's only used internally in Chromium to
+ // send the adapter properties across the wire.
+ GetLimits(reinterpret_cast<SupportedLimits*>(&adapterProperties.limits));
return adapterProperties;
}
+ bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ if (mUseTieredLimits) {
+ limits->limits = ApplyLimitTiers(mLimits.v1);
+ } else {
+ limits->limits = mLimits.v1;
+ }
+ return true;
+ }
+
DeviceBase* AdapterBase::CreateDevice(const DeviceDescriptor* descriptor) {
DeviceBase* result = nullptr;
@@ -78,18 +100,61 @@ namespace dawn_native {
return result;
}
+ void AdapterBase::RequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ DeviceBase* result = nullptr;
+ MaybeError err = CreateDeviceInternal(&result, descriptor);
+ WGPUDevice device = reinterpret_cast<WGPUDevice>(result);
+
+ if (err.IsError()) {
+ std::unique_ptr<ErrorData> errorData = err.AcquireError();
+ callback(WGPURequestDeviceStatus_Error, device, errorData->GetMessage().c_str(),
+ userdata);
+ return;
+ }
+ WGPURequestDeviceStatus status =
+ device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+ callback(status, device, nullptr, userdata);
+ }
+
MaybeError AdapterBase::CreateDeviceInternal(DeviceBase** result,
const DeviceDescriptor* descriptor) {
if (descriptor != nullptr) {
- if (!SupportsAllRequestedExtensions(descriptor->requiredExtensions)) {
- return DAWN_VALIDATION_ERROR("One or more requested extensions are not supported");
+ // TODO(dawn:1149): remove once requiredExtensions is no longer used.
+ for (const char* extensionStr : descriptor->requiredExtensions) {
+ Feature extensionEnum = mInstance->FeatureNameToEnum(extensionStr);
+ DAWN_INVALID_IF(extensionEnum == Feature::InvalidEnum,
+ "Requested feature %s is unknown.", extensionStr);
+ DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(extensionEnum),
+ "Requested feature %s is disabled.", extensionStr);
}
+ for (const char* featureStr : descriptor->requiredFeatures) {
+ Feature featureEnum = mInstance->FeatureNameToEnum(featureStr);
+ DAWN_INVALID_IF(featureEnum == Feature::InvalidEnum,
+ "Requested feature %s is unknown.", featureStr);
+ DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(featureEnum),
+ "Requested feature %s is disabled.", featureStr);
+ }
+ }
+
+ if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
+ DAWN_TRY(ValidateLimits(
+ mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+ reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits));
+
+ DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+ "nextInChain is not nullptr.");
}
DAWN_TRY_ASSIGN(*result, CreateDeviceImpl(descriptor));
return {};
}
+ void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+ mUseTieredLimits = useTieredLimits;
+ }
+
void AdapterBase::ResetInternalDeviceForTesting() {
mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.h b/chromium/third_party/dawn/src/dawn_native/Adapter.h
index 7587eeb633d..adf230bf355 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.h
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.h
@@ -18,7 +18,8 @@
#include "dawn_native/DawnNative.h"
#include "dawn_native/Error.h"
-#include "dawn_native/Extensions.h"
+#include "dawn_native/Features.h"
+#include "dawn_native/Limits.h"
#include "dawn_native/dawn_platform.h"
#include <string>
@@ -40,20 +41,27 @@ namespace dawn_native {
DeviceBase* CreateDevice(const DeviceDescriptor* descriptor = nullptr);
+ void RequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+
void ResetInternalDeviceForTesting();
- ExtensionsSet GetSupportedExtensions() const;
- bool SupportsAllRequestedExtensions(
- const std::vector<const char*>& requestedExtensions) const;
+ FeaturesSet GetSupportedFeatures() const;
+ bool SupportsAllRequestedFeatures(const std::vector<const char*>& requestedFeatures) const;
WGPUDeviceProperties GetAdapterProperties() const;
+ bool GetLimits(SupportedLimits* limits) const;
+
+ void SetUseTieredLimits(bool useTieredLimits);
+
virtual bool SupportsExternalImages() const = 0;
protected:
PCIInfo mPCIInfo = {};
wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
std::string mDriverDescription;
- ExtensionsSet mSupportedExtensions;
+ FeaturesSet mSupportedFeatures;
private:
virtual ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
@@ -63,6 +71,8 @@ namespace dawn_native {
virtual MaybeError ResetInternalDeviceForTestingImpl();
InstanceBase* mInstance = nullptr;
wgpu::BackendType mBackend;
+ CombinedLimits mLimits;
+ bool mUseTieredLimits = false;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
index 5878b44ad3b..427db42bb92 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
@@ -35,12 +35,14 @@ namespace dawn_native {
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
: mSampleCount(descriptor->multisample.count) {
- ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
- ++i) {
- mColorAttachmentsSet.set(i);
- mColorFormats[i] = descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
+ if (descriptor->fragment != nullptr) {
+ ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+ ++i) {
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
+ }
}
if (descriptor->depthStencil != nullptr) {
mDepthStencilFormat = descriptor->depthStencil->format;
@@ -53,9 +55,6 @@ namespace dawn_native {
++i) {
TextureViewBase* attachment =
descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
- if (attachment == nullptr) {
- attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].attachment;
- }
mColorAttachmentsSet.set(i);
mColorFormats[i] = attachment->GetFormat().format;
if (mSampleCount == 0) {
@@ -66,9 +65,6 @@ namespace dawn_native {
}
if (descriptor->depthStencilAttachment != nullptr) {
TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
- if (attachment == nullptr) {
- attachment = descriptor->depthStencilAttachment->attachment;
- }
mDepthStencilFormat = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
@@ -130,7 +126,7 @@ namespace dawn_native {
}
AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
- : AttachmentStateBlueprint(blueprint), CachedObject(device) {
+ : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
}
AttachmentState::~AttachmentState() {
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
index ce8b8aaf14c..6c1e434971c 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
@@ -20,6 +20,7 @@
#include "common/ityp_bitset.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/IntegerTypes.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -59,7 +60,9 @@ namespace dawn_native {
uint32_t mSampleCount = 0;
};
- class AttachmentState final : public AttachmentStateBlueprint, public CachedObject {
+ class AttachmentState final : public AttachmentStateBlueprint,
+ public ObjectBase,
+ public CachedObject {
public:
AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index ee59d2366df..37254d737a1 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -53,6 +53,17 @@ if (dawn_enable_vulkan) {
dawn_enable_vulkan_loader && dawn_vulkan_loader_dir != ""
}
+group("dawn_abseil") {
+ # When build_with_chromium=true we need to include "//third_party/abseil-cpp:absl" while
+ # it's beneficial to be more specific with standalone Dawn, especially when it comes to
+ # including it as a dependency in other projects (such as Skia).
+ if (build_with_chromium) {
+ public_deps = [ "$dawn_abseil_dir:absl" ]
+ } else {
+ public_deps = [ "${dawn_root}/third_party/gn/abseil-cpp:str_format" ]
+ }
+}
+
config("dawn_native_internal") {
configs = [ "${dawn_root}/src/common:dawn_internal" ]
@@ -98,6 +109,10 @@ dawn_json_generator("dawn_native_utils_gen") {
"src/dawn_native/wgpu_structs_autogen.cpp",
"src/dawn_native/ValidationUtils_autogen.h",
"src/dawn_native/ValidationUtils_autogen.cpp",
+ "src/dawn_native/webgpu_absl_format_autogen.h",
+ "src/dawn_native/webgpu_absl_format_autogen.cpp",
+ "src/dawn_native/ObjectType_autogen.h",
+ "src/dawn_native/ObjectType_autogen.cpp",
]
}
@@ -161,7 +176,10 @@ source_set("dawn_native_sources") {
# Dependencies that are needed to compile dawn_native entry points in
# FooBackend.cpp need to be public deps so they are propagated to the
# dawn_native target
- public_deps = [ "${dawn_root}/src/dawn_platform" ]
+ public_deps = [
+ ":dawn_abseil",
+ "${dawn_root}/src/dawn_platform",
+ ]
sources = get_target_outputs(":dawn_native_utils_gen")
sources += [
@@ -186,6 +204,8 @@ source_set("dawn_native_sources") {
"BuddyMemoryAllocator.h",
"Buffer.cpp",
"Buffer.h",
+ "BufferLocation.cpp",
+ "BufferLocation.h",
"CachedObject.cpp",
"CachedObject.h",
"CallbackTaskManager.cpp",
@@ -228,17 +248,24 @@ source_set("dawn_native_sources") {
"ErrorInjector.h",
"ErrorScope.cpp",
"ErrorScope.h",
- "Extensions.cpp",
- "Extensions.h",
"ExternalTexture.cpp",
"ExternalTexture.h",
+ "Features.cpp",
+ "Features.h",
"Format.cpp",
"Format.h",
"Forward.h",
+ "IndirectDrawMetadata.cpp",
+ "IndirectDrawMetadata.h",
+ "IndirectDrawValidationEncoder.cpp",
+ "IndirectDrawValidationEncoder.h",
"Instance.cpp",
"Instance.h",
"IntegerTypes.h",
+ "InternalPipelineStore.cpp",
"InternalPipelineStore.h",
+ "Limits.cpp",
+ "Limits.h",
"ObjectBase.cpp",
"ObjectBase.h",
"ObjectContentHasher.cpp",
@@ -282,6 +309,8 @@ source_set("dawn_native_sources") {
"RingBufferAllocator.h",
"Sampler.cpp",
"Sampler.h",
+ "ScratchBuffer.cpp",
+ "ScratchBuffer.h",
"ShaderModule.cpp",
"ShaderModule.h",
"StagingBuffer.cpp",
@@ -358,6 +387,8 @@ source_set("dawn_native_sources") {
"d3d12/CommandRecordingContext.h",
"d3d12/ComputePipelineD3D12.cpp",
"d3d12/ComputePipelineD3D12.h",
+ "d3d12/D3D11on12Util.cpp",
+ "d3d12/D3D11on12Util.h",
"d3d12/D3D12Error.cpp",
"d3d12/D3D12Error.h",
"d3d12/D3D12Info.cpp",
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index 4f5f9045661..0a8beaf1c2e 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -22,6 +22,8 @@
#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Device.h"
#include "dawn_native/ExternalTexture.h"
+#include "dawn_native/ObjectBase.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Sampler.h"
#include "dawn_native/Texture.h"
@@ -69,50 +71,41 @@ namespace dawn_native {
uint64_t bufferSize = entry.buffer->GetSize();
// Handle wgpu::WholeSize, avoiding overflows.
- if (entry.offset > bufferSize) {
- return DAWN_VALIDATION_ERROR("Buffer binding doesn't fit in the buffer");
- }
+ DAWN_INVALID_IF(entry.offset > bufferSize,
+ "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+ bufferSize, entry.buffer);
+
uint64_t bindingSize =
(entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
- if (bindingSize > bufferSize) {
- return DAWN_VALIDATION_ERROR("Buffer binding size larger than the buffer");
- }
+ DAWN_INVALID_IF(bindingSize > bufferSize,
+ "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+ bufferSize, entry.buffer);
- if (bindingSize == 0) {
- return DAWN_VALIDATION_ERROR("Buffer binding size cannot be zero.");
- }
+ DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
// Note that no overflow can happen because we already checked that
// bufferSize >= bindingSize
- if (entry.offset > bufferSize - bindingSize) {
- return DAWN_VALIDATION_ERROR("Buffer binding doesn't fit in the buffer");
- }
+ DAWN_INVALID_IF(
+ entry.offset > bufferSize - bindingSize,
+ "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+ entry.offset, bufferSize, bindingSize, entry.buffer);
- if (!IsAligned(entry.offset, requiredBindingAlignment)) {
- return DAWN_VALIDATION_ERROR(
- "Buffer offset for bind group needs to satisfy the minimum alignment");
- }
+ DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+ "Offset (%u) does not satisfy the minimum %s alignment (%u).",
+ entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
- if (!(entry.buffer->GetUsage() & requiredUsage)) {
- return DAWN_VALIDATION_ERROR("buffer binding usage mismatch");
- }
+ DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+ "Binding usage (%s) of %s doesn't match expected usage (%s).",
+ entry.buffer->GetUsage(), entry.buffer, requiredUsage);
- if (bindingSize < bindingInfo.buffer.minBindingSize) {
- return DAWN_VALIDATION_ERROR(
- "Binding size smaller than minimum buffer size: binding " +
- std::to_string(entry.binding) + " given " + std::to_string(bindingSize) +
- " bytes, required " + std::to_string(bindingInfo.buffer.minBindingSize) +
- " bytes");
- }
+ DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+ "Binding size (%u) is smaller than the minimum binding size (%u).",
+ bindingSize, bindingInfo.buffer.minBindingSize);
- if (bindingSize > maxBindingSize) {
- return DAWN_VALIDATION_ERROR(
- "Binding size bigger than maximum uniform buffer binding size: binding " +
- std::to_string(entry.binding) + " given " + std::to_string(bindingSize) +
- " bytes, maximum is " + std::to_string(kMaxUniformBufferBindingSize) +
- " bytes");
- }
+ DAWN_INVALID_IF(bindingSize > maxBindingSize,
+ "Binding size (%u) is larger than the maximum binding size (%u).",
+ bindingSize, maxBindingSize);
return {};
}
@@ -129,9 +122,8 @@ namespace dawn_native {
TextureViewBase* view = entry.textureView;
Aspect aspect = view->GetAspects();
- if (!HasOneBit(aspect)) {
- return DAWN_VALIDATION_ERROR("Texture view must select a single aspect");
- }
+ // TODO(dawn:563): Format Aspects
+ DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects selected in %s.", view);
TextureBase* texture = view->GetTexture();
switch (bindingInfo.bindingType) {
@@ -141,44 +133,46 @@ namespace dawn_native {
SampleTypeBit requiredType =
SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
- if (!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding)) {
- return DAWN_VALIDATION_ERROR("Texture binding usage mismatch");
- }
-
- if (texture->IsMultisampledTexture() != bindingInfo.texture.multisampled) {
- return DAWN_VALIDATION_ERROR("Texture multisampling mismatch");
- }
-
- if ((supportedTypes & requiredType) == 0) {
- if (IsSubset(SampleTypeBit::Depth, supportedTypes) != 0 &&
- IsSubset(requiredType,
- SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
- device->EmitDeprecationWarning(
- "Using depth textures with 'float' or 'unfilterable-float' texture "
- "bindings is deprecated. Use 'depth' instead.");
- } else {
- return DAWN_VALIDATION_ERROR("Texture component type usage mismatch");
- }
- }
-
- if (entry.textureView->GetDimension() != bindingInfo.texture.viewDimension) {
- return DAWN_VALIDATION_ERROR("Texture view dimension mismatch");
- }
+ DAWN_INVALID_IF(
+ !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+ texture->GetUsage(), texture);
+
+ DAWN_INVALID_IF(
+ texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+ "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+ texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
+
+ // TODO(dawn:563): Improve error message.
+ DAWN_INVALID_IF((supportedTypes & requiredType) == 0,
+ "Texture component type usage mismatch.");
+
+ DAWN_INVALID_IF(
+ entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.texture.viewDimension);
break;
}
case BindingInfoType::StorageTexture: {
- if (!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding)) {
- return DAWN_VALIDATION_ERROR("Storage Texture binding usage mismatch");
- }
+ DAWN_INVALID_IF(
+ !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+ "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+ texture->GetUsage(), texture);
+
ASSERT(!texture->IsMultisampledTexture());
- if (texture->GetFormat().format != bindingInfo.storageTexture.format) {
- return DAWN_VALIDATION_ERROR("Storage texture format mismatch");
- }
- if (entry.textureView->GetDimension() !=
- bindingInfo.storageTexture.viewDimension) {
- return DAWN_VALIDATION_ERROR("Storage texture view dimension mismatch");
- }
+ DAWN_INVALID_IF(
+ texture->GetFormat().format != bindingInfo.storageTexture.format,
+ "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+ texture, bindingInfo.storageTexture.format);
+
+ DAWN_INVALID_IF(
+ entry.textureView->GetDimension() !=
+ bindingInfo.storageTexture.viewDimension,
+ "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+ entry.textureView->GetDimension(), entry.textureView,
+ bindingInfo.storageTexture.viewDimension);
break;
}
default:
@@ -202,25 +196,25 @@ namespace dawn_native {
switch (bindingInfo.sampler.type) {
case wgpu::SamplerBindingType::NonFiltering:
- if (entry.sampler->IsFiltering()) {
- return DAWN_VALIDATION_ERROR(
- "Filtering sampler is incompatible with non-filtering sampler "
- "binding.");
- }
+ DAWN_INVALID_IF(
+ entry.sampler->IsFiltering(),
+ "Filtering sampler %s is incompatible with non-filtering sampler "
+ "binding.",
+ entry.sampler);
DAWN_FALLTHROUGH;
case wgpu::SamplerBindingType::Filtering:
- if (entry.sampler->IsComparison()) {
- return DAWN_VALIDATION_ERROR(
- "Comparison sampler is incompatible with non-comparison sampler "
- "binding.");
- }
+ DAWN_INVALID_IF(
+ entry.sampler->IsComparison(),
+ "Comparison sampler %s is incompatible with non-comparison sampler "
+ "binding.",
+ entry.sampler);
break;
case wgpu::SamplerBindingType::Comparison:
- if (!entry.sampler->IsComparison()) {
- return DAWN_VALIDATION_ERROR(
- "Non-comparison sampler is imcompatible with comparison sampler "
- "binding.");
- }
+ DAWN_INVALID_IF(
+ !entry.sampler->IsComparison(),
+ "Non-comparison sampler %s is imcompatible with comparison sampler "
+ "binding.",
+ entry.sampler);
break;
default:
UNREACHABLE();
@@ -259,9 +253,11 @@ namespace dawn_native {
DAWN_TRY(device->ValidateObject(descriptor->layout));
- if (BindingIndex(descriptor->entryCount) != descriptor->layout->GetBindingCount()) {
- return DAWN_VALIDATION_ERROR("numBindings mismatch");
- }
+ DAWN_INVALID_IF(
+ BindingIndex(descriptor->entryCount) != descriptor->layout->GetBindingCount(),
+ "Number of entries (%u) did not match the number of entries (%u) specified in %s",
+ descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
+ descriptor->layout);
const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
@@ -271,15 +267,17 @@ namespace dawn_native {
const BindGroupEntry& entry = descriptor->entries[i];
const auto& it = bindingMap.find(BindingNumber(entry.binding));
- if (it == bindingMap.end()) {
- return DAWN_VALIDATION_ERROR("setting non-existent binding");
- }
+ DAWN_INVALID_IF(it == bindingMap.end(),
+ "In entries[%u], binding index %u not present in the bind group layout",
+ i, entry.binding);
+
BindingIndex bindingIndex = it->second;
ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
- if (bindingsSet[bindingIndex]) {
- return DAWN_VALIDATION_ERROR("binding set twice");
- }
+ DAWN_INVALID_IF(bindingsSet[bindingIndex],
+ "In entries[%u], binding index %u already used by a previous entry", i,
+ entry.binding);
+
bindingsSet.set(bindingIndex);
const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
@@ -287,17 +285,21 @@ namespace dawn_native {
// Perform binding-type specific validation.
switch (bindingInfo.bindingType) {
case BindingInfoType::Buffer:
- DAWN_TRY(ValidateBufferBinding(device, entry, bindingInfo));
+ DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Buffer", i);
break;
case BindingInfoType::Texture:
case BindingInfoType::StorageTexture:
- DAWN_TRY(ValidateTextureBinding(device, entry, bindingInfo));
+ DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Texture", i);
break;
case BindingInfoType::Sampler:
- DAWN_TRY(ValidateSamplerBinding(device, entry, bindingInfo));
+ DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+ "validating entries[%u] as a Sampler", i);
break;
case BindingInfoType::ExternalTexture:
- DAWN_TRY(ValidateExternalTextureBinding(device, entry, bindingInfo));
+ DAWN_TRY_CONTEXT(ValidateExternalTextureBinding(device, entry, bindingInfo),
+ "validating entries[%u] as an ExternalTexture", i);
break;
}
}
@@ -317,7 +319,7 @@ namespace dawn_native {
BindGroupBase::BindGroupBase(DeviceBase* device,
const BindGroupDescriptor* descriptor,
void* bindingDataStart)
- : ObjectBase(device),
+ : ApiObjectBase(device, kLabelNotImplemented),
mLayout(descriptor->layout),
mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
@@ -397,7 +399,7 @@ namespace dawn_native {
}
BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mBindingData() {
+ : ApiObjectBase(device, tag), mBindingData() {
}
// static
@@ -405,6 +407,10 @@ namespace dawn_native {
return new BindGroupBase(device, ObjectBase::kError);
}
+ ObjectType BindGroupBase::GetType() const {
+ return ObjectType::BindGroup;
+ }
+
BindGroupLayoutBase* BindGroupBase::GetLayout() {
ASSERT(!IsError());
return mLayout.Get();
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.h b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
index a636fe8f693..1ce4b9fe433 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
@@ -39,10 +39,12 @@ namespace dawn_native {
uint64_t size;
};
- class BindGroupBase : public ObjectBase {
+ class BindGroupBase : public ApiObjectBase {
public:
static BindGroupBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
BindGroupLayoutBase* GetLayout();
const BindGroupLayoutBase* GetLayout() const;
BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index 1fc4a407881..5c2ea42e116 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -18,7 +18,9 @@
#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/ObjectContentHasher.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PerStage.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -56,8 +58,9 @@ namespace dawn_native {
return {};
case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
} // anonymous namespace
@@ -149,12 +152,6 @@ namespace dawn_native {
if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
allowedStages &= ~wgpu::ShaderStage::Vertex;
}
-
- // TODO(crbug.com/dawn/1025): Remove after the deprecation period.
- if (storageTexture.access == wgpu::StorageTextureAccess::ReadOnly) {
- device->EmitDeprecationWarning(
- "Readonly storage textures are deprecated and will be removed.");
- }
}
const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
@@ -185,7 +182,6 @@ namespace dawn_native {
namespace {
-
bool operator!=(const BindingInfo& a, const BindingInfo& b) {
if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
return true;
@@ -209,6 +205,7 @@ namespace dawn_native {
case BindingInfoType::ExternalTexture:
return false;
}
+ UNREACHABLE();
}
bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
@@ -362,8 +359,11 @@ namespace dawn_native {
// BindGroupLayoutBase
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor)
- : CachedObject(device), mBindingInfo(BindingIndex(descriptor->entryCount)) {
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : ApiObjectBase(device, kLabelNotImplemented),
+ mBindingInfo(BindingIndex(descriptor->entryCount)),
+ mPipelineCompatibilityToken(pipelineCompatibilityToken) {
std::vector<BindGroupLayoutEntry> sortedBindings(
descriptor->entries, descriptor->entries + descriptor->entryCount);
@@ -388,7 +388,7 @@ namespace dawn_native {
}
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag) {
+ : ApiObjectBase(device, tag) {
}
BindGroupLayoutBase::~BindGroupLayoutBase() {
@@ -403,6 +403,10 @@ namespace dawn_native {
return new BindGroupLayoutBase(device, ObjectBase::kError);
}
+ ObjectType BindGroupLayoutBase::GetType() const {
+ return ObjectType::BindGroupLayout;
+ }
+
const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
ASSERT(!IsError());
return mBindingMap;
@@ -421,6 +425,8 @@ namespace dawn_native {
size_t BindGroupLayoutBase::ComputeContentHash() {
ObjectContentHasher recorder;
+ recorder.Record(mPipelineCompatibilityToken);
+
// std::map is sorted by key, so two BGLs constructed in different orders
// will still record the same.
for (const auto& it : mBindingMap) {
@@ -440,15 +446,7 @@ namespace dawn_native {
bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
const BindGroupLayoutBase* b) const {
- if (a->GetBindingCount() != b->GetBindingCount()) {
- return false;
- }
- for (BindingIndex i{0}; i < a->GetBindingCount(); ++i) {
- if (a->mBindingInfo[i] != b->mBindingInfo[i]) {
- return false;
- }
- }
- return a->mBindingMap == b->mBindingMap;
+ return a->IsLayoutEqual(b);
}
BindingIndex BindGroupLayoutBase::GetBindingCount() const {
@@ -474,6 +472,27 @@ namespace dawn_native {
return mBindingCounts;
}
+ bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken) const {
+ if (!excludePipelineCompatibiltyToken &&
+ GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+ return false;
+ }
+ if (GetBindingCount() != other->GetBindingCount()) {
+ return false;
+ }
+ for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+ if (mBindingInfo[i] != other->mBindingInfo[i]) {
+ return false;
+ }
+ }
+ return mBindingMap == other->mBindingMap;
+ }
+
+ PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+ return mPipelineCompatibilityToken;
+ }
+
size_t BindGroupLayoutBase::GetBindingDataSize() const {
// | ------ buffer-specific ----------| ------------ object pointers -------------|
// | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
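
Note: the equality and hashing changes above make two bind group layouts compare (and cache) differently when their pipeline compatibility tokens differ, unless the token is explicitly excluded. A minimal standalone sketch of that comparison, with invented names (LayoutKey, plain integers for the token and binding info) standing in for Dawn's types:

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct LayoutKey {
        std::map<uint32_t, int> bindings;         // binding number -> binding info (stand-in)
        uint64_t pipelineCompatibilityToken = 0;

        bool IsLayoutEqual(const LayoutKey& other, bool excludeToken = false) const {
            // A token mismatch fails the comparison unless the caller opts out,
            // mirroring IsLayoutEqual(other, excludePipelineCompatibiltyToken).
            if (!excludeToken &&
                pipelineCompatibilityToken != other.pipelineCompatibilityToken) {
                return false;
            }
            return bindings == other.bindings;
        }
    };

    int main() {
        LayoutKey a;
        a.bindings = {{0, 1}, {3, 2}};
        a.pipelineCompatibilityToken = 1;
        LayoutKey b = a;
        b.pipelineCompatibilityToken = 2;
        std::cout << a.IsLayoutEqual(b) << "\n";        // 0: same entries, different tokens
        std::cout << a.IsLayoutEqual(b, true) << "\n";  // 1: token excluded from comparison
    }
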
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 8db649252da..5f75eb62eae 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -24,6 +24,7 @@
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -39,13 +40,17 @@ namespace dawn_native {
// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.
- class BindGroupLayoutBase : public CachedObject {
+ class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
public:
- BindGroupLayoutBase(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroupLayoutBase(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
~BindGroupLayoutBase() override;
static BindGroupLayoutBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
// A map from the BindingNumber to its packed BindingIndex.
using BindingMap = std::map<BindingNumber, BindingIndex>;
@@ -76,6 +81,13 @@ namespace dawn_native {
// should be used to get typed integer counts.
const BindingCounts& GetBindingCountInfo() const;
+ // Tests that the BindingInfo of two bind group layouts are equal,
+ // optionally ignoring their pipeline compatibility tokens.
+ bool IsLayoutEqual(const BindGroupLayoutBase* other,
+ bool excludePipelineCompatibiltyToken = false) const;
+
+ PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
+
struct BufferBindingData {
uint64_t offset;
uint64_t size;
@@ -115,6 +127,10 @@ namespace dawn_native {
// Map from BindGroupLayoutEntry.binding to packed indices.
BindingMap mBindingMap;
+
+ // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+ const PipelineCompatibilityToken mPipelineCompatibilityToken =
+ PipelineCompatibilityToken(0);
};
} // namespace dawn_native
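
Note: the class comment above describes how sparse, arbitrary BindingNumber values are packed into a dense range of BindingIndex values via the sorted BindingMap. A small self-contained sketch of that packing, with plain integers standing in for Dawn's typed indices:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    int main() {
        // Binding numbers as declared in a descriptor: arbitrary and sparse.
        std::vector<uint32_t> declaredBindings = {7, 0, 42};

        // std::map keeps keys sorted, so the packed order is deterministic no matter
        // the order in which the bindings were declared.
        std::map<uint32_t, std::size_t> bindingMap;
        for (uint32_t number : declaredBindings) {
            bindingMap[number] = 0;  // packed index filled in below
        }
        std::size_t packedIndex = 0;
        for (auto& entry : bindingMap) {
            entry.second = packedIndex++;
        }

        for (const auto& entry : bindingMap) {
            std::cout << "binding " << entry.first << " -> index " << entry.second << "\n";
        }
    }
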
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index 3e1b72f460d..86ffcb97ded 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -20,6 +20,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Queue.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -98,29 +99,28 @@ namespace dawn_native {
} // anonymous namespace
MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
DAWN_TRY(ValidateBufferUsage(descriptor->usage));
wgpu::BufferUsage usage = descriptor->usage;
const wgpu::BufferUsage kMapWriteAllowedUsages =
wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
- if (usage & wgpu::BufferUsage::MapWrite && (usage & kMapWriteAllowedUsages) != usage) {
- return DAWN_VALIDATION_ERROR("Only CopySrc is allowed with MapWrite");
- }
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+ "Buffer usages (%s) contains %s but is not a subset of %s.", usage,
+ wgpu::BufferUsage::MapWrite, kMapWriteAllowedUsages);
const wgpu::BufferUsage kMapReadAllowedUsages =
wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
- if (usage & wgpu::BufferUsage::MapRead && (usage & kMapReadAllowedUsages) != usage) {
- return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
- }
+ DAWN_INVALID_IF(
+ usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+ "Buffer usages (%s) contains %s but is not a subset of %s.", usage,
+ wgpu::BufferUsage::MapRead, kMapReadAllowedUsages);
- if (descriptor->mappedAtCreation && descriptor->size % 4 != 0) {
- return DAWN_VALIDATION_ERROR("size must be aligned to 4 when mappedAtCreation is true");
- }
+ DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+ "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+ descriptor->size);
return {};
}
@@ -128,7 +128,7 @@ namespace dawn_native {
// Buffer
BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
- : ObjectBase(device),
+ : ApiObjectBase(device, descriptor->label),
mSize(descriptor->size),
mUsage(descriptor->usage),
mState(BufferState::Unmapped) {
@@ -147,12 +147,18 @@ namespace dawn_native {
if (mUsage & wgpu::BufferUsage::QueryResolve) {
mUsage |= kInternalStorageBuffer;
}
+
+ // We also add internal storage usage for Indirect buffers if validation is enabled, since
+ // validation involves binding them as storage buffers for use in a compute pass.
+ if ((mUsage & wgpu::BufferUsage::Indirect) && device->IsValidationEnabled()) {
+ mUsage |= kInternalStorageBuffer;
+ }
}
BufferBase::BufferBase(DeviceBase* device,
const BufferDescriptor* descriptor,
ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+ : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
if (descriptor->mappedAtCreation) {
mState = BufferState::MappedAtCreation;
mMapOffset = 0;
@@ -172,6 +178,10 @@ namespace dawn_native {
return new ErrorBuffer(device, descriptor);
}
+ ObjectType BufferBase::GetType() const {
+ return ObjectType::Buffer;
+ }
+
uint64_t BufferBase::GetSize() const {
ASSERT(!IsError());
return mSize;
@@ -254,13 +264,14 @@ namespace dawn_native {
switch (mState) {
case BufferState::Destroyed:
- return DAWN_VALIDATION_ERROR("Destroyed buffer used in a submit");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
case BufferState::Mapped:
case BufferState::MappedAtCreation:
- return DAWN_VALIDATION_ERROR("Buffer used in a submit while mapped");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
case BufferState::Unmapped:
return {};
}
+ UNREACHABLE();
}
void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
@@ -292,7 +303,9 @@ namespace dawn_native {
}
WGPUBufferMapAsyncStatus status;
- if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status))) {
+ if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+ "calling %s.MapAsync(%s, %u, %u, ...)", this, mode, offset,
+ size)) {
if (callback) {
callback(status, userdata);
}
@@ -348,7 +361,7 @@ namespace dawn_native {
static_cast<ErrorBuffer*>(this)->ClearMappedData();
mState = BufferState::Destroyed;
}
- if (GetDevice()->ConsumedError(ValidateDestroy())) {
+ if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy()", this)) {
return;
}
ASSERT(!IsError());
@@ -399,7 +412,7 @@ namespace dawn_native {
static_cast<ErrorBuffer*>(this)->ClearMappedData();
mState = BufferState::Unmapped;
}
- if (GetDevice()->ConsumedError(ValidateUnmap())) {
+ if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap()", this)) {
return;
}
ASSERT(!IsError());
@@ -427,32 +440,6 @@ namespace dawn_native {
mState = BufferState::Unmapped;
}
- MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
- WGPUBufferMapAsyncStatus* status) const {
- *status = WGPUBufferMapAsyncStatus_DeviceLost;
- DAWN_TRY(GetDevice()->ValidateIsAlive());
-
- *status = WGPUBufferMapAsyncStatus_Error;
- DAWN_TRY(GetDevice()->ValidateObject(this));
-
- switch (mState) {
- case BufferState::Mapped:
- case BufferState::MappedAtCreation:
- return DAWN_VALIDATION_ERROR("Buffer is already mapped");
- case BufferState::Destroyed:
- return DAWN_VALIDATION_ERROR("Buffer is destroyed");
- case BufferState::Unmapped:
- break;
- }
-
- if (!(mUsage & requiredUsage)) {
- return DAWN_VALIDATION_ERROR("Buffer needs the correct map usage bit");
- }
-
- *status = WGPUBufferMapAsyncStatus_Success;
- return {};
- }
-
MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
size_t offset,
size_t size,
@@ -463,44 +450,37 @@ namespace dawn_native {
*status = WGPUBufferMapAsyncStatus_Error;
DAWN_TRY(GetDevice()->ValidateObject(this));
- if (offset % 8 != 0) {
- return DAWN_VALIDATION_ERROR("offset must be a multiple of 8");
- }
+ DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+ DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
- if (size % 4 != 0) {
- return DAWN_VALIDATION_ERROR("size must be a multiple of 4");
- }
-
- if (uint64_t(offset) > mSize || uint64_t(size) > mSize - uint64_t(offset)) {
- return DAWN_VALIDATION_ERROR("size + offset must fit in the buffer");
- }
+ DAWN_INVALID_IF(uint64_t(offset) > mSize || uint64_t(size) > mSize - uint64_t(offset),
+ "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+ offset, size, mSize, this);
switch (mState) {
case BufferState::Mapped:
case BufferState::MappedAtCreation:
- return DAWN_VALIDATION_ERROR("Buffer is already mapped");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
case BufferState::Destroyed:
- return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
case BufferState::Unmapped:
break;
}
bool isReadMode = mode & wgpu::MapMode::Read;
bool isWriteMode = mode & wgpu::MapMode::Write;
- if (!(isReadMode ^ isWriteMode)) {
- return DAWN_VALIDATION_ERROR("Exactly one of Read or Write mode must be set");
- }
+ DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+ wgpu::MapMode::Write, wgpu::MapMode::Read);
if (mode & wgpu::MapMode::Read) {
- if (!(mUsage & wgpu::BufferUsage::MapRead)) {
- return DAWN_VALIDATION_ERROR("The buffer must have the MapRead usage");
- }
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapRead);
} else {
ASSERT(mode & wgpu::MapMode::Write);
-
- if (!(mUsage & wgpu::BufferUsage::MapWrite)) {
- return DAWN_VALIDATION_ERROR("The buffer must have the MapWrite usage");
- }
+ DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+ "The buffer usages (%s) do not contain %s.", mUsage,
+ wgpu::BufferUsage::MapWrite);
}
*status = WGPUBufferMapAsyncStatus_Success;
@@ -543,6 +523,7 @@ namespace dawn_native {
case BufferState::Destroyed:
return false;
}
+ UNREACHABLE();
}
MaybeError BufferBase::ValidateUnmap() const {
@@ -556,10 +537,11 @@ namespace dawn_native {
// even if it did not have a mappable usage.
return {};
case BufferState::Unmapped:
- return DAWN_VALIDATION_ERROR("Buffer is unmapped");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
case BufferState::Destroyed:
- return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+ return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
}
+ UNREACHABLE();
}
MaybeError BufferBase::ValidateDestroy() const {
@@ -589,4 +571,5 @@ namespace dawn_native {
bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
return offset == 0 && size == GetSize();
}
+
} // namespace dawn_native
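
Note: the rewritten buffer validation above expresses the mapping rules as subset checks: a MapWrite buffer may only additionally carry CopySrc, and a MapRead buffer only CopyDst. A standalone sketch of the same rule with invented flag values (Dawn's wgpu::BufferUsage bit values differ) and an IsSubset helper of the same shape:

    #include <cstdint>
    #include <iostream>

    constexpr uint32_t kMapRead  = 1u << 0;
    constexpr uint32_t kMapWrite = 1u << 1;
    constexpr uint32_t kCopySrc  = 1u << 2;
    constexpr uint32_t kCopyDst  = 1u << 3;

    // True when every bit in `subset` is also set in `set`.
    bool IsSubset(uint32_t subset, uint32_t set) {
        return (subset & ~set) == 0;
    }

    bool ValidateUsage(uint32_t usage) {
        // MapWrite may only be combined with CopySrc.
        if ((usage & kMapWrite) && !IsSubset(usage, kMapWrite | kCopySrc)) {
            return false;
        }
        // MapRead may only be combined with CopyDst.
        if ((usage & kMapRead) && !IsSubset(usage, kMapRead | kCopyDst)) {
            return false;
        }
        return true;
    }

    int main() {
        std::cout << ValidateUsage(kMapWrite | kCopySrc) << "\n";  // 1: allowed combination
        std::cout << ValidateUsage(kMapWrite | kCopyDst) << "\n";  // 0: CopyDst is not allowed with MapWrite
    }
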
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 543ba7129ed..6ba57553c30 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -40,7 +40,7 @@ namespace dawn_native {
static constexpr wgpu::BufferUsage kMappableBufferUsages =
wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
- class BufferBase : public ObjectBase {
+ class BufferBase : public ApiObjectBase {
enum class BufferState {
Unmapped,
Mapped,
@@ -53,6 +53,8 @@ namespace dawn_native {
static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+ ObjectType GetType() const override;
+
uint64_t GetSize() const;
uint64_t GetAllocatedSize() const;
wgpu::BufferUsage GetUsage() const;
@@ -103,8 +105,6 @@ namespace dawn_native {
MaybeError CopyFromStagingBuffer();
void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
- MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
- WGPUBufferMapAsyncStatus* status) const;
MaybeError ValidateMapAsync(wgpu::MapMode mode,
size_t offset,
size_t size,
diff --git a/chromium/third_party/dawn/src/dawn_native/BufferLocation.cpp b/chromium/third_party/dawn/src/dawn_native/BufferLocation.cpp
new file mode 100644
index 00000000000..5ee24f2ff8b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BufferLocation.cpp
@@ -0,0 +1,54 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/BufferLocation.h"
+
+namespace dawn_native {
+
+ BufferLocation::BufferLocation() = default;
+
+ BufferLocation::BufferLocation(BufferBase* buffer, uint64_t offset)
+ : mBuffer(buffer), mOffset(offset) {
+ }
+
+ BufferLocation::~BufferLocation() = default;
+
+ // static
+ Ref<BufferLocation> BufferLocation::New() {
+ return AcquireRef(new BufferLocation());
+ }
+
+ // static
+ Ref<BufferLocation> BufferLocation::New(BufferBase* buffer, uint64_t offset) {
+ return AcquireRef(new BufferLocation(buffer, offset));
+ }
+
+ bool BufferLocation::IsNull() const {
+ return mBuffer.Get() == nullptr;
+ }
+
+ BufferBase* BufferLocation::GetBuffer() const {
+ return mBuffer.Get();
+ }
+
+ uint64_t BufferLocation::GetOffset() const {
+ return mOffset;
+ }
+
+ void BufferLocation::Set(BufferBase* buffer, uint64_t offset) {
+ mBuffer = buffer;
+ mOffset = offset;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BufferLocation.h b/chromium/third_party/dawn/src/dawn_native/BufferLocation.h
new file mode 100644
index 00000000000..4ff733d9724
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BufferLocation.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BUFFERLOCATION_H_
+#define DAWNNATIVE_BUFFERLOCATION_H_
+
+#include "common/RefCounted.h"
+#include "dawn_native/Buffer.h"
+
+#include <cstdint>
+
+namespace dawn_native {
+
+ // A ref-counted wrapper around a Buffer ref and an offset into the buffer.
+ class BufferLocation : public RefCounted {
+ public:
+ BufferLocation();
+ BufferLocation(BufferBase* buffer, uint64_t offset = 0);
+ ~BufferLocation();
+
+ static Ref<BufferLocation> New();
+ static Ref<BufferLocation> New(BufferBase* buffer, uint64_t offset = 0);
+
+ bool IsNull() const;
+
+ BufferBase* GetBuffer() const;
+ uint64_t GetOffset() const;
+
+ void Set(BufferBase* buffer, uint64_t offset);
+
+ private:
+ Ref<BufferBase> mBuffer;
+ uint64_t mOffset = 0;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_BUFFERLOCATION_H_
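
Note: BufferLocation is a ref-counted (buffer, offset) pair, so commands recorded earlier can hold a handle whose target is filled in or redirected later (see the deferred updates applied in CommandBuffer.cpp below). A rough sketch of that usage pattern, with std::shared_ptr and a string standing in for Dawn's Ref<BufferBase>; the buffer names are invented:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <string>

    struct Location {
        std::string buffer;  // stand-in for a Ref<BufferBase>
        uint64_t offset;
    };

    int main() {
        auto loc = std::make_shared<Location>(Location{"indirectBuffer", 0});
        auto recordedCommand = loc;  // a recorded command keeps the same handle

        // Later, e.g. after indirect-draw validation, the location is repointed.
        *loc = Location{"validatedScratchBuffer", 256};

        std::cout << recordedCommand->buffer << " @ " << recordedCommand->offset << "\n";
        // prints: validatedScratchBuffer @ 256
    }
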
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
index d93e8a4e12b..6b90e2cdc56 100644
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -50,6 +50,8 @@ target_sources(dawn_native PRIVATE
"BuddyMemoryAllocator.h"
"Buffer.cpp"
"Buffer.h"
+ "BufferLocation.cpp"
+ "BufferLocation.h"
"CachedObject.cpp"
"CachedObject.h"
"CallbackTaskManager.cpp"
@@ -92,10 +94,14 @@ target_sources(dawn_native PRIVATE
"ErrorInjector.h"
"ErrorScope.cpp"
"ErrorScope.h"
- "Extensions.cpp"
- "Extensions.h"
+ "Features.cpp"
+ "Features.h"
"ExternalTexture.cpp"
"ExternalTexture.h"
+ "IndirectDrawMetadata.cpp"
+ "IndirectDrawMetadata.h"
+ "IndirectDrawValidationEncoder.cpp"
+ "IndirectDrawValidationEncoder.h"
"ObjectContentHasher.cpp"
"ObjectContentHasher.h"
"Format.cpp"
@@ -103,8 +109,11 @@ target_sources(dawn_native PRIVATE
"Forward.h"
"Instance.cpp"
"Instance.h"
+ "InternalPipelineStore.cpp"
"InternalPipelineStore.h"
"IntegerTypes.h"
+ "Limits.cpp"
+ "Limits.h"
"ObjectBase.cpp"
"ObjectBase.h"
"PassResourceUsage.h"
@@ -146,6 +155,8 @@ target_sources(dawn_native PRIVATE
"RingBufferAllocator.h"
"Sampler.cpp"
"Sampler.h"
+ "ScratchBuffer.cpp"
+ "ScratchBuffer.h"
"ShaderModule.cpp"
"ShaderModule.h"
"StagingBuffer.cpp"
@@ -174,13 +185,20 @@ target_link_libraries(dawn_native
dawn_platform
dawn_internal_config
libtint
- spirv-cross-core
- spirv-cross-glsl
- spirv-cross-hlsl
- spirv-cross-msl
SPIRV-Tools-opt
+ absl_strings
+ absl_str_format_internal
)
+if (DAWN_REQUIRES_SPIRV_CROSS)
+ target_link_libraries(dawn_native PRIVATE spirv-cross-core)
+ if (DAWN_ENABLE_OPENGL)
+ target_link_libraries(dawn_native PRIVATE spirv-cross-glsl)
+ endif()
+endif()
+
+target_include_directories(dawn_native PRIVATE ${DAWN_ABSEIL_DIR})
+
if (DAWN_USE_X11)
find_package(X11 REQUIRED)
target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
@@ -232,6 +250,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/CommandRecordingContext.h"
"d3d12/ComputePipelineD3D12.cpp"
"d3d12/ComputePipelineD3D12.h"
+ "d3d12/D3D11on12Util.cpp"
+ "d3d12/D3D11on12Util.h"
"d3d12/D3D12Error.cpp"
"d3d12/D3D12Error.h"
"d3d12/D3D12Info.cpp"
diff --git a/chromium/third_party/dawn/src/dawn_native/CachedObject.h b/chromium/third_party/dawn/src/dawn_native/CachedObject.h
index abf45253866..ff84e1e0d03 100644
--- a/chromium/third_party/dawn/src/dawn_native/CachedObject.h
+++ b/chromium/third_party/dawn/src/dawn_native/CachedObject.h
@@ -15,8 +15,6 @@
#ifndef DAWNNATIVE_CACHED_OBJECT_H_
#define DAWNNATIVE_CACHED_OBJECT_H_
-#include "dawn_native/ObjectBase.h"
-
#include <cstddef>
namespace dawn_native {
@@ -25,10 +23,8 @@ namespace dawn_native {
// we increase the refcount of an existing object.
// When an object is successfully created, the device should call
// SetIsCachedReference() and insert the object into the cache.
- class CachedObject : public ObjectBase {
+ class CachedObject {
public:
- using ObjectBase::ObjectBase;
-
bool IsCachedReference() const;
// Functor necessary for the unordered_set<CachedObject*>-based cache.
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
index 5ae5cfc385e..9516b59c558 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
@@ -20,6 +20,7 @@
#include <algorithm>
#include <climits>
#include <cstdlib>
+#include <utility>
namespace dawn_native {
@@ -43,22 +44,32 @@ namespace dawn_native {
CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
ASSERT(IsEmpty());
- mBlocks = std::move(other.mBlocks);
- other.Reset();
+ if (!other.IsEmpty()) {
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
+ }
Reset();
return *this;
}
- CommandIterator::CommandIterator(CommandAllocator&& allocator)
+ CommandIterator::CommandIterator(CommandAllocator allocator)
: mBlocks(allocator.AcquireBlocks()) {
Reset();
}
- CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
+ void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
ASSERT(IsEmpty());
- mBlocks = allocator.AcquireBlocks();
+ mBlocks.clear();
+ for (CommandAllocator& allocator : allocators) {
+ CommandBlocks blocks = allocator.AcquireBlocks();
+ if (!blocks.empty()) {
+ mBlocks.reserve(mBlocks.size() + blocks.size());
+ for (BlockDef& block : blocks) {
+ mBlocks.push_back(std::move(block));
+ }
+ }
+ }
Reset();
- return *this;
}
bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
@@ -92,7 +103,7 @@ namespace dawn_native {
return;
}
- for (auto& block : mBlocks) {
+ for (BlockDef& block : mBlocks) {
free(block.block);
}
mBlocks.clear();
@@ -114,13 +125,49 @@ namespace dawn_native {
// - Better block allocation, maybe have Dawn API to say command buffer is going to have size
// close to another
- CommandAllocator::CommandAllocator()
- : mCurrentPtr(reinterpret_cast<uint8_t*>(&mDummyEnum[0])),
- mEndPtr(reinterpret_cast<uint8_t*>(&mDummyEnum[1])) {
+ CommandAllocator::CommandAllocator() {
+ ResetPointers();
}
CommandAllocator::~CommandAllocator() {
- ASSERT(mBlocks.empty());
+ Reset();
+ }
+
+ CommandAllocator::CommandAllocator(CommandAllocator&& other)
+ : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+ other.mBlocks.clear();
+ if (!other.IsEmpty()) {
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ } else {
+ ResetPointers();
+ }
+ other.Reset();
+ }
+
+ CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+ Reset();
+ if (!other.IsEmpty()) {
+ std::swap(mBlocks, other.mBlocks);
+ mLastAllocationSize = other.mLastAllocationSize;
+ mCurrentPtr = other.mCurrentPtr;
+ mEndPtr = other.mEndPtr;
+ }
+ other.Reset();
+ return *this;
+ }
+
+ void CommandAllocator::Reset() {
+ for (BlockDef& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ mLastAllocationSize = kDefaultBaseAllocationSize;
+ ResetPointers();
+ }
+
+ bool CommandAllocator::IsEmpty() const {
+ return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
}
CommandBlocks&& CommandAllocator::AcquireBlocks() {
@@ -173,4 +220,9 @@ namespace dawn_native {
return true;
}
+ void CommandAllocator::ResetPointers() {
+ mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
+ mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
+ }
+
} // namespace dawn_native
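
Note: the new move operations above transfer the allocator's blocks and return the moved-from allocator to its initial empty state rather than leaving it unspecified. A minimal sketch of that contract (BlockOwner and its members are invented for the sketch):

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    class BlockOwner {
      public:
        BlockOwner() = default;
        BlockOwner(BlockOwner&& other) : mBlocks(std::move(other.mBlocks)) {
            other.Reset();  // moved-from object goes back to its initial empty state
        }
        BlockOwner& operator=(BlockOwner&& other) {
            Reset();
            std::swap(mBlocks, other.mBlocks);
            other.Reset();
            return *this;
        }
        void Reset() {
            mBlocks.clear();
        }
        bool IsEmpty() const {
            return mBlocks.empty();
        }
        void Append(std::size_t blockSize) {
            mBlocks.push_back(blockSize);
        }

      private:
        std::vector<std::size_t> mBlocks;  // stand-in for CommandBlocks
    };

    int main() {
        BlockOwner a;
        a.Append(2048);
        BlockOwner b = std::move(a);
        std::cout << a.IsEmpty() << " " << b.IsEmpty() << "\n";  // prints: 1 0
    }
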
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
index 2713545cc9b..7a706aad9e7 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
@@ -75,8 +75,10 @@ namespace dawn_native {
CommandIterator(CommandIterator&& other);
CommandIterator& operator=(CommandIterator&& other);
- CommandIterator(CommandAllocator&& allocator);
- CommandIterator& operator=(CommandAllocator&& allocator);
+ // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+ explicit CommandIterator(CommandAllocator allocator);
+
+ void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
template <typename E>
bool NextCommandId(E* commandId) {
@@ -149,6 +151,15 @@ namespace dawn_native {
CommandAllocator();
~CommandAllocator();
+ // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+ CommandAllocator(CommandAllocator&&);
+ CommandAllocator& operator=(CommandAllocator&&);
+
+ // Frees all blocks held by the allocator and restores it to its initial empty state.
+ void Reset();
+
+ bool IsEmpty() const;
+
template <typename T, typename E>
T* Allocate(E commandId) {
static_assert(sizeof(E) == sizeof(uint32_t), "");
@@ -186,6 +197,9 @@ namespace dawn_native {
static constexpr size_t kWorstCaseAdditionalSize =
sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+ // The default value of mLastAllocationSize.
+ static constexpr size_t kDefaultBaseAllocationSize = 2048;
+
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
@@ -237,19 +251,21 @@ namespace dawn_native {
bool GetNewBlock(size_t minimumSize);
+ void ResetPointers();
+
CommandBlocks mBlocks;
- size_t mLastAllocationSize = 2048;
+ size_t mLastAllocationSize = kDefaultBaseAllocationSize;
+
+ // Data used for the block range at initialization so that the first call to Allocate sees
+ // there is not enough space and calls GetNewBlock. This avoids having to special case the
+ // initialization in Allocate.
+ uint32_t mDummyEnum[1] = {0};
// Pointers to the current range of allocation in the block. Guaranteed to allow for at
// least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
// be written. Nullptr iff the blocks were moved out.
uint8_t* mCurrentPtr = nullptr;
uint8_t* mEndPtr = nullptr;
-
- // Data used for the block range at initialization so that the first call to Allocate sees
- // there is not enough space and calls GetNewBlock. This avoids having to special case the
- // initialization in Allocate.
- uint32_t mDummyEnum[1] = {0};
};
} // namespace dawn_native
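
Note: the mDummyEnum comment above describes a sentinel trick: the allocator starts with a one-word placeholder range so the first real allocation already fails the capacity test and takes the GetNewBlock path, avoiding a special case for the empty state. A self-contained sketch of that idea (TinyArena is invented; Dawn's real allocator also reserves space for an end-of-block marker, omitted here):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <vector>

    class TinyArena {
      public:
        TinyArena() {
            ResetPointers();
        }
        ~TinyArena() {
            for (void* block : mBlocks) {
                std::free(block);
            }
        }

        uint8_t* Allocate(std::size_t size) {
            if (static_cast<std::size_t>(mEnd - mCurrent) < size) {
                // Covers both "current block is full" and "only the dummy range exists";
                // real requests always exceed the one-word dummy, so no special case.
                GetNewBlock(size);
            }
            uint8_t* result = mCurrent;
            mCurrent += size;
            return result;
        }

      private:
        void GetNewBlock(std::size_t minimumSize) {
            std::size_t blockSize = std::max<std::size_t>(minimumSize, 2048);
            mBlocks.push_back(std::malloc(blockSize));
            mCurrent = static_cast<uint8_t*>(mBlocks.back());
            mEnd = mCurrent + blockSize;
        }
        void ResetPointers() {
            mCurrent = reinterpret_cast<uint8_t*>(&mDummy[0]);
            mEnd = reinterpret_cast<uint8_t*>(&mDummy[1]);
        }

        std::vector<void*> mBlocks;
        uint32_t mDummy[1] = {0};
        uint8_t* mCurrent = nullptr;
        uint8_t* mEnd = nullptr;
    };

    int main() {
        TinyArena arena;
        std::cout << (arena.Allocate(64) != nullptr) << "\n";  // 1: first call grabs a real block
    }
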
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index b6ba4f05f3d..5b07bb64a09 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -20,29 +20,42 @@
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Format.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Texture.h"
namespace dawn_native {
CommandBufferBase::CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor*)
- : ObjectBase(encoder->GetDevice()),
+ : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
mCommands(encoder->AcquireCommands()),
mResourceUsages(encoder->AcquireResourceUsages()) {
}
CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : ApiObjectBase(device, tag) {
}
CommandBufferBase::~CommandBufferBase() {
Destroy();
}
+ void CommandBufferBase::DoNextSetValidatedBufferLocationsInternal() {
+ SetValidatedBufferLocationsInternalCmd* cmd =
+ mCommands.NextCommand<SetValidatedBufferLocationsInternalCmd>();
+ for (const DeferredBufferLocationUpdate& update : cmd->updates) {
+ update.location->Set(update.buffer.Get(), update.offset);
+ }
+ }
+
// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
return new CommandBufferBase(device, ObjectBase::kError);
}
+ ObjectType CommandBufferBase::GetType() const {
+ return ObjectType::CommandBuffer;
+ }
+
MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
ASSERT(!IsError());
@@ -127,7 +140,6 @@ namespace dawn_native {
break;
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
index e90d320280a..2800929d367 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
@@ -30,19 +30,23 @@ namespace dawn_native {
struct CopyTextureToBufferCmd;
struct TextureCopy;
- class CommandBufferBase : public ObjectBase {
+ class CommandBufferBase : public ApiObjectBase {
public:
CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
static CommandBufferBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
MaybeError ValidateCanUseInSubmitNow() const;
void Destroy();
const CommandBufferResourceUsage& GetResourceUsages() const;
protected:
- ~CommandBufferBase();
+ ~CommandBufferBase() override;
+
+ void DoNextSetValidatedBufferLocationsInternal();
CommandIterator mCommands;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index f3a6b470371..cff63f6eb2e 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -22,6 +22,10 @@
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/RenderPipeline.h"
+// TODO(dawn:563): None of the error messages in this file include the buffer objects they are
+// validating against. It would be nice to improve that, but difficult to do without incurring
+// additional tracking costs.
+
namespace dawn_native {
namespace {
@@ -88,17 +92,25 @@ namespace dawn_native {
mLastRenderPipeline->GetVertexBuffer(usedSlotVertex);
uint64_t arrayStride = vertexBuffer.arrayStride;
uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
+
if (arrayStride == 0) {
- if (vertexBuffer.usedBytesInStride > bufferSize) {
- return DAWN_VALIDATION_ERROR("Vertex buffer out of bound");
- }
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotVertex),
+ vertexBuffer.usedBytesInStride);
} else {
+ uint64_t requiredSize =
+ (static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride;
// firstVertex and vertexCount are in uint32_t, and arrayStride must not
// be larger than kMaxVertexBufferArrayStride, which is currently 2048. So by
// doing checks in uint64_t we avoid overflows.
- if ((static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride > bufferSize) {
- return DAWN_VALIDATION_ERROR("Vertex buffer out of bound");
- }
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
+ firstVertex, vertexCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotVertex), arrayStride);
}
}
@@ -118,17 +130,23 @@ namespace dawn_native {
uint64_t arrayStride = vertexBuffer.arrayStride;
uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
if (arrayStride == 0) {
- if (vertexBuffer.usedBytesInStride > bufferSize) {
- return DAWN_VALIDATION_ERROR("Vertex buffer out of bound");
- }
+ DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+ "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+ "is smaller than the required size for all attributes (%u)",
+ bufferSize, static_cast<uint8_t>(usedSlotInstance),
+ vertexBuffer.usedBytesInStride);
} else {
+ uint64_t requiredSize =
+ (static_cast<uint64_t>(firstInstance) + instanceCount) * arrayStride;
// firstInstance and instanceCount are in uint32_t, and arrayStride must
// not be larger than kMaxVertexBufferArrayStride, which is currently 2048.
// So by doing checks in uint64_t we avoid overflows.
- if ((static_cast<uint64_t>(firstInstance) + instanceCount) * arrayStride >
- bufferSize) {
- return DAWN_VALIDATION_ERROR("Vertex buffer out of bound");
- }
+ DAWN_INVALID_IF(
+ requiredSize > bufferSize,
+ "Instance range (first: %u, count: %u) requires a larger buffer (%u) than the "
+ "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
+ firstInstance, instanceCount, requiredSize, bufferSize,
+ static_cast<uint8_t>(usedSlotInstance), arrayStride);
}
}
@@ -141,11 +159,12 @@ namespace dawn_native {
// firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
// wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
// uint64_t we avoid overflows.
- if ((static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
- mIndexBufferSize) {
- // Index range is out of bounds
- return DAWN_VALIDATION_ERROR("Index buffer out of bound");
- }
+ DAWN_INVALID_IF(
+ (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
+ mIndexBufferSize,
+ "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
+ "(%u).",
+ firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
return {};
}
@@ -212,15 +231,15 @@ namespace dawn_native {
return {};
}
- if (aspects[VALIDATION_ASPECT_INDEX_BUFFER]) {
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
+ DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
+
wgpu::IndexFormat pipelineIndexFormat = mLastRenderPipeline->GetStripIndexFormat();
- if (!mIndexBufferSet) {
- return DAWN_VALIDATION_ERROR("Missing index buffer");
- } else if (IsStripPrimitiveTopology(mLastRenderPipeline->GetPrimitiveTopology()) &&
- mIndexFormat != pipelineIndexFormat) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline strip index format does not match index buffer format");
- }
+ DAWN_INVALID_IF(
+ IsStripPrimitiveTopology(mLastRenderPipeline->GetPrimitiveTopology()) &&
+ mIndexFormat != pipelineIndexFormat,
+ "Strip index format (%s) of %s does not match index buffer format (%s).",
+ pipelineIndexFormat, mLastRenderPipeline, mIndexFormat);
// The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
// It returns the first invalid state found. We shouldn't be able to reach this line
@@ -230,25 +249,27 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Index buffer invalid");
}
- if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
- return DAWN_VALIDATION_ERROR("Missing vertex buffer");
- }
+ // TODO(dawn:563): Indicate which slots were not set.
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
+ "Vertex buffer slots required by %s were not set.", mLastRenderPipeline);
- if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
+ if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
- if (mBindgroups[i] == nullptr) {
- return DAWN_VALIDATION_ERROR("Missing bind group " +
- std::to_string(static_cast<uint32_t>(i)));
- } else if (mLastPipelineLayout->GetBindGroupLayout(i) !=
- mBindgroups[i]->GetLayout()) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline and bind group layout doesn't match for bind group " +
- std::to_string(static_cast<uint32_t>(i)));
- } else if (!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
- (*mMinBufferSizes)[i])) {
- return DAWN_VALIDATION_ERROR("Binding sizes too small for bind group " +
- std::to_string(static_cast<uint32_t>(i)));
- }
+ DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
+ static_cast<uint32_t>(i));
+
+ DAWN_INVALID_IF(
+ mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
+ "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
+ "group %s at index %u.",
+ mLastPipelineLayout->GetBindGroupLayout(i), mLastPipelineLayout,
+ mBindgroups[i]->GetLayout(), mBindgroups[i], static_cast<uint32_t>(i));
+
+ // TODO(dawn:563): Report the binding sizes and which ones are failing.
+ DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+ (*mMinBufferSizes)[i]),
+ "Binding sizes are too small for bind group %s at index %u",
+ mBindgroups[i], static_cast<uint32_t>(i));
}
// The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
@@ -259,9 +280,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Bind groups invalid");
}
- if (aspects[VALIDATION_ASPECT_PIPELINE]) {
- return DAWN_VALIDATION_ERROR("Missing pipeline");
- }
+ DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
UNREACHABLE();
}
@@ -308,4 +327,13 @@ namespace dawn_native {
PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
return mLastPipelineLayout;
}
+
+ wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
+ return mIndexFormat;
+ }
+
+ uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
+ return mIndexBufferSize;
+ }
+
} // namespace dawn_native
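
Note: the validation rewritten above keeps the vertex and index range checks in 64-bit arithmetic so that (first + count) * stride cannot wrap before being compared with the bound buffer size. A standalone sketch of that overflow-safe check (the stride cap, kMaxVertexBufferArrayStride = 2048 in Dawn, keeps the product well inside 64 bits):

    #include <cstdint>
    #include <iostream>

    // Both counts are 32-bit; widening to uint64_t before the multiply means the
    // required size cannot wrap around.
    bool VertexRangeFits(uint32_t firstVertex,
                         uint32_t vertexCount,
                         uint64_t arrayStride,
                         uint64_t bufferSize) {
        uint64_t requiredSize =
            (static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride;
        return requiredSize <= bufferSize;
    }

    int main() {
        std::cout << VertexRangeFits(0, 3, 16, 48) << "\n";  // 1: exactly fits
        std::cout << VertexRangeFits(1, 3, 16, 48) << "\n";  // 0: out of bounds
        std::cout << VertexRangeFits(4000000000u, 1000000000u, 2048, UINT64_MAX)
                  << "\n";                                   // 1: no 32-bit wraparound
    }
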
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
index 805a4fb1440..0a6c587a982 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
@@ -47,6 +47,8 @@ namespace dawn_native {
BindGroupBase* GetBindGroup(BindGroupIndex index) const;
PipelineLayoutBase* GetPipelineLayout() const;
+ wgpu::IndexFormat GetIndexFormat() const;
+ uint64_t GetIndexBufferSize() const;
private:
MaybeError ValidateOperation(ValidationAspects requiredAspects);
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 431c7c2f9df..223cbb6cff5 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -25,6 +25,7 @@
#include "dawn_native/ComputePassEncoder.h"
#include "dawn_native/Device.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/QueryHelper.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/Queue.h"
@@ -41,37 +42,25 @@ namespace dawn_native {
namespace {
- MaybeError ValidateDeprecatedStoreOp(DeviceBase* device, wgpu::StoreOp value) {
- if (value == wgpu::StoreOp::Clear) {
- device->EmitDeprecationWarning(
- "The 'clear' storeOp is deprecated. Use 'discard' instead.");
- }
- return ValidateStoreOp(value);
- }
-
MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
uint64_t srcOffset,
uint64_t dstOffset) {
// Copy size must be a multiple of 4 bytes on macOS.
- if (dataSize % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Copy size must be a multiple of 4 bytes");
- }
+ DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
// SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
- if (srcOffset % 4 != 0 || dstOffset % 4 != 0) {
- return DAWN_VALIDATION_ERROR(
- "Source offset and destination offset must be multiples of 4 bytes");
- }
+ DAWN_INVALID_IF(
+ srcOffset % 4 != 0 || dstOffset % 4 != 0,
+ "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
+ srcOffset, dstOffset);
return {};
}
MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
- if (texture->GetSampleCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "The sample count of textures must be 1 when copying between buffers and "
- "textures");
- }
+ DAWN_INVALID_IF(texture->GetSampleCount() > 1,
+ "%s sample count (%u) is not 1 when copying to or from a buffer.",
+ texture, texture->GetSampleCount());
return {};
}
@@ -81,15 +70,13 @@ namespace dawn_native {
const bool hasDepthOrStencil) {
if (hasDepthOrStencil) {
// For depth-stencil texture, buffer offset must be a multiple of 4.
- if (layout.offset % 4 != 0) {
- return DAWN_VALIDATION_ERROR(
- "offset must be a multiple of 4 for depth/stencil texture.");
- }
+ DAWN_INVALID_IF(layout.offset % 4 != 0,
+ "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
+ layout.offset);
} else {
- if (layout.offset % blockInfo.byteSize != 0) {
- return DAWN_VALIDATION_ERROR(
- "offset must be a multiple of the texel block byte size.");
- }
+ DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
+ "Offset (%u) is not a multiple of the texel block byte size (%u).",
+ layout.offset, blockInfo.byteSize);
}
return {};
}
@@ -102,9 +89,10 @@ namespace dawn_native {
switch (src.texture->GetFormat().format) {
case wgpu::TextureFormat::Depth24Plus:
case wgpu::TextureFormat::Depth24PlusStencil8:
- return DAWN_VALIDATION_ERROR(
- "The depth aspect of depth24plus texture cannot be selected in a "
- "texture to buffer copy");
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "The depth aspect of %s format %s cannot be selected in a texture to "
+ "buffer copy.",
+ src.texture, src.texture->GetFormat().format);
case wgpu::TextureFormat::Depth32Float:
break;
@@ -118,17 +106,13 @@ namespace dawn_native {
MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
// Currently we do not support layered rendering.
- if (attachment->GetLayerCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "The layer count of the texture view used as attachment cannot be greater than "
- "1");
- }
+ DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
+ "The layer count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLayerCount(), attachment);
- if (attachment->GetLevelCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "The mipmap level count of the texture view used as attachment cannot be "
- "greater than 1");
- }
+ DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
+ "The mip level count (%u) of %s used as attachment is greater than 1.",
+ attachment->GetLevelCount(), attachment);
return {};
}
@@ -144,8 +128,12 @@ namespace dawn_native {
*width = attachmentSize.width;
*height = attachmentSize.height;
DAWN_ASSERT(*width != 0 && *height != 0);
- } else if (*width != attachmentSize.width || *height != attachmentSize.height) {
- return DAWN_VALIDATION_ERROR("Attachment size mismatch");
+ } else {
+ DAWN_INVALID_IF(
+ *width != attachmentSize.width || *height != attachmentSize.height,
+ "Attachment %s size (width: %u, height: %u) does not match the size of the "
+ "other attachments (width: %u, height: %u).",
+ attachment, attachmentSize.width, attachmentSize.height, *width, *height);
}
return {};
@@ -156,110 +144,96 @@ namespace dawn_native {
if (*sampleCount == 0) {
*sampleCount = colorAttachment->GetTexture()->GetSampleCount();
DAWN_ASSERT(*sampleCount != 0);
- } else if (*sampleCount != colorAttachment->GetTexture()->GetSampleCount()) {
- return DAWN_VALIDATION_ERROR("Color attachment sample counts mismatch");
+ } else {
+ DAWN_INVALID_IF(
+ *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
+ "Color attachment %s sample count (%u) does not match the sample count of the "
+ "other attachments (%u).",
+ colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
}
return {};
}
- MaybeError ValidateResolveTarget(
- const DeviceBase* device,
- const RenderPassColorAttachmentDescriptor& colorAttachment) {
+ MaybeError ValidateResolveTarget(const DeviceBase* device,
+ const RenderPassColorAttachment& colorAttachment) {
if (colorAttachment.resolveTarget == nullptr) {
return {};
}
const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
- const TextureViewBase* attachment =
- colorAttachment.view != nullptr ? colorAttachment.view : colorAttachment.attachment;
+ const TextureViewBase* attachment = colorAttachment.view;
DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
wgpu::TextureUsage::RenderAttachment));
- if (!attachment->GetTexture()->IsMultisampledTexture()) {
- return DAWN_VALIDATION_ERROR(
- "Cannot set resolve target when the sample count of the color attachment is 1");
- }
+ DAWN_INVALID_IF(
+ !attachment->GetTexture()->IsMultisampledTexture(),
+ "Cannot set %s as a resolve target when the color attachment %s has a sample "
+ "count of 1.",
+ resolveTarget, attachment);
- if (resolveTarget->GetTexture()->IsMultisampledTexture()) {
- return DAWN_VALIDATION_ERROR("Cannot use multisampled texture as resolve target");
- }
+ DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
+ "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
+ resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
- if (resolveTarget->GetLayerCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "The array layer count of the resolve target must be 1");
- }
+ DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
+ "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLayerCount());
- if (resolveTarget->GetLevelCount() > 1) {
- return DAWN_VALIDATION_ERROR("The mip level count of the resolve target must be 1");
- }
+ DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
+ "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
+ resolveTarget->GetLevelCount());
const Extent3D& colorTextureSize =
attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
const Extent3D& resolveTextureSize =
resolveTarget->GetTexture()->GetMipLevelVirtualSize(
resolveTarget->GetBaseMipLevel());
- if (colorTextureSize.width != resolveTextureSize.width ||
- colorTextureSize.height != resolveTextureSize.height) {
- return DAWN_VALIDATION_ERROR(
- "The size of the resolve target must be the same as the color attachment");
- }
+ DAWN_INVALID_IF(
+ colorTextureSize.width != resolveTextureSize.width ||
+ colorTextureSize.height != resolveTextureSize.height,
+ "The Resolve target %s size (width: %u, height: %u) does not match the color "
+ "attachment %s size (width: %u, height: %u).",
+ resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
+ colorTextureSize.width, colorTextureSize.height);
wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
- if (resolveTargetFormat != attachment->GetFormat().format) {
- return DAWN_VALIDATION_ERROR(
- "The format of the resolve target must be the same as the color attachment");
- }
+ DAWN_INVALID_IF(
+ resolveTargetFormat != attachment->GetFormat().format,
+ "The resolve target %s format (%s) does not match the color attachment %s format "
+ "(%s).",
+ resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
return {};
}
MaybeError ValidateRenderPassColorAttachment(
DeviceBase* device,
- const RenderPassColorAttachmentDescriptor& colorAttachment,
+ const RenderPassColorAttachment& colorAttachment,
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
- TextureViewBase* attachment;
- if (colorAttachment.view != nullptr) {
- if (colorAttachment.attachment != nullptr) {
- return DAWN_VALIDATION_ERROR(
- "Cannot specify both a attachment and view. attachment is deprecated, "
- "favor view instead.");
- }
- attachment = colorAttachment.view;
- } else if (colorAttachment.attachment != nullptr) {
- device->EmitDeprecationWarning(
- "RenderPassColorAttachmentDescriptor.attachment has been deprecated. Use "
- "RenderPassColorAttachmentDescriptor.view instead.");
- attachment = colorAttachment.attachment;
- } else {
- return DAWN_VALIDATION_ERROR(
- "Must specify a view for RenderPassColorAttachmentDescriptor");
- }
-
+ TextureViewBase* attachment = colorAttachment.view;
DAWN_TRY(device->ValidateObject(attachment));
DAWN_TRY(
ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
- if (!(attachment->GetAspects() & Aspect::Color) ||
- !attachment->GetFormat().isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "The format of the texture view used as color attachment is not color "
- "renderable");
- }
+ DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
+ !attachment->GetFormat().isRenderable,
+ "The color attachment %s format (%s) is not color renderable.",
+ attachment, attachment->GetFormat().format);
DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
- DAWN_TRY(ValidateDeprecatedStoreOp(device, colorAttachment.storeOp));
+ DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
- if (std::isnan(colorAttachment.clearColor.r) ||
- std::isnan(colorAttachment.clearColor.g) ||
- std::isnan(colorAttachment.clearColor.b) ||
- std::isnan(colorAttachment.clearColor.a)) {
- return DAWN_VALIDATION_ERROR("Color clear value cannot contain NaN");
- }
+ DAWN_INVALID_IF(std::isnan(colorAttachment.clearColor.r) ||
+ std::isnan(colorAttachment.clearColor.g) ||
+ std::isnan(colorAttachment.clearColor.b) ||
+ std::isnan(colorAttachment.clearColor.a),
+ "Color clear value (%s) contain a NaN.",
+ &colorAttachment.clearColor);
}
DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
@@ -274,93 +248,74 @@ namespace dawn_native {
MaybeError ValidateRenderPassDepthStencilAttachment(
DeviceBase* device,
- const RenderPassDepthStencilAttachmentDescriptor* depthStencilAttachment,
+ const RenderPassDepthStencilAttachment* depthStencilAttachment,
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
DAWN_ASSERT(depthStencilAttachment != nullptr);
- TextureViewBase* attachment;
- if (depthStencilAttachment->view != nullptr) {
- if (depthStencilAttachment->attachment != nullptr) {
- return DAWN_VALIDATION_ERROR(
- "Cannot specify both a attachment and view. attachment is deprecated, "
- "favor view instead.");
- }
- attachment = depthStencilAttachment->view;
- } else if (depthStencilAttachment->attachment != nullptr) {
- device->EmitDeprecationWarning(
- "RenderPassDepthStencilAttachmentDescriptor.attachment has been deprecated. "
- "Use RenderPassDepthStencilAttachmentDescriptor.view instead.");
- attachment = depthStencilAttachment->attachment;
- } else {
- return DAWN_VALIDATION_ERROR(
- "Must specify a view for RenderPassDepthStencilAttachmentDescriptor");
- }
-
+ TextureViewBase* attachment = depthStencilAttachment->view;
DAWN_TRY(device->ValidateObject(attachment));
DAWN_TRY(
ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment));
const Format& format = attachment->GetFormat();
- if (!format.HasDepthOrStencil()) {
- return DAWN_VALIDATION_ERROR(
- "The format of the texture view used as depth stencil attachment is not a "
- "depth stencil format");
- }
- if (!format.isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "The format of the texture view used as depth stencil attachment is not "
- "renderable");
- }
- if (attachment->GetAspects() != format.aspects) {
- // TODO(https://crbug.com/dawn/812): Investigate if this limitation should be added
- // to the WebGPU spec of lifted from Dawn.
- return DAWN_VALIDATION_ERROR(
- "The texture view used as depth stencil view must encompass all aspects");
- }
+ DAWN_INVALID_IF(
+ !format.HasDepthOrStencil(),
+ "The depth stencil attachment %s format (%s) is not a depth stencil format.",
+ attachment, format.format);
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
- DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
- DAWN_TRY(ValidateDeprecatedStoreOp(device, depthStencilAttachment->depthStoreOp));
- DAWN_TRY(ValidateDeprecatedStoreOp(device, depthStencilAttachment->stencilStoreOp));
-
- if (attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
- depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly) {
- return DAWN_VALIDATION_ERROR(
- "depthReadOnly and stencilReadOnly must be the same when texture aspect is "
- "'all'");
- }
+ DAWN_INVALID_IF(!format.isRenderable,
+ "The depth stencil attachment %s format (%s) is not renderable.",
+ attachment, format.format);
- if (depthStencilAttachment->depthReadOnly &&
- (depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Load ||
- depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Store)) {
- return DAWN_VALIDATION_ERROR(
- "depthLoadOp must be load and depthStoreOp must be store when depthReadOnly "
- "is true.");
- }
+ DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
+ "The depth stencil attachment %s must encompass all aspects.",
+ attachment);
- if (depthStencilAttachment->stencilReadOnly &&
- (depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Load ||
- depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Store)) {
- return DAWN_VALIDATION_ERROR(
- "stencilLoadOp must be load and stencilStoreOp must be store when "
- "stencilReadOnly "
- "is true.");
- }
-
- if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
- std::isnan(depthStencilAttachment->clearDepth)) {
- return DAWN_VALIDATION_ERROR("Depth clear value cannot be NaN");
- }
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+
+ DAWN_INVALID_IF(
+ attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
+ depthStencilAttachment->depthReadOnly !=
+ depthStencilAttachment->stencilReadOnly,
+ "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
+ "is 'all'.",
+ depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
+
+ DAWN_INVALID_IF(
+ depthStencilAttachment->depthReadOnly &&
+ (depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Load ||
+ depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Store),
+ "depthLoadOp (%s) is not %s or depthStoreOp (%s) is not %s when depthReadOnly "
+ "is true.",
+ depthStencilAttachment->depthLoadOp, wgpu::LoadOp::Load,
+ depthStencilAttachment->depthStoreOp, wgpu::StoreOp::Store);
+
+ DAWN_INVALID_IF(depthStencilAttachment->stencilReadOnly &&
+ (depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Load ||
+ depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Store),
+ "stencilLoadOp (%s) is not %s or stencilStoreOp (%s) is not %s when "
+ "stencilReadOnly is true.",
+ depthStencilAttachment->stencilLoadOp, wgpu::LoadOp::Load,
+ depthStencilAttachment->stencilStoreOp, wgpu::StoreOp::Store);
+
+ DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+ std::isnan(depthStencilAttachment->clearDepth),
+ "Depth clear value is NaN.");
// *sampleCount == 0 must only happen when there is no color attachment. In that case we
// do not need to validate the sample count of the depth stencil attachment.
const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
if (*sampleCount != 0) {
- if (depthStencilSampleCount != *sampleCount) {
- return DAWN_VALIDATION_ERROR("Depth stencil attachment sample counts mismatch");
- }
+ DAWN_INVALID_IF(
+ depthStencilSampleCount != *sampleCount,
+ "The depth stencil attachment %s sample count (%u) does not match the sample "
+ "count of the other attachments (%u).",
+ attachment, depthStencilSampleCount, *sampleCount);
} else {
*sampleCount = depthStencilSampleCount;
}
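
Note: the depth/stencil hunk above enforces that a read-only aspect must use LoadOp::Load and StoreOp::Store, and that depthReadOnly must match stencilReadOnly when the view covers both aspects. A small sketch of the per-aspect rule with plain stand-in enums (not the wgpu types):

    #include <iostream>

    enum class LoadOp { Load, Clear };
    enum class StoreOp { Store, Discard };

    // A read-only depth or stencil aspect must neither be cleared on load nor
    // discarded on store, mirroring the depthReadOnly/stencilReadOnly checks above.
    bool IsValidReadOnlyAspect(bool readOnly, LoadOp load, StoreOp store) {
        if (!readOnly) {
            return true;
        }
        return load == LoadOp::Load && store == StoreOp::Store;
    }

    int main() {
        std::cout << IsValidReadOnlyAspect(true, LoadOp::Load, StoreOp::Store) << "\n";     // 1
        std::cout << IsValidReadOnlyAspect(true, LoadOp::Clear, StoreOp::Store) << "\n";    // 0
        std::cout << IsValidReadOnlyAspect(false, LoadOp::Clear, StoreOp::Discard) << "\n"; // 1
    }
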
@@ -376,32 +331,37 @@ namespace dawn_native {
uint32_t* width,
uint32_t* height,
uint32_t* sampleCount) {
- if (descriptor->colorAttachmentCount > kMaxColorAttachments) {
- return DAWN_VALIDATION_ERROR("Setting color attachments out of bounds");
- }
+ DAWN_INVALID_IF(
+ descriptor->colorAttachmentCount > kMaxColorAttachments,
+ "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
+ descriptor->colorAttachmentCount, kMaxColorAttachments);
for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
- DAWN_TRY(ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i],
- width, height, sampleCount));
+ DAWN_TRY_CONTEXT(
+ ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i],
+ width, height, sampleCount),
+ "validating colorAttachments[%u].", i);
}
if (descriptor->depthStencilAttachment != nullptr) {
- DAWN_TRY(ValidateRenderPassDepthStencilAttachment(
- device, descriptor->depthStencilAttachment, width, height, sampleCount));
+ DAWN_TRY_CONTEXT(
+ ValidateRenderPassDepthStencilAttachment(
+ device, descriptor->depthStencilAttachment, width, height, sampleCount),
+ "validating depthStencilAttachment.");
}
if (descriptor->occlusionQuerySet != nullptr) {
DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
- if (descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion) {
- return DAWN_VALIDATION_ERROR("The type of query set must be Occlusion");
- }
+ DAWN_INVALID_IF(
+ descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
+ "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
+ descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
}
- if (descriptor->colorAttachmentCount == 0 &&
- descriptor->depthStencilAttachment == nullptr) {
- return DAWN_VALIDATION_ERROR("Cannot use render pass with no attachments.");
- }
+ DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
+ descriptor->depthStencilAttachment == nullptr,
+ "Render pass has no attachments.");
return {};
}
@@ -416,22 +376,19 @@ namespace dawn_native {
uint32_t queryCount,
const BufferBase* destination,
uint64_t destinationOffset) {
- if (firstQuery >= querySet->GetQueryCount()) {
- return DAWN_VALIDATION_ERROR("Query index out of bounds");
- }
+ DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
+ "First query (%u) exceeds the number of queries (%u) in %s.",
+ firstQuery, querySet->GetQueryCount(), querySet);
- if (queryCount > querySet->GetQueryCount() - firstQuery) {
- return DAWN_VALIDATION_ERROR(
- "The sum of firstQuery and queryCount exceeds the number of queries in query "
- "set");
- }
+ DAWN_INVALID_IF(
+ queryCount > querySet->GetQueryCount() - firstQuery,
+ "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
+ "(%u) in %s.",
+ firstQuery, queryCount, querySet->GetQueryCount(), querySet);
- // The destinationOffset must be a multiple of 8 bytes on D3D12 and Vulkan
- if (destinationOffset % 8 != 0) {
- return DAWN_VALIDATION_ERROR(
- "The alignment offset into the destination buffer must be a multiple of 8 "
- "bytes");
- }
+ DAWN_INVALID_IF(destinationOffset % 256 != 0,
+ "The destination buffer %s offset (%u) is not a multiple of 256.",
+ destination, destinationOffset);
uint64_t bufferSize = destination->GetSize();
// The destination buffer must have enough storage, from destination offset, to contain
@@ -439,9 +396,11 @@ namespace dawn_native {
bool fitsInBuffer = destinationOffset <= bufferSize &&
(static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
(bufferSize - destinationOffset));
- if (!fitsInBuffer) {
- return DAWN_VALIDATION_ERROR("The resolved query data would overflow the buffer");
- }
+ DAWN_INVALID_IF(
+ !fitsInBuffer,
+ "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
+ querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
+ bufferSize, destinationOffset);
return {};
}
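
    Note (illustrative sketch, not part of the patch): a ResolveQuerySet call that satisfies the checks above (query range within the query set, a 256-byte-aligned destination offset, and a destination large enough for queryCount 64-bit results) could look like the following, assuming device and encoder already exist:

        // Hypothetical usage sketch; names are illustrative, not from the patch.
        wgpu::QuerySetDescriptor qsDesc = {};
        qsDesc.type = wgpu::QueryType::Occlusion;
        qsDesc.count = 8;
        wgpu::QuerySet querySet = device.CreateQuerySet(&qsDesc);

        wgpu::BufferDescriptor bufDesc = {};
        bufDesc.size = 512;  // >= destinationOffset + queryCount * sizeof(uint64_t)
        bufDesc.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc;
        wgpu::Buffer resolveBuffer = device.CreateBuffer(&bufDesc);

        // firstQuery + queryCount <= qsDesc.count, and destinationOffset % 256 == 0.
        encoder.ResolveQuerySet(querySet, /*firstQuery=*/2, /*queryCount=*/4, resolveBuffer,
                                /*destinationOffset=*/256);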
@@ -489,10 +448,29 @@ namespace dawn_native {
encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
}
+ bool IsReadOnlyDepthStencilAttachment(
+ const RenderPassDepthStencilAttachment* depthStencilAttachment) {
+ DAWN_ASSERT(depthStencilAttachment != nullptr);
+ Aspect aspects = depthStencilAttachment->view->GetAspects();
+ DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
+
+ if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
+ return false;
+ }
+            if ((aspects & Aspect::Stencil) && !depthStencilAttachment->stencilReadOnly) {
+ return false;
+ }
+ return true;
+ }
+
} // namespace
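
    Note (illustrative sketch, not part of the patch): the read-only configuration that IsReadOnlyDepthStencilAttachment classifies, and that the validation earlier in this hunk accepts, sets both read-only flags and uses Load/Store ops. Assuming an existing depth/stencil texture view:

        // Hedged sketch; depthStencilView is assumed to exist and cover depth + stencil aspects.
        wgpu::RenderPassDepthStencilAttachment dsAttachment = {};
        dsAttachment.view = depthStencilView;
        dsAttachment.depthReadOnly = true;
        dsAttachment.stencilReadOnly = true;              // must equal depthReadOnly for aspect 'all'
        dsAttachment.depthLoadOp = wgpu::LoadOp::Load;    // required when depthReadOnly is true
        dsAttachment.depthStoreOp = wgpu::StoreOp::Store;
        dsAttachment.stencilLoadOp = wgpu::LoadOp::Load;  // required when stencilReadOnly is true
        dsAttachment.stencilStoreOp = wgpu::StoreOp::Store;

        wgpu::RenderPassDescriptor rpDesc = {};
        rpDesc.depthStencilAttachment = &dsAttachment;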
CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor*)
- : ObjectBase(device), mEncodingContext(device, this) {
+ : ApiObjectBase(device, kLabelNotImplemented), mEncodingContext(device, this) {
+ }
+
+ ObjectType CommandEncoder::GetType() const {
+ return ObjectType::CommandEncoder;
}
CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
@@ -526,14 +504,16 @@ namespace dawn_native {
const ComputePassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
- bool success =
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
return {};
- });
+ },
+ "encoding BeginComputePass(%s).", descriptor);
if (success) {
ComputePassEncoder* passEncoder =
@@ -553,8 +533,9 @@ namespace dawn_native {
uint32_t width = 0;
uint32_t height = 0;
Ref<AttachmentState> attachmentState;
- bool success =
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ bool success = mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
uint32_t sampleCount = 0;
DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
@@ -562,6 +543,7 @@ namespace dawn_native {
ASSERT(width > 0 && height > 0 && sampleCount > 0);
+ mEncodingContext.WillBeginRenderPass();
BeginRenderPassCmd* cmd =
allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
@@ -572,9 +554,6 @@ namespace dawn_native {
IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
uint8_t i = static_cast<uint8_t>(index);
TextureViewBase* view = descriptor->colorAttachments[i].view;
- if (view == nullptr) {
- view = descriptor->colorAttachments[i].attachment;
- }
TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
cmd->colorAttachments[index].view = view;
@@ -594,9 +573,6 @@ namespace dawn_native {
if (cmd->attachmentState->HasDepthStencilAttachment()) {
TextureViewBase* view = descriptor->depthStencilAttachment->view;
- if (view == nullptr) {
- view = descriptor->depthStencilAttachment->attachment;
- }
cmd->depthStencilAttachment.view = view;
cmd->depthStencilAttachment.clearDepth =
@@ -612,7 +588,18 @@ namespace dawn_native {
cmd->depthStencilAttachment.stencilStoreOp =
descriptor->depthStencilAttachment->stencilStoreOp;
- usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+ if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
+ // TODO(dawn:485): Readonly depth/stencil attachment is not fully
+                        // implemented. Disallow it as unsafe until the implementation is completed.
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Readonly depth/stencil attachment is disallowed because it's not "
+                            "fully implemented.");
+
+ usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
+ } else {
+ usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+ }
}
cmd->width = width;
@@ -621,7 +608,8 @@ namespace dawn_native {
cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
return {};
- });
+ },
+ "encoding BeginRenderPass(%s).", descriptor);
if (success) {
RenderPassEncoder* passEncoder = new RenderPassEncoder(
@@ -639,140 +627,161 @@ namespace dawn_native {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
-
- if (source == destination) {
- return DAWN_VALIDATION_ERROR(
- "Source and destination cannot be the same buffer.");
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_INVALID_IF(source == destination,
+ "Source and destination are the same buffer (%s).", source);
+
+ DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
+ "validating source %s copy size.", source);
+ DAWN_TRY_CONTEXT(
+ ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+ "validating destination %s copy size.", destination);
+ DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
+
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source);
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination);
+
+ mTopLevelBuffers.insert(source);
+ mTopLevelBuffers.insert(destination);
}
- DAWN_TRY(ValidateCopySizeFitsInBuffer(source, sourceOffset, size));
- DAWN_TRY(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size));
- DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
-
- DAWN_TRY(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst));
-
- mTopLevelBuffers.insert(source);
- mTopLevelBuffers.insert(destination);
- }
-
- CopyBufferToBufferCmd* copy =
- allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
- copy->source = source;
- copy->sourceOffset = sourceOffset;
- copy->destination = destination;
- copy->destinationOffset = destinationOffset;
- copy->size = size;
+ CopyBufferToBufferCmd* copy =
+ allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+ copy->source = source;
+ copy->sourceOffset = sourceOffset;
+ copy->destination = destination;
+ copy->destinationOffset = destinationOffset;
+ copy->size = size;
- return {};
- });
+ return {};
+ },
+ "encoding CopyBufferToBuffer(%s, %u, %s, %u, %u).", source, sourceOffset, destination,
+ destinationOffset, size);
}
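
    Note (illustrative sketch, not part of the patch): a CopyBufferToBuffer call that passes the validation above keeps source and destination distinct, within bounds, with CopySrc/CopyDst usage, and, per ValidateB2BCopyAlignment, uses 4-byte-aligned offsets and size:

        // Hedged sketch; src and dst are assumed to be existing wgpu::Buffer objects created
        // with CopySrc and CopyDst usage respectively and at least 1280 bytes large.
        encoder.CopyBufferToBuffer(src, /*sourceOffset=*/0,
                                   dst, /*destinationOffset=*/256,
                                   /*size=*/1024);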
void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
const ImageCopyTexture* destination,
const Extent3D* copySize) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
- DAWN_TRY(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc));
-
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
-
- DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- source->layout, blockInfo,
- destination->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
- blockInfo, *copySize));
-
- mTopLevelBuffers.insert(source->buffer);
- mTopLevelTextures.insert(destination->texture);
- }
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
+ "validating source %s usage.", source->buffer);
+
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
+ DAWN_TRY_CONTEXT(
+ ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst),
+ "validating destination %s usage.", destination->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
+
+ DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
+ }
+ const TexelBlockInfo& blockInfo =
+ destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ source->layout, blockInfo,
+ destination->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
+ blockInfo, *copySize));
+
+ mTopLevelBuffers.insert(source->buffer);
+ mTopLevelTextures.insert(destination->texture);
+ }
- TextureDataLayout srcLayout = source->layout;
- ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
-
- CopyBufferToTextureCmd* copy =
- allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
- copy->source.buffer = source->buffer;
- copy->source.offset = srcLayout.offset;
- copy->source.bytesPerRow = srcLayout.bytesPerRow;
- copy->source.rowsPerImage = srcLayout.rowsPerImage;
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ TextureDataLayout srcLayout = source->layout;
+ ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+
+ CopyBufferToTextureCmd* copy =
+ allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
+ copy->source.buffer = source->buffer;
+ copy->source.offset = srcLayout.offset;
+ copy->source.bytesPerRow = srcLayout.bytesPerRow;
+ copy->source.rowsPerImage = srcLayout.rowsPerImage;
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
- return {};
- });
+ return {};
+ },
+ "encoding CopyBufferToTexture(%s, %s, %s).", source->buffer, destination->texture,
+ copySize);
}
void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
const ImageCopyBuffer* destination,
const Extent3D* copySize) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
- DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
-
- DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
- DAWN_TRY(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst));
-
- // We validate texture copy range before validating linear texture data,
- // because in the latter we divide copyExtent.width by blockWidth and
- // copyExtent.height by blockHeight while the divisibility conditions are
- // checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
- }
- const TexelBlockInfo& blockInfo =
- source->texture->GetFormat().GetAspectInfo(source->aspect).block;
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateLinearTextureCopyOffset(
- destination->layout, blockInfo,
- source->texture->GetFormat().HasDepthOrStencil()));
- DAWN_TRY(ValidateLinearTextureData(
- destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
-
- mTopLevelTextures.insert(source->texture);
- mTopLevelBuffers.insert(destination->buffer);
- }
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+ DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc),
+ "validating source %s usage.", source->texture);
+ DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
+ DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
+
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
+ DAWN_TRY_CONTEXT(
+ ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+ "validating destination %s usage.", destination->buffer);
+
+ // We validate texture copy range before validating linear texture data,
+ // because in the latter we divide copyExtent.width by blockWidth and
+ // copyExtent.height by blockHeight while the divisibility conditions are
+ // checked in validating texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+ }
+ const TexelBlockInfo& blockInfo =
+ source->texture->GetFormat().GetAspectInfo(source->aspect).block;
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateLinearTextureCopyOffset(
+ destination->layout, blockInfo,
+ source->texture->GetFormat().HasDepthOrStencil()));
+ DAWN_TRY(ValidateLinearTextureData(
+ destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
+
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelBuffers.insert(destination->buffer);
+ }
- TextureDataLayout dstLayout = destination->layout;
- ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
-
- CopyTextureToBufferCmd* copy =
- allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.buffer = destination->buffer;
- copy->destination.offset = dstLayout.offset;
- copy->destination.bytesPerRow = dstLayout.bytesPerRow;
- copy->destination.rowsPerImage = dstLayout.rowsPerImage;
- copy->copySize = *copySize;
+ TextureDataLayout dstLayout = destination->layout;
+ ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+
+ CopyTextureToBufferCmd* copy =
+ allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.buffer = destination->buffer;
+ copy->destination.offset = dstLayout.offset;
+ copy->destination.bytesPerRow = dstLayout.bytesPerRow;
+ copy->destination.rowsPerImage = dstLayout.rowsPerImage;
+ copy->copySize = *copySize;
- return {};
- });
+ return {};
+ },
+ "encoding CopyTextureToBuffer(%s, %s, %s).", source->texture, destination->buffer,
+ copySize);
}
void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
@@ -791,51 +800,60 @@ namespace dawn_native {
void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
const ImageCopyTexture* destination,
const Extent3D* copySize) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source->texture));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(source->texture));
+ DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
+ "validating source %s.", source->texture);
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
+ "validating destination %s.", destination->texture);
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
- DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
-
- DAWN_TRY(
- ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
-
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
-
- // For internal usages (CopyToCopyInternal) we don't care if the user has added
- // CopySrc as a usage for this texture, but we will always add it internally.
- if (Internal) {
DAWN_TRY(
- ValidateInternalCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateInternalCanUseAs(destination->texture,
- wgpu::TextureUsage::CopyDst));
- } else {
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
- }
+ ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
+ "validating source %s copy range.", source->texture);
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
+                                     "validating destination %s copy range.", destination->texture);
+
+ // For internal usages (CopyToCopyInternal) we don't care if the user has added
+ // CopySrc as a usage for this texture, but we will always add it internally.
+ if (Internal) {
+ DAWN_TRY(
+ ValidateInternalCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
+ DAWN_TRY(ValidateInternalCanUseAs(destination->texture,
+ wgpu::TextureUsage::CopyDst));
+ } else {
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
+ DAWN_TRY(
+ ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
+ }
- mTopLevelTextures.insert(source->texture);
- mTopLevelTextures.insert(destination->texture);
- }
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelTextures.insert(destination->texture);
+ }
- CopyTextureToTextureCmd* copy =
- allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
- copy->source.texture = source->texture;
- copy->source.origin = source->origin;
- copy->source.mipLevel = source->mipLevel;
- copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
- copy->destination.texture = destination->texture;
- copy->destination.origin = destination->origin;
- copy->destination.mipLevel = destination->mipLevel;
- copy->destination.aspect =
- ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ CopyTextureToTextureCmd* copy =
+ allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
+ copy->source.texture = source->texture;
+ copy->source.origin = source->origin;
+ copy->source.mipLevel = source->mipLevel;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+ copy->destination.texture = destination->texture;
+ copy->destination.origin = destination->origin;
+ copy->destination.mipLevel = destination->mipLevel;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
- return {};
- });
+ return {};
+ },
+ "encoding CopyTextureToTexture(%s, %s, %s).", source->texture, destination->texture,
+ copySize);
}
void CommandEncoder::APIInjectValidationError(const char* message) {
@@ -845,45 +863,57 @@ namespace dawn_native {
}
void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
- return {};
- });
+ return {};
+ },
+ "encoding InsertDebugMarker(\"%s\").", groupLabel);
}
void CommandEncoder::APIPopDebugGroup() {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- if (mDebugGroupStackSize == 0) {
- return DAWN_VALIDATION_ERROR("Pop must be balanced by a corresponding Push.");
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ mDebugGroupStackSize == 0,
+ "Every call to PopDebugGroup must be balanced by a corresponding call to "
+ "PushDebugGroup.");
}
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+ mDebugGroupStackSize--;
+ mEncodingContext.PopDebugGroupLabel();
- return {};
- });
+ return {};
+ },
+ "encoding PopDebugGroup().");
}
void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
- mDebugGroupStackSize++;
+ mDebugGroupStackSize++;
+ mEncodingContext.PushDebugGroupLabel(groupLabel);
- return {};
- });
+ return {};
+ },
+ "encoding PushDebugGroup(\"%s\").", groupLabel);
}
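
    Note (illustrative sketch, not part of the patch): the debug-group bookkeeping added here expects pushes and pops to be balanced before Finish, for example:

        // Hedged sketch of a balanced debug group.
        encoder.PushDebugGroup("frame setup");
        encoder.InsertDebugMarker("uploads recorded");
        encoder.PopDebugGroup();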
void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
@@ -891,54 +921,87 @@ namespace dawn_native {
uint32_t queryCount,
BufferBase* destination,
uint64_t destinationOffset) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(GetDevice()->ValidateObject(destination));
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
- DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
- destinationOffset));
+ DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+ destinationOffset));
- DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+ DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
- TrackUsedQuerySet(querySet);
- mTopLevelBuffers.insert(destination);
- }
+ TrackUsedQuerySet(querySet);
+ mTopLevelBuffers.insert(destination);
+ }
- ResolveQuerySetCmd* cmd =
- allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
- cmd->querySet = querySet;
- cmd->firstQuery = firstQuery;
- cmd->queryCount = queryCount;
- cmd->destination = destination;
- cmd->destinationOffset = destinationOffset;
-
- // Encode internal compute pipeline for timestamp query
- if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
- DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
- this, querySet, firstQuery, queryCount, destination, destinationOffset));
- }
+ ResolveQuerySetCmd* cmd =
+ allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+ cmd->querySet = querySet;
+ cmd->firstQuery = firstQuery;
+ cmd->queryCount = queryCount;
+ cmd->destination = destination;
+ cmd->destinationOffset = destinationOffset;
+
+ // Encode internal compute pipeline for timestamp query
+ if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
+ DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+ this, querySet, firstQuery, queryCount, destination, destinationOffset));
+ }
- return {};
- });
+ return {};
+ },
+ "encoding ResolveQuerySet(%s, %u, %u, %s, %u).", querySet, firstQuery, queryCount,
+ destination, destinationOffset);
+ }
+
+ void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size) {
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+ }
+
+ WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
+ cmd->buffer = buffer;
+ cmd->offset = bufferOffset;
+ cmd->size = size;
+
+ uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
+ memcpy(inlinedData, data, size);
+
+ mTopLevelBuffers.insert(buffer);
+
+ return {};
+ },
+ "encoding WriteBuffer(%s, %u, ..., %u).", buffer, bufferOffset, size);
}
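
    Note (illustrative sketch, not part of the patch): APIWriteBuffer is a Dawn-specific addition; assuming the generated wgpu::CommandEncoder wrapper exposes it as WriteBuffer, a call that satisfies ValidateWriteBuffer (4-byte-aligned offset and size, range within the buffer, CopyDst usage) could look like:

        // Hedged sketch; buffer is assumed to exist with CopyDst usage and size >= 16.
        const uint32_t data[4] = {1, 2, 3, 4};
        encoder.WriteBuffer(buffer, /*bufferOffset=*/0,
                            reinterpret_cast<const uint8_t*>(data), sizeof(data));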
void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- }
+ mEncodingContext.TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ }
- TrackQueryAvailability(querySet, queryIndex);
+ TrackQueryAvailability(querySet, queryIndex);
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- return {};
- });
+ return {};
+ },
+ "encoding WriteTimestamp(%s, %u).", querySet, queryIndex);
}
CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
@@ -950,6 +1013,18 @@ namespace dawn_native {
return commandBuffer.Detach();
}
+ void CommandEncoder::EncodeSetValidatedBufferLocationsInternal(
+ std::vector<DeferredBufferLocationUpdate> updates) {
+ ASSERT(GetDevice()->IsValidationEnabled());
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ SetValidatedBufferLocationsInternalCmd* cmd =
+ allocator->Allocate<SetValidatedBufferLocationsInternalCmd>(
+ Command::SetValidatedBufferLocationsInternal);
+ cmd->updates = std::move(updates);
+ return {};
+ });
+ }
+
ResultOrError<Ref<CommandBufferBase>> CommandEncoder::FinishInternal(
const CommandBufferDescriptor* descriptor) {
DeviceBase* device = GetDevice();
@@ -972,18 +1047,22 @@ namespace dawn_native {
DAWN_TRY(GetDevice()->ValidateObject(this));
for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
- DAWN_TRY(ValidateSyncScopeResourceUsage(passUsage));
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
+ "validating render pass usage.");
}
for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
- DAWN_TRY(ValidateSyncScopeResourceUsage(scope));
+ DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
+ "validating compute pass usage.");
}
}
- if (mDebugGroupStackSize != 0) {
- return DAWN_VALIDATION_ERROR("Each Push must be balanced by a corresponding Pop.");
- }
+ DAWN_INVALID_IF(
+ mDebugGroupStackSize != 0,
+ "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
+ "calling Finish.",
+ mDebugGroupStackSize);
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index 776e1d28313..f7597e09eaa 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -26,10 +26,12 @@
namespace dawn_native {
- class CommandEncoder final : public ObjectBase {
+ class CommandEncoder final : public ApiObjectBase {
public:
CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+ ObjectType GetType() const;
+
CommandIterator AcquireCommands();
CommandBufferResourceUsage AcquireResourceUsages();
@@ -68,10 +70,17 @@ namespace dawn_native {
uint32_t queryCount,
BufferBase* destination,
uint64_t destinationOffset);
+ void APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const uint8_t* data,
+ uint64_t size);
void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
+ void EncodeSetValidatedBufferLocationsInternal(
+ std::vector<DeferredBufferLocationUpdate> updates);
+
private:
ResultOrError<Ref<CommandBufferBase>> FinishInternal(
const CommandBufferDescriptor* descriptor);
@@ -94,6 +103,9 @@ namespace dawn_native {
uint64_t mDebugGroupStackSize = 0;
};
+ // For the benefit of template generation.
+ using CommandEncoderBase = CommandEncoder;
+
} // namespace dawn_native
#endif // DAWNNATIVE_COMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 7e3b8ee0ea9..ea2017d5e74 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -31,28 +31,30 @@ namespace dawn_native {
// Performs validation of the "synchronization scope" rules of WebGPU.
MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
// Buffers can only be used as single-write or multiple read.
- for (wgpu::BufferUsage usage : scope.bufferUsages) {
+ for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
+ const wgpu::BufferUsage usage = scope.bufferUsages[i];
bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
bool singleUse = wgpu::HasZeroOrOneBits(usage);
- if (!readOnly && !singleUse) {
- return DAWN_VALIDATION_ERROR(
- "Buffer used as writable usage and another usage in the same synchronization "
- "scope");
- }
+ DAWN_INVALID_IF(!readOnly && !singleUse,
+ "%s usage (%s) includes writable usage and another usage in the same "
+ "synchronization scope.",
+ scope.buffers[i], usage);
}
// Check that every single subresource is used as either a single-write usage or a
// combination of readonly usages.
- for (const TextureSubresourceUsage& textureUsage : scope.textureUsages) {
+ for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
+ const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
MaybeError error = {};
textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
bool singleUse = wgpu::HasZeroOrOneBits(usage);
if (!readOnly && !singleUse && !error.IsError()) {
- error = DAWN_VALIDATION_ERROR(
- "Texture used as writable usage and another usage in the same "
- "synchronization scope");
+ error = DAWN_FORMAT_VALIDATION_ERROR(
+ "%s usage (%s) includes writable usage and another usage in the same "
+ "synchronization scope.",
+ scope.textures[i], usage);
}
});
DAWN_TRY(std::move(error));
@@ -61,13 +63,35 @@ namespace dawn_native {
}
MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex) {
- if (querySet->GetQueryType() != wgpu::QueryType::Timestamp) {
- return DAWN_VALIDATION_ERROR("The type of query set must be Timestamp");
- }
+ DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
+ "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
- if (queryIndex >= querySet->GetQueryCount()) {
- return DAWN_VALIDATION_ERROR("Query index exceeds the number of queries in query set");
- }
+ DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
+ "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
+ querySet->GetQueryCount(), querySet);
+
+ return {};
+ }
+
+ MaybeError ValidateWriteBuffer(const DeviceBase* device,
+ const BufferBase* buffer,
+ uint64_t bufferOffset,
+ uint64_t size) {
+ DAWN_TRY(device->ValidateObject(buffer));
+
+ DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
+ bufferOffset);
+
+ DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
+ "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
+ bufferOffset, size, buffer, bufferSize);
+
+ DAWN_INVALID_IF(!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst),
+ "%s usage (%s) does not include %s.", buffer, buffer->GetUsage(),
+ wgpu::BufferUsage::CopyDst);
return {};
}
@@ -118,9 +142,11 @@ namespace dawn_native {
ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
rowsPerImage != wgpu::kCopyStrideUndefined));
uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
- if (bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers) {
- return DAWN_VALIDATION_ERROR("requiredBytesInCopy is too large.");
- }
+ DAWN_INVALID_IF(
+ bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+ "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
+ bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+ copySize.depthOrArrayLayers);
uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
if (heightInBlocks > 0) {
@@ -136,9 +162,9 @@ namespace dawn_native {
uint64_t size) {
uint64_t bufferSize = buffer->GetSize();
bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
- if (!fitsInBuffer) {
- return DAWN_VALIDATION_ERROR("Copy would overflow the buffer");
- }
+ DAWN_INVALID_IF(!fitsInBuffer,
+ "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
+ size, buffer.Get(), bufferSize);
return {};
}
@@ -173,15 +199,18 @@ namespace dawn_native {
ASSERT(copyExtent.height % blockInfo.height == 0);
uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
- if (copyExtent.depthOrArrayLayers > 1 &&
- (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
- layout.rowsPerImage == wgpu::kCopyStrideUndefined)) {
- return DAWN_VALIDATION_ERROR(
- "If copy depth > 1, bytesPerRow and rowsPerImage must be specified.");
- }
- if (heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined) {
- return DAWN_VALIDATION_ERROR("If heightInBlocks > 1, bytesPerRow must be specified.");
- }
+ // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
+ // validation message. Investigate ways to make it print as a more readable symbol.
+ DAWN_INVALID_IF(
+ copyExtent.depthOrArrayLayers > 1 &&
+ (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+ layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+            "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) is not specified.",
+ copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
+
+ DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
+ "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
+ heightInBlocks);
// Validation for other members in layout:
ASSERT(copyExtent.width % blockInfo.width == 0);
@@ -192,15 +221,15 @@ namespace dawn_native {
// These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
// but they should get optimized out.
- if (layout.bytesPerRow != wgpu::kCopyStrideUndefined &&
- bytesInLastRow > layout.bytesPerRow) {
- return DAWN_VALIDATION_ERROR("The byte size of each row must be <= bytesPerRow.");
- }
- if (layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
- heightInBlocks > layout.rowsPerImage) {
- return DAWN_VALIDATION_ERROR(
- "The height of each image, in blocks, must be <= rowsPerImage.");
- }
+ DAWN_INVALID_IF(
+ layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
+ "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
+ layout.bytesPerRow);
+
+ DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
+ heightInBlocks > layout.rowsPerImage,
+ "The height of each image in blocks (%u) is > rowsPerImage (%u).",
+ heightInBlocks, layout.rowsPerImage);
// We compute required bytes in copy after validating texel block alignments
// because the divisibility conditions are necessary for the algorithm to be valid,
@@ -212,10 +241,11 @@ namespace dawn_native {
bool fitsInData =
layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
- if (!fitsInData) {
- return DAWN_VALIDATION_ERROR(
- "Required size for texture data layout exceeds the linear data size.");
- }
+ DAWN_INVALID_IF(
+ !fitsInData,
+ "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
+ "offset (%u).",
+ requiredBytesInCopy, byteSize, layout.offset);
return {};
}
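
    Note (worked example with assumed numbers, not part of the patch): the fitsInData check above compares layout.offset + requiredBytesInCopy against the linear data size, where requiredBytesInCopy follows the formula in ComputeRequiredBytesInCopy. For an uncompressed format with 4 bytes per 1x1 block:

        // Assumed copy: width 16, height 8, depthOrArrayLayers 2, bytesPerRow 256, rowsPerImage 8.
        constexpr uint64_t bytesPerRow = 256, rowsPerImage = 8;
        constexpr uint64_t widthInBlocks = 16, heightInBlocks = 8, depthOrArrayLayers = 2;
        constexpr uint64_t bytesInLastRow = widthInBlocks * 4;                     // 64
        constexpr uint64_t bytesPerImage = bytesPerRow * rowsPerImage;             // 2048
        constexpr uint64_t requiredBytesInCopy =
            bytesPerImage * (depthOrArrayLayers - 1) +                             // 2048
            bytesPerRow * (heightInBlocks - 1) + bytesInLastRow;                   // + 1792 + 64 = 3904
        // The copy fits only if layout.offset + requiredBytesInCopy <= byteSize.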
@@ -224,9 +254,9 @@ namespace dawn_native {
const ImageCopyBuffer& imageCopyBuffer) {
DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
- if (imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0) {
- return DAWN_VALIDATION_ERROR("bytesPerRow must be a multiple of 256");
- }
+ DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
+ "bytesPerRow (%u) is not a multiple of %u.",
+ imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
}
return {};
@@ -237,25 +267,28 @@ namespace dawn_native {
const Extent3D& copySize) {
const TextureBase* texture = textureCopy.texture;
DAWN_TRY(device->ValidateObject(texture));
- if (textureCopy.mipLevel >= texture->GetNumMipLevels()) {
- return DAWN_VALIDATION_ERROR("mipLevel out of range");
- }
+ DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
+ "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
+ textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
- if (SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None) {
- return DAWN_VALIDATION_ERROR("Texture does not have selected aspect for texture copy.");
- }
+ DAWN_INVALID_IF(
+ SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+ "%s format (%s) does not have the selected aspect (%s).", texture,
+ texture->GetFormat().format, textureCopy.aspect);
if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- if (textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
- subresourceSize.width != copySize.width ||
- subresourceSize.height != copySize.height) {
- return DAWN_VALIDATION_ERROR(
- "The entire subresource must be copied when using a depth/stencil texture, or "
- "when sample count is greater than 1.");
- }
+ DAWN_INVALID_IF(
+ textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
+ subresourceSize.width != copySize.width ||
+ subresourceSize.height != copySize.height,
+                "Copy origin (%s) and size (%s) do not cover the entire subresource (origin: "
+ "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
+ "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
+ &textureCopy.origin, &copySize, &subresourceSize, texture,
+ texture->GetFormat().format, texture->GetSampleCount());
}
return {};
@@ -277,37 +310,43 @@ namespace dawn_native {
}
// All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
// overflows.
- if (static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
- static_cast<uint64_t>(mipSize.width) ||
- static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
- static_cast<uint64_t>(mipSize.height) ||
- static_cast<uint64_t>(textureCopy.origin.z) +
- static_cast<uint64_t>(copySize.depthOrArrayLayers) >
- static_cast<uint64_t>(mipSize.depthOrArrayLayers)) {
- return DAWN_VALIDATION_ERROR("Touching outside of the texture");
- }
+ DAWN_INVALID_IF(
+ static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+ static_cast<uint64_t>(mipSize.width) ||
+ static_cast<uint64_t>(textureCopy.origin.y) +
+ static_cast<uint64_t>(copySize.height) >
+ static_cast<uint64_t>(mipSize.height) ||
+ static_cast<uint64_t>(textureCopy.origin.z) +
+ static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+ static_cast<uint64_t>(mipSize.depthOrArrayLayers),
+ "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
+ "size (%s).",
+ &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
// Validation for the texel block alignments:
const Format& format = textureCopy.texture->GetFormat();
if (format.isCompressed) {
const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
- if (textureCopy.origin.x % blockInfo.width != 0) {
- return DAWN_VALIDATION_ERROR(
- "Offset.x must be a multiple of compressed texture format block width");
- }
- if (textureCopy.origin.y % blockInfo.height != 0) {
- return DAWN_VALIDATION_ERROR(
- "Offset.y must be a multiple of compressed texture format block height");
- }
- if (copySize.width % blockInfo.width != 0) {
- return DAWN_VALIDATION_ERROR(
- "copySize.width must be a multiple of compressed texture format block width");
- }
-
- if (copySize.height % blockInfo.height != 0) {
- return DAWN_VALIDATION_ERROR(
- "copySize.height must be a multiple of compressed texture format block height");
- }
+ DAWN_INVALID_IF(
+ textureCopy.origin.x % blockInfo.width != 0,
+ "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
+ "width (%u).",
+ textureCopy.origin.x, blockInfo.width);
+ DAWN_INVALID_IF(
+ textureCopy.origin.y % blockInfo.height != 0,
+ "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
+ "height (%u).",
+ textureCopy.origin.y, blockInfo.height);
+ DAWN_INVALID_IF(
+ copySize.width % blockInfo.width != 0,
+ "copySize.width (%u) is not a multiple of compressed texture format block width "
+ "(%u).",
+ copySize.width, blockInfo.width);
+ DAWN_INVALID_IF(
+ copySize.height % blockInfo.height != 0,
+ "copySize.height (%u) is not a multiple of compressed texture format block "
+ "height (%u).",
+ copySize.height, blockInfo.height);
}
return {};
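
    Note (illustrative, not part of the patch): for a compressed format with an assumed 4x4 block size, the four checks above reduce to 4-texel alignment:

        // origin {x: 8, y: 12} and copySize {width: 64, height: 32} are multiples of 4 -> valid.
        // origin.x = 6 or copySize.width = 30 would fail the corresponding DAWN_INVALID_IF above.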
@@ -318,14 +357,16 @@ namespace dawn_native {
ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
const Format& format = view.texture->GetFormat();
switch (view.aspect) {
- case wgpu::TextureAspect::All:
- if (HasOneBit(format.aspects)) {
- Aspect single = format.aspects;
- return single;
- }
- return DAWN_VALIDATION_ERROR(
- "A single aspect must be selected for multi-planar formats in "
- "texture <-> linear data copies");
+ case wgpu::TextureAspect::All: {
+ DAWN_INVALID_IF(
+ !HasOneBit(format.aspects),
+ "More than a single aspect (%s) is selected for multi-planar format (%s) in "
+ "%s <-> linear data copy.",
+ view.aspect, format.format, view.texture);
+
+ Aspect single = format.aspects;
+ return single;
+ }
case wgpu::TextureAspect::DepthOnly:
ASSERT(format.aspects & Aspect::Depth);
return Aspect::Depth;
@@ -334,16 +375,16 @@ namespace dawn_native {
return Aspect::Stencil;
case wgpu::TextureAspect::Plane0Only:
case wgpu::TextureAspect::Plane1Only:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
Aspect aspectUsed;
DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
- if (aspectUsed == Aspect::Depth) {
- return DAWN_VALIDATION_ERROR("Cannot copy into the depth aspect of a texture");
- }
+ DAWN_INVALID_IF(aspectUsed == Aspect::Depth, "Cannot copy into the depth aspect of %s.",
+ dst.texture);
return {};
}
@@ -354,31 +395,32 @@ namespace dawn_native {
const uint32_t srcSamples = src.texture->GetSampleCount();
const uint32_t dstSamples = dst.texture->GetSampleCount();
- if (srcSamples != dstSamples) {
- return DAWN_VALIDATION_ERROR(
- "Source and destination textures must have matching sample counts.");
- }
+ DAWN_INVALID_IF(
+ srcSamples != dstSamples,
+            "Source %s sample count (%u) and destination %s sample count (%u) do not match.",
+ src.texture, srcSamples, dst.texture, dstSamples);
// Metal cannot select a single aspect for texture-to-texture copies.
const Format& format = src.texture->GetFormat();
- if (SelectFormatAspects(format, src.aspect) != format.aspects) {
- return DAWN_VALIDATION_ERROR(
- "Source aspect doesn't select all the aspects of the source format.");
- }
- if (SelectFormatAspects(format, dst.aspect) != format.aspects) {
- return DAWN_VALIDATION_ERROR(
- "Destination aspect doesn't select all the aspects of the destination format.");
- }
+ DAWN_INVALID_IF(
+ SelectFormatAspects(format, src.aspect) != format.aspects,
+ "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
+ src.texture, src.aspect, format.format);
+
+ DAWN_INVALID_IF(
+ SelectFormatAspects(format, dst.aspect) != format.aspects,
+ "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
+ "(%s).",
+ dst.texture, dst.aspect, format.format);
if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
wgpu::TextureDimension dimension = src.texture->GetDimension();
ASSERT(dimension != wgpu::TextureDimension::e1D);
- if ((dimension == wgpu::TextureDimension::e2D &&
+ DAWN_INVALID_IF(
+ (dimension == wgpu::TextureDimension::e2D &&
IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) ||
- dimension == wgpu::TextureDimension::e3D) {
- return DAWN_VALIDATION_ERROR(
- "Cannot copy between overlapping subresources of the same texture.");
- }
+ dimension == wgpu::TextureDimension::e3D,
+ "Cannot copy between overlapping subresources of %s.", src.texture);
}
return {};
@@ -387,54 +429,36 @@ namespace dawn_native {
MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
const ImageCopyTexture& dst,
const Extent3D& copySize) {
- if (src.texture->GetFormat().format != dst.texture->GetFormat().format) {
- // Metal requires texture-to-texture copies be the same format
- return DAWN_VALIDATION_ERROR("Source and destination texture formats must match.");
- }
-
- return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
- }
-
- // CopyTextureForBrowser could handle color conversion during the copy and it
- // requires the source must be sampleable and the destination must be writable
- // using a render pass
- MaybeError ValidateCopyTextureForBrowserRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize) {
- if (!(src.texture->GetUsage() & wgpu::TextureUsage::TextureBinding)) {
- return DAWN_VALIDATION_ERROR("Source texture must have sampled usage");
- }
-
- if (!(dst.texture->GetUsage() & wgpu::TextureUsage::RenderAttachment)) {
- return DAWN_VALIDATION_ERROR("Dest texture must have RenderAttachment usage");
- }
+ // Metal requires texture-to-texture copies be the same format
+ DAWN_INVALID_IF(src.texture->GetFormat().format != dst.texture->GetFormat().format,
+ "Source %s format (%s) and destination %s format (%s) do not match.",
+ src.texture, src.texture->GetFormat().format, dst.texture,
+ dst.texture->GetFormat().format);
return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
}
MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
ASSERT(wgpu::HasZeroOrOneBits(usage));
- if (!(texture->GetUsage() & usage)) {
- return DAWN_VALIDATION_ERROR("texture doesn't have the required usage.");
- }
+ DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
+ texture, texture->GetUsage(), usage);
return {};
}
MaybeError ValidateInternalCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
ASSERT(wgpu::HasZeroOrOneBits(usage));
- if (!(texture->GetInternalUsage() & usage)) {
- return DAWN_VALIDATION_ERROR("texture doesn't have the required usage.");
- }
+ DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
+ "%s internal usage (%s) doesn't include %s.", texture,
+ texture->GetInternalUsage(), usage);
return {};
}
MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
ASSERT(wgpu::HasZeroOrOneBits(usage));
- if (!(buffer->GetUsage() & usage)) {
- return DAWN_VALIDATION_ERROR("buffer doesn't have the required usage.");
- }
+ DAWN_INVALID_IF(!(buffer->GetUsage() & usage), "%s usage (%s) doesn't include %s.", buffer,
+ buffer->GetUsage(), usage);
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index a87e956cbba..3f57eae972c 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -31,6 +31,11 @@ namespace dawn_native {
MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex);
+ MaybeError ValidateWriteBuffer(const DeviceBase* device,
+ const BufferBase* buffer,
+ uint64_t bufferOffset,
+ uint64_t size);
+
ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
const Extent3D& copySize,
uint32_t bytesPerRow,
@@ -61,14 +66,13 @@ namespace dawn_native {
bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+ MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize);
MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
const ImageCopyTexture& dst,
const Extent3D& copySize);
- MaybeError ValidateCopyTextureForBrowserRestrictions(const ImageCopyTexture& src,
- const ImageCopyTexture& dst,
- const Extent3D& copySize);
-
MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage);
MaybeError ValidateInternalCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage);
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index e3f852e2a31..ea122e7468c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -158,6 +158,12 @@ namespace dawn_native {
cmd->~SetStencilReferenceCmd();
break;
}
+ case Command::SetValidatedBufferLocationsInternal: {
+ SetValidatedBufferLocationsInternalCmd* cmd =
+ commands->NextCommand<SetValidatedBufferLocationsInternalCmd>();
+ cmd->~SetValidatedBufferLocationsInternalCmd();
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
cmd->~SetViewportCmd();
@@ -191,6 +197,12 @@ namespace dawn_native {
cmd->~SetVertexBufferCmd();
break;
}
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
+ commands->NextData<uint8_t>(write->size);
+ write->~WriteBufferCmd();
+ break;
+ }
case Command::WriteTimestamp: {
WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
cmd->~WriteTimestampCmd();
@@ -307,6 +319,10 @@ namespace dawn_native {
commands->NextCommand<SetStencilReferenceCmd>();
break;
+ case Command::SetValidatedBufferLocationsInternal:
+ commands->NextCommand<SetValidatedBufferLocationsInternalCmd>();
+ break;
+
case Command::SetViewport:
commands->NextCommand<SetViewportCmd>();
break;
@@ -336,6 +352,10 @@ namespace dawn_native {
break;
}
+ case Command::WriteBuffer:
+ commands->NextCommand<WriteBufferCmd>();
+ break;
+
case Command::WriteTimestamp: {
commands->NextCommand<WriteTimestampCmd>();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index 3c958fa03a0..09acd10abff 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -19,6 +19,7 @@
#include "dawn_native/AttachmentState.h"
#include "dawn_native/BindingInfo.h"
+#include "dawn_native/BufferLocation.h"
#include "dawn_native/Texture.h"
#include "dawn_native/dawn_platform.h"
@@ -62,7 +63,9 @@ namespace dawn_native {
SetBlendConstant,
SetBindGroup,
SetIndexBuffer,
+ SetValidatedBufferLocationsInternal,
SetVertexBuffer,
+ WriteBuffer,
WriteTimestamp,
};
@@ -176,8 +179,7 @@ namespace dawn_native {
};
struct DrawIndexedIndirectCmd {
- Ref<BufferBase> indirectBuffer;
- uint64_t indirectOffset;
+ Ref<BufferLocation> indirectBufferLocation;
};
struct EndComputePassCmd {};
@@ -223,6 +225,16 @@ namespace dawn_native {
uint32_t reference;
};
+ struct DeferredBufferLocationUpdate {
+ Ref<BufferLocation> location;
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ };
+
+ struct SetValidatedBufferLocationsInternalCmd {
+ std::vector<DeferredBufferLocationUpdate> updates;
+ };
+
struct SetViewportCmd {
float x, y, width, height, minDepth, maxDepth;
};
@@ -255,6 +267,12 @@ namespace dawn_native {
uint64_t size;
};
+ struct WriteBufferCmd {
+ Ref<BufferBase> buffer;
+ uint64_t offset;
+ uint64_t size;
+ };
+
struct WriteTimestampCmd {
Ref<QuerySetBase> querySet;
uint32_t queryIndex;
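
DrawIndexedIndirectCmd now records a Ref<BufferLocation> instead of a raw buffer plus offset, and SetValidatedBufferLocationsInternalCmd carries DeferredBufferLocationUpdate entries that can retarget those locations after validation has run. A minimal standalone sketch of the indirection, with std::shared_ptr and a string standing in for Ref<> and BufferBase (names are illustrative only):

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct BufferLocation {
        std::string buffer;  // stand-in for Ref<BufferBase>
        uint64_t offset = 0;
    };

    struct DrawIndexedIndirectCmd {
        std::shared_ptr<BufferLocation> indirectBufferLocation;
    };

    struct DeferredBufferLocationUpdate {
        std::shared_ptr<BufferLocation> location;
        std::string buffer;
        uint64_t offset;
    };

    int main() {
        auto location = std::make_shared<BufferLocation>();
        location->buffer = "userIndirectBuffer";
        location->offset = 256;

        DrawIndexedIndirectCmd draw{location};

        // Later, a validation step decides the draw should read its parameters
        // from a validated scratch buffer instead and records a deferred update.
        std::vector<DeferredBufferLocationUpdate> updates = {
            {location, "validatedScratchBuffer", 0}};
        for (const DeferredBufferLocationUpdate& update : updates) {
            update.location->buffer = update.buffer;
            update.location->offset = update.offset;
        }

        // The recorded command now points at the validated parameters.
        std::cout << draw.indirectBufferLocation->buffer << " @ "
                  << draw.indirectBufferLocation->offset << "\n";
        return 0;
    }
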
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
index dcc5df83c3d..517429a30fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
@@ -20,6 +20,7 @@
#include "dawn_native/Commands.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/QuerySet.h"
@@ -57,138 +58,162 @@ namespace dawn_native {
return new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
}
- void ComputePassEncoder::APIEndPass() {
- if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- }
-
- allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+ ObjectType ComputePassEncoder::GetType() const {
+ return ObjectType::ComputePassEncoder;
+ }
- return {};
- })) {
- mEncodingContext->ExitPass(this, mUsageTracker.AcquireResourceUsage());
+ void ComputePassEncoder::APIEndPass() {
+ if (mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+ }
+
+ allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+
+ return {};
+ },
+ "encoding EndPass()")) {
+ mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
}
}
void ComputePassEncoder::APIDispatch(uint32_t x, uint32_t y, uint32_t z) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
- DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(x));
- DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(y));
- DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(z));
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+ DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(x));
+ DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(y));
+ DAWN_TRY(ValidatePerDimensionDispatchSizeLimit(z));
+ }
- // Record the synchronization scope for Dispatch, which is just the current bindgroups.
- AddDispatchSyncScope();
+ // Record the synchronization scope for Dispatch, which is just the current
+ // bindgroups.
+ AddDispatchSyncScope();
- DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
- dispatch->x = x;
- dispatch->y = y;
- dispatch->z = z;
+ DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+ dispatch->x = x;
+ dispatch->y = y;
+ dispatch->z = z;
- return {};
- });
+ return {};
+ },
+ "encoding Dispatch (x: %u, y: %u, z: %u)", x, y, z);
}
void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
uint64_t indirectOffset) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
- // Indexed dispatches need a compute-shader based validation to check that the
- // dispatch sizes aren't too big. Disallow them as unsafe until the validation is
- // implemented.
- if (GetDevice()->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "DispatchIndirect is disallowed because it doesn't validate that the "
- "dispatch "
- "size is valid yet.");
- }
-
- if (indirectOffset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Indirect offset must be a multiple of 4");
- }
-
- if (indirectOffset >= indirectBuffer->GetSize() ||
- indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize()) {
- return DAWN_VALIDATION_ERROR("Indirect offset out of bounds");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+                    // Indirect dispatches need a compute-shader based validation to check that the
+                    // dispatch sizes aren't too big. Disallow them as unsafe until the validation
+                    // is implemented.
+ if (GetDevice()->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "DispatchIndirect is disallowed because it doesn't validate that the "
+ "dispatch "
+ "size is valid yet.");
+ }
+
+ if (indirectOffset % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("Indirect offset must be a multiple of 4");
+ }
+
+ if (indirectOffset >= indirectBuffer->GetSize() ||
+ indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize()) {
+ return DAWN_VALIDATION_ERROR("Indirect offset out of bounds");
+ }
}
- }
- // Record the synchronization scope for Dispatch, both the bindgroups and the indirect
- // buffer.
- SyncScopeUsageTracker scope;
- scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
- mUsageTracker.AddReferencedBuffer(indirectBuffer);
- AddDispatchSyncScope(std::move(scope));
+ // Record the synchronization scope for Dispatch, both the bindgroups and the
+ // indirect buffer.
+ SyncScopeUsageTracker scope;
+ scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.AddReferencedBuffer(indirectBuffer);
+ AddDispatchSyncScope(std::move(scope));
- DispatchIndirectCmd* dispatch =
- allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
- dispatch->indirectBuffer = indirectBuffer;
- dispatch->indirectOffset = indirectOffset;
+ DispatchIndirectCmd* dispatch =
+ allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
+ dispatch->indirectBuffer = indirectBuffer;
+ dispatch->indirectOffset = indirectOffset;
- return {};
- });
+ return {};
+ },
+ "encoding DispatchIndirect with %s", indirectBuffer);
}
void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+ }
- mCommandBufferState.SetComputePipeline(pipeline);
+ mCommandBufferState.SetComputePipeline(pipeline);
- SetComputePipelineCmd* cmd =
- allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
- cmd->pipeline = pipeline;
+ SetComputePipelineCmd* cmd =
+ allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
+ cmd->pipeline = pipeline;
- return {};
- });
+ return {};
+ },
+ "encoding SetPipeline with %s", pipeline);
}
void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
BindGroupBase* group,
uint32_t dynamicOffsetCount,
const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
- if (IsValidationEnabled()) {
- DAWN_TRY(
- ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
- }
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets));
+ }
- mUsageTracker.AddResourcesReferencedByBindGroup(group);
+ mUsageTracker.AddResourcesReferencedByBindGroup(group);
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group);
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group);
- return {};
- });
+ return {};
+ },
+ "encoding SetBindGroup with %s at index %u", group, groupIndexIn);
}
void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ }
- mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+ mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- return {};
- });
+ return {};
+ },
+ "encoding WriteTimestamp to %s.", querySet);
}
void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
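
Every TryEncode call above now passes a printf-style context string such as "encoding Dispatch (x: %u, y: %u, z: %u)", presumably appended to whatever validation error the lambda produces. A self-contained sketch of that pattern, with std::optional<std::string> standing in for MaybeError and a hypothetical TryEncode helper:

    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <string>

    // Runs the encoding callback; on failure, wraps the error with a formatted
    // description of the API call that was being encoded.
    std::optional<std::string> TryEncode(
        const std::function<std::optional<std::string>()>& encode,
        const char* contextFmt, ...) {
        std::optional<std::string> error = encode();
        if (!error) {
            return std::nullopt;
        }
        char context[256];
        va_list args;
        va_start(args, contextFmt);
        std::vsnprintf(context, sizeof(context), contextFmt, args);
        va_end(args);
        return *error + " (while " + context + ")";
    }

    int main() {
        uint32_t x = 70000, y = 1, z = 1;
        auto error = TryEncode(
            [&]() -> std::optional<std::string> {
                if (x > 65535) {
                    return std::string("dispatch size exceeds the per-dimension limit");
                }
                return std::nullopt;  // a DispatchCmd would be recorded here
            },
            "encoding Dispatch (x: %u, y: %u, z: %u)", x, y, z);
        std::printf("%s\n", error ? error->c_str() : "encoded OK");
        return 0;
    }
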
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
index cbf4f612906..57a975001bd 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
@@ -17,6 +17,7 @@
#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/ProgrammablePassEncoder.h"
@@ -34,6 +35,8 @@ namespace dawn_native {
CommandEncoder* commandEncoder,
EncodingContext* encodingContext);
+ ObjectType GetType() const override;
+
void APIEndPass();
void APIDispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1);
@@ -66,6 +69,9 @@ namespace dawn_native {
Ref<CommandEncoder> mCommandEncoder;
};
+ // For the benefit of template generation.
+ using ComputePassEncoderBase = ComputePassEncoder;
+
} // namespace dawn_native
#endif // DAWNNATIVE_COMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
index 8ae593b613b..f789235b02c 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
@@ -16,6 +16,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
+#include "dawn_native/ObjectType_autogen.h"
namespace dawn_native {
@@ -29,20 +30,10 @@ namespace dawn_native {
DAWN_TRY(device->ValidateObject(descriptor->layout));
}
- if (descriptor->compute.module != nullptr) {
- DAWN_TRY(ValidateProgrammableStage(device, descriptor->compute.module,
- descriptor->compute.entryPoint, descriptor->layout,
- SingleShaderStage::Compute));
- } else {
- // TODO(dawn:800): Remove after deprecation period.
- device->EmitDeprecationWarning(
- "computeStage has been deprecated. Please begin using compute instead.");
- DAWN_TRY(ValidateProgrammableStage(device, descriptor->computeStage.module,
- descriptor->computeStage.entryPoint,
- descriptor->layout, SingleShaderStage::Compute));
- }
-
- return {};
+ return ValidateProgrammableStage(
+ device, descriptor->compute.module, descriptor->compute.entryPoint,
+ descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
+ SingleShaderStage::Compute);
}
// ComputePipelineBase
@@ -51,8 +42,10 @@ namespace dawn_native {
const ComputePipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
+ descriptor->label,
{{SingleShaderStage::Compute, descriptor->compute.module,
- descriptor->compute.entryPoint}}) {
+ descriptor->compute.entryPoint, descriptor->compute.constantCount,
+ descriptor->compute.constants}}) {
}
ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -66,7 +59,7 @@ namespace dawn_native {
}
}
- MaybeError ComputePipelineBase::Initialize(const ComputePipelineDescriptor* descriptor) {
+ MaybeError ComputePipelineBase::Initialize() {
return {};
}
@@ -75,6 +68,10 @@ namespace dawn_native {
return new ComputePipelineBase(device, ObjectBase::kError);
}
+ ObjectType ComputePipelineBase::GetType() const {
+ return ObjectType::ComputePipeline;
+ }
+
bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
const ComputePipelineBase* b) const {
return PipelineBase::EqualForCache(a, b);
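
With the label and the compute stage's constants captured by the PipelineBase constructor, Initialize() no longer needs a descriptor argument, which is what lets the async creation task below stop copying descriptor fields. A small standalone sketch of that two-phase pattern, using hypothetical Pipeline/PipelineDescriptor types and std::optional<std::string> in place of MaybeError:

    #include <iostream>
    #include <optional>
    #include <string>

    struct PipelineDescriptor {
        std::string label;
        std::string entryPoint;
    };

    class Pipeline {
      public:
        // Everything needed later is captured at construction time.
        explicit Pipeline(const PipelineDescriptor& desc)
            : mLabel(desc.label), mEntryPoint(desc.entryPoint) {}

        // Backend compilation would happen here; no descriptor is required, so
        // this can run later (for example on a worker thread) without keeping
        // the caller's descriptor alive.
        std::optional<std::string> Initialize() {
            if (mEntryPoint.empty()) {
                return std::string("missing entry point");
            }
            mInitialized = true;
            return std::nullopt;
        }

        bool IsInitialized() const { return mInitialized; }

      private:
        std::string mLabel;
        std::string mEntryPoint;
        bool mInitialized = false;
    };

    int main() {
        Pipeline pipeline(PipelineDescriptor{"my pipeline", "main"});
        std::optional<std::string> error = pipeline.Initialize();
        std::cout << (error ? *error : std::string("initialized")) << "\n";
        return 0;
    }
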
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
index f74f0502f23..1c134e65ea0 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
@@ -15,6 +15,8 @@
#ifndef DAWNNATIVE_COMPUTEPIPELINE_H_
#define DAWNNATIVE_COMPUTEPIPELINE_H_
+#include "common/NonCopyable.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/Pipeline.h"
namespace dawn_native {
@@ -32,6 +34,8 @@ namespace dawn_native {
static ComputePipelineBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
// Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
struct EqualityFunc {
bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
@@ -43,7 +47,7 @@ namespace dawn_native {
// CreateComputePipelineAsyncTask is declared as a friend of ComputePipelineBase as it
// needs to call the private member function ComputePipelineBase::Initialize().
friend class CreateComputePipelineAsyncTask;
- virtual MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
+ virtual MaybeError Initialize();
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
index e177baad944..f8d2c8f8889 100644
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
@@ -147,8 +147,8 @@ namespace dawn_native {
case wgpu::TextureFormat::RGBA8Unorm:
break;
default:
- return DAWN_VALIDATION_ERROR(
- "Unsupported src texture format for CopyTextureForBrowser.");
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Source texture format (%s) is not supported.", srcFormat);
}
switch (dstFormat) {
@@ -161,44 +161,8 @@ namespace dawn_native {
case wgpu::TextureFormat::RGB10A2Unorm:
break;
default:
- return DAWN_VALIDATION_ERROR(
- "Unsupported dst texture format for CopyTextureForBrowser.");
- }
-
- return {};
- }
-
- MaybeError ValidateCopyTextureForBrowserOptions(
- const CopyTextureForBrowserOptions* options) {
- if (options->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR(
- "CopyTextureForBrowserOptions: nextInChain must be nullptr");
- }
-
- DAWN_TRY(ValidateAlphaOp(options->alphaOp));
-
- return {};
- }
-
- MaybeError ValidateSourceOriginAndCopyExtent(const ImageCopyTexture source,
- const Extent3D copySize) {
- if (source.origin.z > 0) {
- return DAWN_VALIDATION_ERROR("Source origin cannot have non-zero z value");
- }
-
- if (copySize.depthOrArrayLayers > 1) {
- return DAWN_VALIDATION_ERROR("Cannot copy to multiple slices");
- }
-
- return {};
- }
-
- MaybeError ValidateSourceAndDestinationTextureSampleCount(
- const ImageCopyTexture source,
- const ImageCopyTexture destination) {
- if (source.texture->GetSampleCount() > 1 || destination.texture->GetSampleCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "Source and destiantion textures cannot be multisampled");
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Destination texture format (%s) is not supported.", dstFormat);
}
return {};
@@ -278,15 +242,28 @@ namespace dawn_native {
DAWN_TRY(device->ValidateObject(source->texture));
DAWN_TRY(device->ValidateObject(destination->texture));
- DAWN_TRY(ValidateImageCopyTexture(device, *source, *copySize));
- DAWN_TRY(ValidateImageCopyTexture(device, *destination, *copySize));
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
+ "validating the ImageCopyTexture for the source");
+ DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
+ "validating the ImageCopyTexture for the destination");
+
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
+ "validating that the copy fits in the source");
+ DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
+ "validating that the copy fits in the destination");
+
+ DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
- DAWN_TRY(ValidateSourceOriginAndCopyExtent(*source, *copySize));
- DAWN_TRY(ValidateCopyTextureForBrowserRestrictions(*source, *destination, *copySize));
- DAWN_TRY(ValidateSourceAndDestinationTextureSampleCount(*source, *destination));
+ DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
+ source->origin.z);
+ DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
+ "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
- DAWN_TRY(ValidateTextureCopyRange(device, *source, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(device, *destination, *copySize));
+ DAWN_INVALID_IF(
+ source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
+ "The source texture sample count (%u) or the destination texture sample count (%u) is "
+ "not 1.",
+ source->texture->GetSampleCount(), destination->texture->GetSampleCount());
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding));
@@ -297,7 +274,8 @@ namespace dawn_native {
DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
destination->texture->GetFormat().format));
- DAWN_TRY(ValidateCopyTextureForBrowserOptions(options));
+ DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
+ DAWN_TRY(ValidateAlphaOp(options->alphaOp));
return {};
}
@@ -405,7 +383,7 @@ namespace dawn_native {
device->CreateTextureView(destination->texture, &dstTextureViewDesc));
// Prepare render pass color attachment descriptor.
- RenderPassColorAttachmentDescriptor colorAttachmentDesc;
+ RenderPassColorAttachment colorAttachmentDesc;
colorAttachmentDesc.view = dstView.Get();
colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
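
The removed helper functions are replaced above by inline DAWN_INVALID_IF checks whose messages embed the offending values. A simplified stand-in for that macro pattern (the real macro produces Dawn ErrorData and knows how to format WebGPU objects; here it just returns a formatted string from the enclosing function):

    #include <cstdint>
    #include <cstdio>
    #include <iostream>
    #include <optional>
    #include <string>

    #define INVALID_IF(condition, ...)                                \
        do {                                                          \
            if ((condition)) {                                        \
                char message[256];                                    \
                std::snprintf(message, sizeof(message), __VA_ARGS__); \
                return std::string(message);                          \
            }                                                         \
        } while (0)

    struct Extent3D {
        uint32_t width, height, depthOrArrayLayers;
    };

    std::optional<std::string> ValidateCopyForBrowser(uint32_t srcOriginZ,
                                                      const Extent3D& copySize,
                                                      uint32_t srcSampleCount,
                                                      uint32_t dstSampleCount) {
        INVALID_IF(srcOriginZ > 0, "Source has a non-zero z origin (%u).", srcOriginZ);
        INVALID_IF(copySize.depthOrArrayLayers > 1,
                   "Copy is for more than one array layer (%u).",
                   copySize.depthOrArrayLayers);
        INVALID_IF(srcSampleCount > 1 || dstSampleCount > 1,
                   "The source (%u) or destination (%u) sample count is not 1.",
                   srcSampleCount, dstSampleCount);
        return std::nullopt;
    }

    int main() {
        Extent3D copySize{256, 256, 2};
        std::optional<std::string> error = ValidateCopyForBrowser(0, copySize, 1, 1);
        std::cout << (error ? *error : std::string("copy is valid")) << "\n";
        return 0;
    }
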
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
index 42a4b9c53c3..6ada64e21cd 100644
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.cpp
@@ -103,40 +103,24 @@ namespace dawn_native {
CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
Ref<ComputePipelineBase> nonInitializedComputePipeline,
- const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata)
- : mComputePipeline(nonInitializedComputePipeline),
+ : mComputePipeline(std::move(nonInitializedComputePipeline)),
mBlueprintHash(blueprintHash),
mCallback(callback),
- mUserdata(userdata),
- mLabel(descriptor->label != nullptr ? descriptor->label : ""),
- mLayout(descriptor->layout),
- mEntryPoint(descriptor->compute.entryPoint),
- mComputeShaderModule(descriptor->compute.module) {
+ mUserdata(userdata) {
ASSERT(mComputePipeline != nullptr);
-
- // TODO(jiawei.shao@intel.com): save nextInChain when it is supported in Dawn.
- ASSERT(descriptor->nextInChain == nullptr);
}
void CreateComputePipelineAsyncTask::Run() {
- ComputePipelineDescriptor descriptor;
- if (!mLabel.empty()) {
- descriptor.label = mLabel.c_str();
- }
- descriptor.compute.entryPoint = mEntryPoint.c_str();
- descriptor.layout = mLayout.Get();
- descriptor.compute.module = mComputeShaderModule.Get();
- MaybeError maybeError = mComputePipeline->Initialize(&descriptor);
+ MaybeError maybeError = mComputePipeline->Initialize();
std::string errorMessage;
if (maybeError.IsError()) {
mComputePipeline = nullptr;
errorMessage = maybeError.AcquireError()->GetMessage();
}
- mComputeShaderModule = nullptr;
mComputePipeline->GetDevice()->AddComputePipelineAsyncCallbackTask(
mComputePipeline, errorMessage, mCallback, mUserdata, mBlueprintHash);
}
@@ -155,4 +139,39 @@ namespace dawn_native {
device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
}
+ CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+ Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata)
+ : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
+ mCallback(callback),
+ mUserdata(userdata) {
+ ASSERT(mRenderPipeline != nullptr);
+ }
+
+ void CreateRenderPipelineAsyncTask::Run() {
+ MaybeError maybeError = mRenderPipeline->Initialize();
+ std::string errorMessage;
+ if (maybeError.IsError()) {
+ mRenderPipeline = nullptr;
+ errorMessage = maybeError.AcquireError()->GetMessage();
+ }
+
+ mRenderPipeline->GetDevice()->AddRenderPipelineAsyncCallbackTask(
+ mRenderPipeline, errorMessage, mCallback, mUserdata);
+ }
+
+ void CreateRenderPipelineAsyncTask::RunAsync(
+ std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+ DeviceBase* device = task->mRenderPipeline->GetDevice();
+
+ // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+ // since C++14:
+ // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+ auto asyncTask = [taskPtr = task.release()] {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
+ innerTaskPtr->Run();
+ };
+ device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+ }
} // namespace dawn_native
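
RunAsync() releases the unique_ptr into the lambda capture and re-wraps it inside rather than move-capturing it; the comment above attributes this to a compiler limitation. One common reason for needing this workaround, assumed here rather than stated in the source, is that the task queue stores callables in a copyable wrapper such as std::function, which rejects move-only lambdas. A standalone sketch:

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <vector>

    struct Task {
        void Run() { std::cout << "pipeline initialized\n"; }
    };

    int main() {
        // std::function requires a copy-constructible callable, so a lambda that
        // captures a std::unique_ptr by move cannot be stored in this queue.
        std::vector<std::function<void()>> taskQueue;

        std::unique_ptr<Task> task = std::make_unique<Task>();

        // Release the raw pointer into the capture and re-acquire ownership
        // inside. The queue must run each callable exactly once: never running
        // it leaks the task, running it twice would double-delete.
        taskQueue.emplace_back([taskPtr = task.release()] {
            std::unique_ptr<Task> innerTaskPtr(taskPtr);
            innerTaskPtr->Run();
        });

        for (std::function<void()>& pending : taskQueue) {
            pending();
        }
        return 0;
    }
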
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
index 11ecd6d268c..6a36ff0a33d 100644
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTask.h
@@ -27,7 +27,7 @@ namespace dawn_native {
class PipelineLayoutBase;
class RenderPipelineBase;
class ShaderModuleBase;
- struct ComputePipelineDescriptor;
+ struct FlatComputePipelineDescriptor;
struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
@@ -52,50 +52,57 @@ namespace dawn_native {
WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
};
- struct CreateRenderPipelineAsyncCallbackTask final : CreatePipelineAsyncCallbackTaskBase {
+ struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
std::string errorMessage,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata);
- void Finish() final;
+ void Finish() override;
void HandleShutDown() final;
void HandleDeviceLoss() final;
- private:
+ protected:
Ref<RenderPipelineBase> mPipeline;
WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
};
// CreateComputePipelineAsyncTask defines all the inputs and outputs of
// CreateComputePipelineAsync() tasks, which are the same among all the backends.
- // TODO(crbug.com/dawn/529): Define a "flat descriptor"
- // (like utils::ComboRenderPipelineDescriptor) in ComputePipeline.h that's reused here and for
- // caching, etc. ValidateComputePipelineDescriptor() could produce that flat descriptor so that
- // it is reused in other places.
class CreateComputePipelineAsyncTask {
public:
CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
- const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
- virtual ~CreateComputePipelineAsyncTask() = default;
void Run();
static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
- protected:
+ private:
Ref<ComputePipelineBase> mComputePipeline;
size_t mBlueprintHash;
WGPUCreateComputePipelineAsyncCallback mCallback;
void* mUserdata;
+ };
- std::string mLabel;
- Ref<PipelineLayoutBase> mLayout;
- std::string mEntryPoint;
- Ref<ShaderModuleBase> mComputeShaderModule;
+ // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
+ // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
+ class CreateRenderPipelineAsyncTask {
+ public:
+ CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ void Run();
+
+ static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
+
+ private:
+ Ref<RenderPipelineBase> mRenderPipeline;
+ WGPUCreateRenderPipelineAsyncCallback mCallback;
+ void* mUserdata;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index 1ac8d98d7aa..c1733af8a05 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -13,6 +13,9 @@
// limitations under the License.
#include "dawn_native/DawnNative.h"
+
+#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/Buffer.h"
#include "dawn_native/Device.h"
#include "dawn_native/Instance.h"
#include "dawn_native/Texture.h"
@@ -73,8 +76,10 @@ namespace dawn_native {
return BackendType::OpenGLES;
case wgpu::BackendType::D3D11:
- UNREACHABLE();
+ case wgpu::BackendType::WebGPU:
+ break;
}
+ UNREACHABLE();
}
DeviceType Adapter::GetDeviceType() const {
@@ -88,21 +93,35 @@ namespace dawn_native {
case wgpu::AdapterType::Unknown:
return DeviceType::Unknown;
}
+ UNREACHABLE();
}
const PCIInfo& Adapter::GetPCIInfo() const {
return mImpl->GetPCIInfo();
}
+ // TODO(dawn:1149): remove once GetSupportedExtensions() is no longer used.
std::vector<const char*> Adapter::GetSupportedExtensions() const {
- ExtensionsSet supportedExtensionsSet = mImpl->GetSupportedExtensions();
- return supportedExtensionsSet.GetEnabledExtensionNames();
+ return GetSupportedFeatures();
+ }
+
+ std::vector<const char*> Adapter::GetSupportedFeatures() const {
+ FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
+ return supportedFeaturesSet.GetEnabledFeatureNames();
}
WGPUDeviceProperties Adapter::GetAdapterProperties() const {
return mImpl->GetAdapterProperties();
}
+ bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+ return mImpl->GetLimits(reinterpret_cast<SupportedLimits*>(limits));
+ }
+
+ void Adapter::SetUseTieredLimits(bool useTieredLimits) {
+ mImpl->SetUseTieredLimits(useTieredLimits);
+ }
+
bool Adapter::SupportsExternalImages() const {
return mImpl->SupportsExternalImages();
}
@@ -115,6 +134,12 @@ namespace dawn_native {
return reinterpret_cast<WGPUDevice>(mImpl->CreateDevice(deviceDescriptor));
}
+ void Adapter::RequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata) {
+ mImpl->RequestDevice(descriptor, callback, userdata);
+ }
+
void Adapter::ResetInternalDeviceForTesting() {
mImpl->ResetInternalDeviceForTesting();
}
@@ -225,4 +250,20 @@ namespace dawn_native {
ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : type(type) {
}
+ const char* GetObjectLabelForTesting(void* objectHandle) {
+ ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
+ return object->GetLabel().c_str();
+ }
+
+ uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
+ return reinterpret_cast<const BufferBase*>(buffer)->GetAllocatedSize();
+ }
+
+ bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
+ BindGroupLayoutBase* aBase = reinterpret_cast<BindGroupLayoutBase*>(a);
+ BindGroupLayoutBase* bBase = reinterpret_cast<BindGroupLayoutBase*>(b);
+ bool excludePipelineCompatibiltyToken = true;
+ return aBase->IsLayoutEqual(bBase, excludePipelineCompatibiltyToken);
+ }
+
} // namespace dawn_native
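
In the backend-type and adapter-type switches above, UNREACHABLE() moves from a case label to after the switch, and unsupported enumerators simply break. With no default label, a compiler warning such as -Wswitch can then flag any newly added wgpu enum value at build time, while genuinely invalid runtime values still hit the assertion. A standalone sketch of that restructuring, with std::abort() standing in for UNREACHABLE():

    #include <cstdlib>
    #include <iostream>

    enum class BackendType { D3D12, Metal, Vulkan, WebGPU };

    const char* BackendName(BackendType type) {
        switch (type) {  // no default: adding an enumerator triggers -Wswitch here
            case BackendType::D3D12:
                return "D3D12";
            case BackendType::Metal:
                return "Metal";
            case BackendType::Vulkan:
                return "Vulkan";
            case BackendType::WebGPU:
                break;  // valid enumerator, but not handled by this function
        }
        std::abort();  // stand-in for UNREACHABLE()
    }

    int main() {
        std::cout << BackendName(BackendType::Vulkan) << "\n";
        return 0;
    }
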
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index 27817628224..fc1c1a9d6bf 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -28,10 +28,12 @@
#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/ErrorInjector.h"
#include "dawn_native/ErrorScope.h"
#include "dawn_native/ExternalTexture.h"
#include "dawn_native/Instance.h"
#include "dawn_native/InternalPipelineStore.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PersistentCache.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/QuerySet.h"
@@ -46,6 +48,7 @@
#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_platform/DawnPlatform.h"
+#include <mutex>
#include <unordered_set>
namespace dawn_native {
@@ -121,15 +124,66 @@ namespace dawn_native {
std::string mMessage;
void* mUserdata;
};
+
+ ResultOrError<Ref<PipelineLayoutBase>>
+ ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const ComputePipelineDescriptor& descriptor,
+ ComputePipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (outDescriptor->layout == nullptr) {
+ DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+ device, {{
+ SingleShaderStage::Compute,
+ outDescriptor->compute.module,
+ outDescriptor->compute.entryPoint,
+ outDescriptor->compute.constantCount,
+ outDescriptor->compute.constants,
+ }}));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>>
+ ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ DeviceBase* device,
+ const RenderPipelineDescriptor& descriptor,
+ RenderPipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+
+ if (descriptor.layout == nullptr) {
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ DAWN_TRY_ASSIGN(layoutRef,
+ PipelineLayoutBase::CreateDefault(
+ device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+ }
+
} // anonymous namespace
// DeviceBase
DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
- : mInstance(adapter->GetInstance()), mAdapter(adapter) {
+ : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
if (descriptor != nullptr) {
ApplyToggleOverrides(descriptor);
- ApplyExtensions(descriptor);
+ ApplyFeatures(descriptor);
+ }
+
+ if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
+ mLimits.v1 = ReifyDefaultLimits(
+ reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits);
+ } else {
+ GetDefaultLimits(&mLimits.v1);
}
mFormatTable = BuildFormatTable(this);
@@ -161,7 +215,7 @@ namespace dawn_native {
}
};
- mDeviceLostCallback = [](char const*, void*) {
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
static bool calledOnce = false;
if (!calledOnce) {
calledOnce = true;
@@ -177,7 +231,7 @@ namespace dawn_native {
mDynamicUploader = std::make_unique<DynamicUploader>(this);
mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
- mInternalPipelineStore = std::make_unique<InternalPipelineStore>();
+ mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
mPersistentCache = std::make_unique<PersistentCache>(this);
ASSERT(GetPlatform() != nullptr);
@@ -190,6 +244,21 @@ namespace dawn_native {
DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
+ // If dummy fragment shader module is needed, initialize it
+ if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+ // The empty fragment shader, used as a work around for vertex-only render pipeline
+ constexpr char kEmptyFragmentShader[] = R"(
+ [[stage(fragment)]] fn fs_empty_main() {}
+ )";
+ ShaderModuleDescriptor descriptor;
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = kEmptyFragmentShader;
+ descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
+
+ DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
+ CreateShaderModule(&descriptor));
+ }
+
return {};
}
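
The dummy fragment shader above is supplied through a ShaderModuleWGSLDescriptor chained onto the base descriptor's nextInChain pointer. A minimal standalone sketch of that chained-struct extension mechanism, with simplified ChainedStruct/SType types rather than the real dawn_native definitions:

    #include <cstdint>
    #include <iostream>

    enum class SType : uint32_t { Invalid = 0, ShaderModuleWGSLDescriptor = 1 };

    struct ChainedStruct {
        const ChainedStruct* nextInChain = nullptr;
        SType sType = SType::Invalid;
    };

    struct ShaderModuleWGSLDescriptor : ChainedStruct {
        ShaderModuleWGSLDescriptor() { sType = SType::ShaderModuleWGSLDescriptor; }
        const char* source = nullptr;
    };

    struct ShaderModuleDescriptor {
        const ChainedStruct* nextInChain = nullptr;
        const char* label = nullptr;
    };

    void CreateShaderModule(const ShaderModuleDescriptor& descriptor) {
        // Walk the extension chain and handle the structs we recognize.
        for (const ChainedStruct* chain = descriptor.nextInChain; chain != nullptr;
             chain = chain->nextInChain) {
            if (chain->sType == SType::ShaderModuleWGSLDescriptor) {
                const auto* wgsl = static_cast<const ShaderModuleWGSLDescriptor*>(chain);
                std::cout << "compiling WGSL:" << wgsl->source << "\n";
            }
        }
    }

    int main() {
        constexpr char kEmptyFragmentShader[] = R"(
            [[stage(fragment)]] fn fs_empty_main() {}
        )";
        ShaderModuleWGSLDescriptor wgslDesc;
        wgslDesc.source = kEmptyFragmentShader;
        ShaderModuleDescriptor descriptor;
        descriptor.nextInChain = &wgslDesc;
        CreateShaderModule(descriptor);
        return 0;
    }
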
@@ -264,9 +333,18 @@ namespace dawn_native {
void DeviceBase::HandleError(InternalErrorType type, const char* message) {
if (type == InternalErrorType::DeviceLost) {
- // A real device lost happened. Set the state to disconnected as the device cannot be
- // used.
mState = State::Disconnected;
+
+ // If the ErrorInjector is enabled, then the device loss might be fake and the device
+ // still be executing commands. Force a wait for idle in this case, with State being
+ // Disconnected so we can detect this case in WaitForIdleForDestruction.
+ if (ErrorInjectorEnabled()) {
+ IgnoreErrors(WaitForIdleForDestruction());
+ }
+
+ // A real device lost happened. Set the state to disconnected as the device cannot be
+ // used. Also tags all commands as completed since the device stopped running.
+ AssumeCommandsComplete();
} else if (type == InternalErrorType::Internal) {
// If we receive an internal error, assume the backend can't recover and proceed with
// device destruction. We first wait for all previous commands to be completed so that
@@ -293,7 +371,9 @@ namespace dawn_native {
if (type == InternalErrorType::DeviceLost) {
// The device was lost, call the application callback.
if (mDeviceLostCallback != nullptr) {
- mDeviceLostCallback(message, mDeviceLostUserdata);
+ // TODO(crbug.com/dawn/628): Make sure the "Destroyed" reason is passed if
+ // the device was destroyed.
+ mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
mDeviceLostCallback = nullptr;
}
@@ -322,13 +402,7 @@ namespace dawn_native {
void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
ASSERT(error != nullptr);
- std::ostringstream ss;
- ss << error->GetMessage();
- for (const auto& callsite : error->GetBacktrace()) {
- ss << "\n at " << callsite.function << " (" << callsite.file << ":" << callsite.line
- << ")";
- }
- HandleError(error->GetType(), ss.str().c_str());
+ HandleError(error->GetType(), error->GetFormattedMessage().c_str());
}
void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
@@ -389,14 +463,15 @@ namespace dawn_native {
return mPersistentCache.get();
}
- MaybeError DeviceBase::ValidateObject(const ObjectBase* object) const {
+ MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
ASSERT(object != nullptr);
- if (DAWN_UNLIKELY(object->GetDevice() != this)) {
- return DAWN_VALIDATION_ERROR("Object from a different device.");
- }
- if (DAWN_UNLIKELY(object->IsError())) {
- return DAWN_VALIDATION_ERROR("Object is an error.");
- }
+ DAWN_INVALID_IF(object->GetDevice() != this,
+ "%s is associated with %s, and cannot be used with %s.", object,
+ object->GetDevice(), this);
+
+ // TODO(dawn:563): Preserve labels for error objects.
+ DAWN_INVALID_IF(object->IsError(), "%s is an error.", object);
+
return {};
}
@@ -424,6 +499,10 @@ namespace dawn_native {
return mState != State::Alive;
}
+ std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
+ return &mObjectLists[type].mutex;
+ }
+
AdapterBase* DeviceBase::GetAdapter() const {
return mAdapter;
}
@@ -519,8 +598,9 @@ namespace dawn_native {
}
ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor) {
- BindGroupLayoutBase blueprint(this, descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
@@ -530,7 +610,8 @@ namespace dawn_native {
if (iter != mCaches->bindGroupLayouts.end()) {
result = *iter;
} else {
- DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor));
+ DAWN_TRY_ASSIGN(result,
+ CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
result->SetIsCachedReference();
result->SetContentHash(blueprintHash);
mCaches->bindGroupLayouts.insert(result.Get());
@@ -575,7 +656,17 @@ namespace dawn_native {
return std::make_pair(result, blueprintHash);
}
- Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedPipeline(
+ Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
+ RenderPipelineBase* uninitializedRenderPipeline) {
+ Ref<RenderPipelineBase> cachedPipeline;
+ auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
+ if (iter != mCaches->renderPipelines.end()) {
+ cachedPipeline = *iter;
+ }
+ return cachedPipeline;
+ }
+
+ Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
Ref<ComputePipelineBase> computePipeline,
size_t blueprintHash) {
computePipeline->SetContentHash(blueprintHash);
@@ -588,6 +679,17 @@ namespace dawn_native {
}
}
+ Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
+ Ref<RenderPipelineBase> renderPipeline) {
+ auto insertion = mCaches->renderPipelines.insert(renderPipeline.Get());
+ if (insertion.second) {
+ renderPipeline->SetIsCachedReference();
+ return renderPipeline;
+ } else {
+ return *(insertion.first);
+ }
+ }
+
void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->computePipelines.erase(obj);
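
GetCachedRenderPipeline() and AddOrGetCachedRenderPipeline() split what GetOrCreateRenderPipeline() used to do (removed below) into a probe step and an insert-or-get step that bracket the caller's Initialize() call. A standalone sketch of the insert-or-get half, using raw pointers and a string key where Dawn uses Ref<> and content hashing:

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_set>

    struct Pipeline {
        std::string contentKey;  // stand-in for the blueprint/content hash
    };

    struct PipelineHash {
        std::size_t operator()(const Pipeline* p) const {
            return std::hash<std::string>()(p->contentKey);
        }
    };
    struct PipelineEqual {
        bool operator()(const Pipeline* a, const Pipeline* b) const {
            return a->contentKey == b->contentKey;
        }
    };

    int main() {
        std::unordered_set<Pipeline*, PipelineHash, PipelineEqual> cache;

        auto first = std::make_unique<Pipeline>(Pipeline{"rgba8:triangle-list:main"});
        auto second = std::make_unique<Pipeline>(Pipeline{"rgba8:triangle-list:main"});

        // Insert-or-get: a successful insert caches the new pipeline, otherwise
        // the equivalent pipeline that is already cached is returned instead.
        auto addOrGet = [&](Pipeline* pipeline) -> Pipeline* {
            auto insertion = cache.insert(pipeline);
            return insertion.second ? pipeline : *insertion.first;
        };

        Pipeline* a = addOrGet(first.get());
        Pipeline* b = addOrGet(second.get());  // deduplicated against `first`
        std::cout << (a == b ? "deduplicated" : "distinct") << "\n";
        return 0;
    }
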
@@ -621,27 +723,6 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
- ResultOrError<Ref<RenderPipelineBase>> DeviceBase::GetOrCreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
- RenderPipelineBase blueprint(this, descriptor);
-
- const size_t blueprintHash = blueprint.ComputeContentHash();
- blueprint.SetContentHash(blueprintHash);
-
- Ref<RenderPipelineBase> result;
- auto iter = mCaches->renderPipelines.find(&blueprint);
- if (iter != mCaches->renderPipelines.end()) {
- result = *iter;
- } else {
- DAWN_TRY_ASSIGN(result, CreateRenderPipelineImpl(descriptor));
- result->SetIsCachedReference();
- result->SetContentHash(blueprintHash);
- mCaches->renderPipelines.insert(result.Get());
- }
-
- return std::move(result);
- }
-
void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->renderPipelines.erase(obj);
@@ -757,7 +838,8 @@ namespace dawn_native {
BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
Ref<BindGroupBase> result;
- if (ConsumedError(CreateBindGroup(descriptor), &result)) {
+ if (ConsumedError(CreateBindGroup(descriptor), &result, "calling CreateBindGroup(%s).",
+ descriptor)) {
return BindGroupBase::MakeError(this);
}
return result.Detach();
@@ -765,14 +847,16 @@ namespace dawn_native {
BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
Ref<BindGroupLayoutBase> result;
- if (ConsumedError(CreateBindGroupLayout(descriptor), &result)) {
+ if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
+ "calling CreateBindGroupLayout(%s).", descriptor)) {
return BindGroupLayoutBase::MakeError(this);
}
return result.Detach();
}
BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
Ref<BufferBase> result = nullptr;
- if (ConsumedError(CreateBuffer(descriptor), &result)) {
+ if (ConsumedError(CreateBuffer(descriptor), &result, "calling CreateBuffer(%s).",
+ descriptor)) {
ASSERT(result == nullptr);
return BufferBase::MakeError(this, descriptor);
}
@@ -785,7 +869,8 @@ namespace dawn_native {
ComputePipelineBase* DeviceBase::APICreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipelineBase> result;
- if (ConsumedError(CreateComputePipeline(descriptor), &result)) {
+ if (ConsumedError(CreateComputePipeline(descriptor), &result,
+ "calling CreateComputePipeline(%s).", descriptor)) {
return ComputePipelineBase::MakeError(this);
}
return result.Detach();
@@ -807,21 +892,24 @@ namespace dawn_native {
PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
Ref<PipelineLayoutBase> result;
- if (ConsumedError(CreatePipelineLayout(descriptor), &result)) {
+ if (ConsumedError(CreatePipelineLayout(descriptor), &result,
+ "calling CreatePipelineLayout(%s).", descriptor)) {
return PipelineLayoutBase::MakeError(this);
}
return result.Detach();
}
QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
Ref<QuerySetBase> result;
- if (ConsumedError(CreateQuerySet(descriptor), &result)) {
+ if (ConsumedError(CreateQuerySet(descriptor), &result, "calling CreateQuerySet(%s).",
+ descriptor)) {
return QuerySetBase::MakeError(this);
}
return result.Detach();
}
SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
Ref<SamplerBase> result;
- if (ConsumedError(CreateSampler(descriptor), &result)) {
+ if (ConsumedError(CreateSampler(descriptor), &result, "calling CreateSampler(%s).",
+ descriptor)) {
return SamplerBase::MakeError(this);
}
return result.Detach();
@@ -829,24 +917,23 @@ namespace dawn_native {
void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata) {
- ResultOrError<Ref<RenderPipelineBase>> maybeResult = CreateRenderPipeline(descriptor);
+ // TODO(dawn:563): Add validation error context.
+ MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
+
+ // Call the callback directly when a validation error has been found in the front-end
+ // validations. If there is no error, then CreateRenderPipelineAsync will call the
+ // callback.
if (maybeResult.IsError()) {
std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
userdata);
- return;
}
-
- Ref<RenderPipelineBase> result = maybeResult.AcquireSuccess();
- std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
- std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), "", callback,
- userdata);
- mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
}
RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor) {
Ref<RenderBundleEncoder> result;
- if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result)) {
+ if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
+ "calling CreateRenderBundleEncoder(%s).", descriptor)) {
return RenderBundleEncoder::MakeError(this);
}
return result.Detach();
@@ -854,7 +941,8 @@ namespace dawn_native {
RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
const RenderPipelineDescriptor* descriptor) {
Ref<RenderPipelineBase> result;
- if (ConsumedError(CreateRenderPipeline(descriptor), &result)) {
+ if (ConsumedError(CreateRenderPipeline(descriptor), &result,
+ "calling CreateRenderPipeline(%s).", descriptor)) {
return RenderPipelineBase::MakeError(this);
}
return result.Detach();
@@ -863,7 +951,8 @@ namespace dawn_native {
Ref<ShaderModuleBase> result;
std::unique_ptr<OwnedCompilationMessages> compilationMessages(
std::make_unique<OwnedCompilationMessages>());
- if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result)) {
+ if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
+ "calling CreateShaderModule(%s).", descriptor)) {
DAWN_ASSERT(result == nullptr);
result = ShaderModuleBase::MakeError(this);
}
@@ -876,14 +965,16 @@ namespace dawn_native {
SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
const SwapChainDescriptor* descriptor) {
Ref<SwapChainBase> result;
- if (ConsumedError(CreateSwapChain(surface, descriptor), &result)) {
+ if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
+ "calling CreateSwapChain(%s).", descriptor)) {
return SwapChainBase::MakeError(this);
}
return result.Detach();
}
TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
Ref<TextureBase> result;
- if (ConsumedError(CreateTexture(descriptor), &result)) {
+ if (ConsumedError(CreateTexture(descriptor), &result, "calling CreateTexture(%s).",
+ descriptor)) {
return TextureBase::MakeError(this);
}
return result.Detach();
@@ -950,27 +1041,34 @@ namespace dawn_native {
ExternalTextureBase* DeviceBase::APICreateExternalTexture(
const ExternalTextureDescriptor* descriptor) {
Ref<ExternalTextureBase> result = nullptr;
- if (ConsumedError(CreateExternalTexture(descriptor), &result)) {
+ if (ConsumedError(CreateExternalTexture(descriptor), &result,
+ "calling CreateExternalTexture(%s).", descriptor)) {
return ExternalTextureBase::MakeError(this);
}
return result.Detach();
}
- void DeviceBase::ApplyExtensions(const DeviceDescriptor* deviceDescriptor) {
+ void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
ASSERT(deviceDescriptor);
- ASSERT(GetAdapter()->SupportsAllRequestedExtensions(deviceDescriptor->requiredExtensions));
+ // TODO(dawn:1149): remove once requiredExtensions is no longer used.
+ ASSERT(GetAdapter()->SupportsAllRequestedFeatures(deviceDescriptor->requiredExtensions));
+ ASSERT(GetAdapter()->SupportsAllRequestedFeatures(deviceDescriptor->requiredFeatures));
- mEnabledExtensions = GetAdapter()->GetInstance()->ExtensionNamesToExtensionsSet(
+ // TODO(dawn:1149): remove once requiredExtensions is no longer used.
+ mEnabledExtensions = GetAdapter()->GetInstance()->FeatureNamesToFeaturesSet(
deviceDescriptor->requiredExtensions);
+ mEnabledFeatures = GetAdapter()->GetInstance()->FeatureNamesToFeaturesSet(
+ deviceDescriptor->requiredFeatures);
}
- std::vector<const char*> DeviceBase::GetEnabledExtensions() const {
- return mEnabledExtensions.GetEnabledExtensionNames();
+ std::vector<const char*> DeviceBase::GetEnabledFeatures() const {
+ return mEnabledFeatures.GetEnabledFeatureNames();
}
- bool DeviceBase::IsExtensionEnabled(Extension extension) const {
- return mEnabledExtensions.IsEnabled(extension);
+ bool DeviceBase::IsFeatureEnabled(Feature feature) const {
+ // TODO(dawn:1149): remove mEnabledExtensions once it is no longer used.
+ return mEnabledFeatures.IsEnabled(feature) || mEnabledExtensions.IsEnabled(feature);
}
bool DeviceBase::IsValidationEnabled() const {
@@ -1014,6 +1112,15 @@ namespace dawn_native {
}
}
+ bool DeviceBase::APIGetLimits(SupportedLimits* limits) {
+ ASSERT(limits != nullptr);
+ if (limits->nextInChain != nullptr) {
+ return false;
+ }
+ limits->limits = mLimits.v1;
+ return true;
+ }
+
void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
if (ConsumedError(ValidateErrorType(type))) {
return;
@@ -1040,7 +1147,8 @@ namespace dawn_native {
const BindGroupDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateBindGroupDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
+ "validating %s against %s", descriptor, descriptor->layout);
}
return CreateBindGroupImpl(descriptor);
}
@@ -1058,7 +1166,8 @@ namespace dawn_native {
ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
+ descriptor);
}
Ref<BufferBase> buffer;
@@ -1082,8 +1191,8 @@ namespace dawn_native {
// the pipeline will take another reference.
Ref<PipelineLayoutBase> layoutRef;
ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateAndGetComputePipelineDescriptorWithDefaults(
- *descriptor, &appliedDescriptor));
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
auto pipelineAndBlueprintFromCache = GetCachedComputePipeline(&appliedDescriptor);
if (pipelineAndBlueprintFromCache.first.Get() != nullptr) {
@@ -1093,7 +1202,7 @@ namespace dawn_native {
Ref<ComputePipelineBase> backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateComputePipelineImpl(&appliedDescriptor));
size_t blueprintHash = pipelineAndBlueprintFromCache.second;
- return AddOrGetCachedPipeline(backendObj, blueprintHash);
+ return AddOrGetCachedComputePipeline(backendObj, blueprintHash);
}
MaybeError DeviceBase::CreateComputePipelineAsync(
@@ -1105,12 +1214,10 @@ namespace dawn_native {
DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
}
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
Ref<PipelineLayoutBase> layoutRef;
ComputePipelineDescriptor appliedDescriptor;
- DAWN_TRY_ASSIGN(layoutRef, ValidateAndGetComputePipelineDescriptorWithDefaults(
- *descriptor, &appliedDescriptor));
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
// Call the callback directly when we can get a cached compute pipeline object.
auto pipelineAndBlueprintFromCache = GetCachedComputePipeline(&appliedDescriptor);
@@ -1129,32 +1236,8 @@ namespace dawn_native {
return {};
}
- ResultOrError<Ref<PipelineLayoutBase>>
- DeviceBase::ValidateAndGetComputePipelineDescriptorWithDefaults(
- const ComputePipelineDescriptor& descriptor,
- ComputePipelineDescriptor* outDescriptor) {
- Ref<PipelineLayoutBase> layoutRef;
- *outDescriptor = descriptor;
- // TODO(dawn:800): Remove after deprecation period.
- if (outDescriptor->compute.module == nullptr &&
- outDescriptor->computeStage.module != nullptr) {
- outDescriptor->compute.module = outDescriptor->computeStage.module;
- outDescriptor->compute.entryPoint = outDescriptor->computeStage.entryPoint;
- }
-
- if (outDescriptor->layout == nullptr) {
- DAWN_TRY_ASSIGN(layoutRef,
- PipelineLayoutBase::CreateDefault(
- this, {{SingleShaderStage::Compute, outDescriptor->compute.module,
- outDescriptor->compute.entryPoint}}));
- outDescriptor->layout = layoutRef.Get();
- }
-
- return layoutRef;
- }
-
- // This function is overwritten with the async version on the backends
- // that supports creating compute pipeline asynchronously
+        // This function is overridden with the async version on backends that support creating
+        // compute pipelines asynchronously.
void DeviceBase::CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
@@ -1167,7 +1250,7 @@ namespace dawn_native {
std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
errorMessage = error->GetMessage();
} else {
- result = AddOrGetCachedPipeline(resultOrError.AcquireSuccess(), blueprintHash);
+ result = AddOrGetCachedComputePipeline(resultOrError.AcquireSuccess(), blueprintHash);
}
std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
@@ -1176,6 +1259,29 @@ namespace dawn_native {
mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
}
+    // This function is overridden with the async version on backends
+    // that support initializing render pipelines asynchronously.
+ void DeviceBase::InitializeRenderPipelineAsyncImpl(
+ Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<RenderPipelineBase> result;
+ std::string errorMessage;
+
+ MaybeError maybeError = renderPipeline->Initialize();
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedRenderPipeline(renderPipeline);
+ }
+
+ std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+ std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
+ callback, userdata);
+ mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+ }
+
ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
@@ -1188,7 +1294,8 @@ namespace dawn_native {
ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTexture(
const ExternalTextureDescriptor* descriptor) {
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateExternalTextureDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
+ descriptor);
}
return ExternalTextureBase::Create(this, descriptor);
@@ -1198,7 +1305,8 @@ namespace dawn_native {
const QuerySetDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateQuerySetDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
+ descriptor);
}
return CreateQuerySetImpl(descriptor);
}
@@ -1219,20 +1327,60 @@ namespace dawn_native {
DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
}
- if (descriptor->layout == nullptr) {
- RenderPipelineDescriptor descriptorWithDefaultLayout = *descriptor;
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef;
- DAWN_TRY_ASSIGN(layoutRef,
- PipelineLayoutBase::CreateDefault(this, GetStages(descriptor)));
- descriptorWithDefaultLayout.layout = layoutRef.Get();
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipeline(&appliedDescriptor);
+
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ return cachedRenderPipeline;
+ }
+
+ DAWN_TRY(uninitializedRenderPipeline->Initialize());
+ return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+ }
+
+ MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ RenderPipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ this, *descriptor, &appliedDescriptor));
- return GetOrCreateRenderPipeline(&descriptorWithDefaultLayout);
+ Ref<RenderPipelineBase> uninitializedRenderPipeline =
+ CreateUninitializedRenderPipeline(&appliedDescriptor);
+
+ // Call the callback directly when we can get a cached render pipeline object.
+ Ref<RenderPipelineBase> cachedRenderPipeline =
+ GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+ if (cachedRenderPipeline != nullptr) {
+ callback(WGPUCreatePipelineAsyncStatus_Success,
+ reinterpret_cast<WGPURenderPipeline>(cachedRenderPipeline.Detach()), "",
+ userdata);
} else {
- return GetOrCreateRenderPipeline(descriptor);
+ // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
+ // where the pipeline object may be initialized asynchronously and the result will be
+ // saved to mCreatePipelineAsyncTracker.
+ InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
+ userdata);
}
+
+ return {};
}
ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
@@ -1240,7 +1388,8 @@ namespace dawn_native {
DAWN_TRY(ValidateIsAlive());
descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSamplerDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
+ descriptor);
}
return GetOrCreateSampler(descriptor);
}
@@ -1297,7 +1446,8 @@ namespace dawn_native {
ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
+ DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
+ descriptor);
}
return CreateTextureImpl(descriptor);
}
@@ -1309,7 +1459,8 @@ namespace dawn_native {
DAWN_TRY(ValidateObject(texture));
TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureViewDescriptor(this, texture, &desc));
+ DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
+ "validating %s against %s.", &desc, texture);
}
return CreateTextureViewImpl(texture, &desc);
}
@@ -1394,6 +1545,11 @@ namespace dawn_native {
return mWorkerTaskPool.get();
}
+ Ref<RenderPipelineBase> DeviceBase::CreateUninitializedRenderPipeline(
+ const RenderPipelineDescriptor* descriptor) {
+ return CreateUninitializedRenderPipelineImpl(descriptor);
+ }
+
void DeviceBase::AddComputePipelineAsyncCallbackTask(
Ref<ComputePipelineBase> pipeline,
std::string errorMessage,
@@ -1401,7 +1557,7 @@ namespace dawn_native {
void* userdata,
size_t blueprintHash) {
// CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
- // needs to call the private member function DeviceBase::AddOrGetCachedPipeline().
+ // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
struct CreateComputePipelineAsyncWaitableCallbackTask final
: CreateComputePipelineAsyncCallbackTask {
CreateComputePipelineAsyncWaitableCallbackTask(
@@ -1418,12 +1574,12 @@ namespace dawn_native {
}
void Finish() final {
- // TODO(jiawei.shao@intel.com): call AddOrGetCachedPipeline() asynchronously in
+ // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
// CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
// thread-safe.
if (mPipeline.Get() != nullptr) {
- mPipeline =
- mPipeline->GetDevice()->AddOrGetCachedPipeline(mPipeline, mBlueprintHash);
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(
+ mPipeline, mBlueprintHash);
}
CreateComputePipelineAsyncCallbackTask::Finish();
@@ -1438,4 +1594,48 @@ namespace dawn_native {
std::move(pipeline), errorMessage, callback, userdata, blueprintHash));
}
+ void DeviceBase::AddRenderPipelineAsyncCallbackTask(
+ Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
+ // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
+ struct CreateRenderPipelineAsyncWaitableCallbackTask final
+ : CreateRenderPipelineAsyncCallbackTask {
+ using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
+
+ void Finish() final {
+ // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
+ // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+ // thread-safe.
+ if (mPipeline.Get() != nullptr) {
+ mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
+ }
+
+ CreateRenderPipelineAsyncCallbackTask::Finish();
+ }
+ };
+
+ mCallbackTaskManager->AddCallbackTask(
+ std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
+ std::move(pipeline), errorMessage, callback, userdata));
+ }
+
+ PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
+ return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
+ }
+
+ const std::string& DeviceBase::GetLabel() const {
+ return mLabel;
+ }
+
+ void DeviceBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+ }
+
+ void DeviceBase::SetLabelImpl() {
+ }
+
} // namespace dawn_native
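
Aside: the Device.cpp hunks above split render pipeline creation into an uninitialized object, a cache lookup, a deferred Initialize(), and a final insert into the cache. The following is only a minimal standalone sketch of that pattern; the types, the string key, and the shared_ptr cache are illustrative stand-ins, not Dawn's real API (Dawn keys its cache on a content hash of the uninitialized pipeline and returns Ref<RenderPipelineBase>).

// Sketch of the "create uninitialized -> look up cache -> initialize -> publish" flow.
#include <memory>
#include <string>
#include <unordered_map>

struct Pipeline {
    std::string key;            // stands in for the descriptor-derived content hash
    bool initialized = false;
    void Initialize() { initialized = true; }  // the expensive backend compilation step
};

class PipelineCache {
  public:
    std::shared_ptr<Pipeline> GetOrCreate(const std::string& key) {
        // 1. Build a cheap, uninitialized object that carries enough state to be looked up.
        auto uninitialized = std::make_shared<Pipeline>();
        uninitialized->key = key;

        // 2. Return the cached, already-initialized pipeline when one matches.
        auto it = mCache.find(uninitialized->key);
        if (it != mCache.end()) {
            return it->second;
        }

        // 3. Otherwise do the expensive initialization and publish the result.
        uninitialized->Initialize();
        mCache.emplace(uninitialized->key, uninitialized);
        return uninitialized;
    }

  private:
    std::unordered_map<std::string, std::shared_ptr<Pipeline>> mCache;
};

int main() {
    PipelineCache cache;
    auto first = cache.GetOrCreate("rgba8-pipeline");   // initializes and caches
    auto second = cache.GetOrCreate("rgba8-pipeline");  // cache hit, same object
    return first == second ? 0 : 1;
}

The async path in the diff follows the same shape, except that a cache hit invokes the callback immediately while a miss hands the uninitialized pipeline to InitializeRenderPipelineAsyncImpl.
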
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 3872ed17898..eb7c9dcde6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -17,15 +17,19 @@
#include "dawn_native/Commands.h"
#include "dawn_native/Error.h"
-#include "dawn_native/Extensions.h"
+#include "dawn_native/Features.h"
#include "dawn_native/Format.h"
#include "dawn_native/Forward.h"
+#include "dawn_native/Limits.h"
#include "dawn_native/ObjectBase.h"
+#include "dawn_native/ObjectType_autogen.h"
+#include "dawn_native/StagingBuffer.h"
#include "dawn_native/Toggles.h"
#include "dawn_native/DawnNative.h"
#include "dawn_native/dawn_platform.h"
+#include <mutex>
#include <utility>
namespace dawn_platform {
@@ -74,7 +78,45 @@ namespace dawn_native {
return false;
}
- MaybeError ValidateObject(const ObjectBase* object) const;
+ template <typename... Args>
+ bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ }
+ }
+ ConsumeError(std::move(error));
+ return true;
+ }
+ return false;
+ }
+
+ template <typename T, typename... Args>
+ bool ConsumedError(ResultOrError<T> resultOrError,
+ T* result,
+ const char* formatStr,
+ const Args&... args) {
+ if (DAWN_UNLIKELY(resultOrError.IsError())) {
+ std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ }
+ }
+ ConsumeError(std::move(error));
+ return true;
+ }
+ *result = resultOrError.AcquireSuccess();
+ return false;
+ }
+
+ MaybeError ValidateObject(const ApiObjectBase* object) const;
AdapterBase* GetAdapter() const;
dawn_platform::Platform* GetPlatform() const;
@@ -113,7 +155,8 @@ namespace dawn_native {
// instead of a backend Foo object. If the blueprint doesn't match an object in the
// cache, then the descriptor is used to make a new object.
ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
- const BindGroupLayoutDescriptor* descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
BindGroupLayoutBase* GetEmptyBindGroupLayout();
@@ -124,8 +167,6 @@ namespace dawn_native {
const PipelineLayoutDescriptor* descriptor);
void UncachePipelineLayout(PipelineLayoutBase* obj);
- ResultOrError<Ref<RenderPipelineBase>> GetOrCreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor);
void UncacheRenderPipeline(RenderPipelineBase* obj);
ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
@@ -165,6 +206,9 @@ namespace dawn_native {
const RenderBundleEncoderDescriptor* descriptor);
ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
const RenderPipelineDescriptor* descriptor);
+ MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor);
ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
const ShaderModuleDescriptor* descriptor,
@@ -206,6 +250,7 @@ namespace dawn_native {
QueueBase* APIGetQueue();
+ bool APIGetLimits(SupportedLimits* limits);
void APIInjectError(wgpu::ErrorType type, const char* message);
bool APITick();
@@ -253,10 +298,11 @@ namespace dawn_native {
};
State GetState() const;
bool IsLost() const;
+ std::mutex* GetObjectListMutex(ObjectType type);
- std::vector<const char*> GetEnabledExtensions() const;
+ std::vector<const char*> GetEnabledFeatures() const;
std::vector<const char*> GetTogglesUsed() const;
- bool IsExtensionEnabled(Extension extension) const;
+ bool IsFeatureEnabled(Feature feature) const;
bool IsToggleEnabled(Toggle toggle) const;
bool IsValidationEnabled() const;
bool IsRobustnessEnabled() const;
@@ -296,6 +342,15 @@ namespace dawn_native {
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata,
size_t blueprintHash);
+ void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+
+ PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
+
+ const std::string& GetLabel() const;
+ void APISetLabel(const char* label);
protected:
void SetToggle(Toggle toggle, bool isEnabled);
@@ -311,7 +366,8 @@ namespace dawn_native {
virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) = 0;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
@@ -320,7 +376,7 @@ namespace dawn_native {
const PipelineLayoutDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) = 0;
- virtual ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) = 0;
@@ -339,26 +395,35 @@ namespace dawn_native {
virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) = 0;
+ virtual void SetLabelImpl();
virtual MaybeError TickImpl() = 0;
void FlushCallbackTaskQueue();
ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
- ResultOrError<Ref<PipelineLayoutBase>> ValidateAndGetComputePipelineDescriptorWithDefaults(
- const ComputePipelineDescriptor& descriptor,
- ComputePipelineDescriptor* outDescriptor);
std::pair<Ref<ComputePipelineBase>, size_t> GetCachedComputePipeline(
const ComputePipelineDescriptor* descriptor);
- Ref<ComputePipelineBase> AddOrGetCachedPipeline(Ref<ComputePipelineBase> computePipeline,
- size_t blueprintHash);
+ Ref<RenderPipelineBase> GetCachedRenderPipeline(
+ RenderPipelineBase* uninitializedRenderPipeline);
+ Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
+ Ref<ComputePipelineBase> computePipeline,
+ size_t blueprintHash);
+ Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
+ Ref<RenderPipelineBase> renderPipeline);
virtual void CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipeline(
+ const RenderPipelineDescriptor* descriptor);
+ virtual void InitializeRenderPipelineAsyncImpl(
+ Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
void ApplyToggleOverrides(const DeviceDescriptor* deviceDescriptor);
- void ApplyExtensions(const DeviceDescriptor* deviceDescriptor);
+ void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
void SetDefaultToggles();
@@ -429,13 +494,24 @@ namespace dawn_native {
State mState = State::BeingCreated;
+ // Encompasses the mutex and the actual list that contains all live objects "owned" by the
+ // device.
+ struct ApiObjectList {
+ std::mutex mutex;
+ LinkedList<ApiObjectBase> objects;
+ };
+ PerObjectType<ApiObjectList> mObjectLists;
+
FormatTable mFormatTable;
TogglesSet mEnabledToggles;
TogglesSet mOverridenToggles;
size_t mLazyClearCountForTesting = 0;
+ std::atomic_uint64_t mNextPipelineCompatibilityToken;
- ExtensionsSet mEnabledExtensions;
+ CombinedLimits mLimits;
+ FeaturesSet mEnabledExtensions;
+ FeaturesSet mEnabledFeatures;
std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
@@ -443,6 +519,7 @@ namespace dawn_native {
std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
std::unique_ptr<dawn_platform::WorkerTaskPool> mWorkerTaskPool;
+ std::string mLabel;
};
} // namespace dawn_native
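
Aside: the new templated ConsumedError overloads in Device.h append a formatted context string to validation errors through Abseil's untyped formatting API. Below is a small self-contained sketch of just that mechanism; the error plumbing is simplified away and only the absl calls mirror the diff.

// Sketch of building a context string with absl's untyped format API.
#include <cstdio>
#include <string>
#include "absl/strings/str_format.h"

template <typename... Args>
std::string FormatContext(const char* formatStr, const Args&... args) {
    std::string out;
    absl::UntypedFormatSpec format(formatStr);
    // FormatUntyped returns false if the format string and arguments do not match; in that
    // case the extra context is simply skipped, as the Dawn code does.
    if (!absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
        out.clear();
    }
    return out;
}

int main() {
    std::string context = FormatContext("validating texture %s with %d mip levels",
                                        "myTexture", 3);
    std::printf("%s\n", context.c_str());  // validating texture myTexture with 3 mip levels
    return 0;
}
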
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
index fe347db9e51..2724291234b 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/IndirectDrawValidationEncoder.h"
#include "dawn_native/RenderBundleEncoder.h"
namespace dawn_native {
@@ -47,13 +48,20 @@ namespace dawn_native {
}
void EncodingContext::MoveToIterator() {
+ CommitCommands(std::move(mPendingCommands));
if (!mWasMovedToIterator) {
- mIterator = std::move(mAllocator);
+ mIterator.AcquireCommandBlocks(std::move(mAllocators));
mWasMovedToIterator = true;
}
}
void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
+ // Append in reverse so that the most recently set debug group is printed first, like a
+ // call stack.
+ for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
+ error->AppendDebugGroup(*iter);
+ }
+
if (!IsFinished()) {
// Encoding should only generate validation errors.
ASSERT(error->GetType() == InternalErrorType::Validation);
@@ -63,7 +71,19 @@ namespace dawn_native {
mError = std::move(error);
}
} else {
- mDevice->HandleError(error->GetType(), error->GetMessage().c_str());
+ mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+ }
+ }
+
+ void EncodingContext::WillBeginRenderPass() {
+ ASSERT(mCurrentEncoder == mTopLevelEncoder);
+ if (mDevice->IsValidationEnabled()) {
+ // When validation is enabled, we are going to want to capture all commands encoded
+ // between and including BeginRenderPassCmd and EndRenderPassCmd, and defer their
+ // sequencing until after we have a chance to insert any necessary validation

+ // commands. To support this we commit any current commands now, so that the
+ // impending BeginRenderPassCmd starts in a fresh CommandAllocator.
+ CommitCommands(std::move(mPendingCommands));
}
}
@@ -75,15 +95,34 @@ namespace dawn_native {
mCurrentEncoder = passEncoder;
}
- void EncodingContext::ExitPass(const ObjectBase* passEncoder, RenderPassResourceUsage usages) {
+ MaybeError EncodingContext::ExitRenderPass(const ObjectBase* passEncoder,
+ RenderPassResourceUsageTracker usageTracker,
+ CommandEncoder* commandEncoder,
+ IndirectDrawMetadata indirectDrawMetadata) {
ASSERT(mCurrentEncoder != mTopLevelEncoder);
ASSERT(mCurrentEncoder == passEncoder);
mCurrentEncoder = mTopLevelEncoder;
- mRenderPassUsages.push_back(std::move(usages));
+
+ if (mDevice->IsValidationEnabled()) {
+ // With validation enabled, commands were committed just before BeginRenderPassCmd was
+ // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
+ // mPendingCommands contains only the commands from BeginRenderPassCmd to
+ // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
+ // the validation encoder a chance to insert its commands first.
+ CommandAllocator renderCommands = std::move(mPendingCommands);
+ DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
+ &indirectDrawMetadata));
+ CommitCommands(std::move(mPendingCommands));
+ CommitCommands(std::move(renderCommands));
+ }
+
+ mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
+ return {};
}
- void EncodingContext::ExitPass(const ObjectBase* passEncoder, ComputePassResourceUsage usages) {
+ void EncodingContext::ExitComputePass(const ObjectBase* passEncoder,
+ ComputePassResourceUsage usages) {
ASSERT(mCurrentEncoder != mTopLevelEncoder);
ASSERT(mCurrentEncoder == passEncoder);
@@ -113,6 +152,14 @@ namespace dawn_native {
return std::move(mComputePassUsages);
}
+ void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
+ mDebugGroupLabels.emplace_back(groupLabel);
+ }
+
+ void EncodingContext::PopDebugGroupLabel() {
+ mDebugGroupLabels.pop_back();
+ }
+
MaybeError EncodingContext::Finish() {
if (IsFinished()) {
return DAWN_VALIDATION_ERROR("Command encoding already finished");
@@ -126,6 +173,7 @@ namespace dawn_native {
// if Finish() has been called.
mCurrentEncoder = nullptr;
mTopLevelEncoder = nullptr;
+ CommitCommands(std::move(mPendingCommands));
if (mError != nullptr) {
return std::move(mError);
@@ -136,6 +184,12 @@ namespace dawn_native {
return {};
}
+ void EncodingContext::CommitCommands(CommandAllocator allocator) {
+ if (!allocator.IsEmpty()) {
+ mAllocators.push_back(std::move(allocator));
+ }
+ }
+
bool EncodingContext::IsFinished() const {
return mTopLevelEncoder == nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
index b97e317abb0..b058e2bc610 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
@@ -18,6 +18,7 @@
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/Error.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/IndirectDrawMetadata.h"
#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/dawn_platform.h"
@@ -25,6 +26,7 @@
namespace dawn_native {
+ class CommandEncoder;
class DeviceBase;
class ObjectBase;
@@ -49,6 +51,25 @@ namespace dawn_native {
return false;
}
+ template <typename... Args>
+ inline bool ConsumedError(MaybeError maybeError,
+ const char* formatStr,
+ const Args&... args) {
+ if (DAWN_UNLIKELY(maybeError.IsError())) {
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ if (error->GetType() == InternalErrorType::Validation) {
+ std::string out;
+ absl::UntypedFormatSpec format(formatStr);
+ if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+ error->AppendContext(std::move(out));
+ }
+ }
+ HandleError(std::move(error));
+ return true;
+ }
+ return false;
+ }
+
inline bool CheckCurrentEncoder(const ObjectBase* encoder) {
if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
if (mCurrentEncoder != mTopLevelEncoder) {
@@ -69,13 +90,33 @@ namespace dawn_native {
return false;
}
ASSERT(!mWasMovedToIterator);
- return !ConsumedError(encodeFunction(&mAllocator));
+ return !ConsumedError(encodeFunction(&mPendingCommands));
}
+ template <typename EncodeFunction, typename... Args>
+ inline bool TryEncode(const ObjectBase* encoder,
+ EncodeFunction&& encodeFunction,
+ const char* formatStr,
+ const Args&... args) {
+ if (!CheckCurrentEncoder(encoder)) {
+ return false;
+ }
+ ASSERT(!mWasMovedToIterator);
+ return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+ }
+
+ // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
+ // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
+ // failed validation before the BeginRenderPassCmd could be encoded.
+ void WillBeginRenderPass();
+
// Functions to set current encoder state
void EnterPass(const ObjectBase* passEncoder);
- void ExitPass(const ObjectBase* passEncoder, RenderPassResourceUsage usages);
- void ExitPass(const ObjectBase* passEncoder, ComputePassResourceUsage usages);
+ MaybeError ExitRenderPass(const ObjectBase* passEncoder,
+ RenderPassResourceUsageTracker usageTracker,
+ CommandEncoder* commandEncoder,
+ IndirectDrawMetadata indirectDrawMetadata);
+ void ExitComputePass(const ObjectBase* passEncoder, ComputePassResourceUsage usages);
MaybeError Finish();
const RenderPassUsages& GetRenderPassUsages() const;
@@ -83,7 +124,12 @@ namespace dawn_native {
RenderPassUsages AcquireRenderPassUsages();
ComputePassUsages AcquireComputePassUsages();
+ void PushDebugGroupLabel(const char* groupLabel);
+ void PopDebugGroupLabel();
+
private:
+ void CommitCommands(CommandAllocator allocator);
+
bool IsFinished() const;
void MoveToIterator();
@@ -104,12 +150,15 @@ namespace dawn_native {
ComputePassUsages mComputePassUsages;
bool mWereComputePassUsagesAcquired = false;
- CommandAllocator mAllocator;
+ CommandAllocator mPendingCommands;
+
+ std::vector<CommandAllocator> mAllocators;
CommandIterator mIterator;
bool mWasMovedToIterator = false;
bool mWereCommandsAcquired = false;
std::unique_ptr<ErrorData> mError;
+ std::vector<std::string> mDebugGroupLabels;
};
} // namespace dawn_native
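
Aside: EncodingContext now accumulates work in mPendingCommands and commits finished allocators into mAllocators, which is what lets ExitRenderPass splice indirect-draw validation commands in front of an already-encoded render pass. The sketch below only illustrates that splicing order; strings stand in for command allocators and the validation encoder.

// Sketch of the allocator-splicing order: commit pending work before the pass, hold the
// pass's commands aside, encode the validation commands, then commit validation first
// and the recorded render pass second.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct EncodingSketch {
    std::string pending;                 // stands in for mPendingCommands
    std::vector<std::string> committed;  // stands in for mAllocators

    void Commit(std::string commands) {
        if (!commands.empty()) {
            committed.push_back(std::move(commands));
        }
    }

    void WillBeginRenderPass() { Commit(std::move(pending)); }

    void ExitRenderPass() {
        std::string renderCommands = std::move(pending);  // only the render pass commands
        pending = "[indirect draw validation]";           // produced by the validation encoder
        Commit(std::move(pending));                       // validation runs first...
        Commit(std::move(renderCommands));                // ...then the recorded render pass
    }
};

int main() {
    EncodingSketch ctx;
    ctx.pending = "[copy buffer]";
    ctx.WillBeginRenderPass();
    ctx.pending = "[begin pass][draw indirect][end pass]";
    ctx.ExitRenderPass();
    for (const std::string& block : ctx.committed) {
        std::cout << block << "\n";
    }
    return 0;
}
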
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.h b/chromium/third_party/dawn/src/dawn_native/Error.h
index f7bc4a3a23c..f5ed5a91a3c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/Error.h
@@ -15,8 +15,10 @@
#ifndef DAWNNATIVE_ERROR_H_
#define DAWNNATIVE_ERROR_H_
+#include "absl/strings/str_format.h"
#include "common/Result.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/webgpu_absl_format_autogen.h"
#include <string>
@@ -74,6 +76,18 @@ namespace dawn_native {
#define DAWN_VALIDATION_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Validation, MESSAGE)
+// TODO(dawn:563): Rename to DAWN_VALIDATION_ERROR once all message format strings have been
+// converted to constexpr.
+#define DAWN_FORMAT_VALIDATION_ERROR(...) \
+ DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__))
+
+#define DAWN_INVALID_IF(EXPR, ...) \
+ if (DAWN_UNLIKELY(EXPR)) { \
+ return DAWN_MAKE_ERROR(InternalErrorType::Validation, absl::StrFormat(__VA_ARGS__)); \
+ } \
+ for (;;) \
+ break
+
// DAWN_DEVICE_LOST_ERROR means that there was a real unrecoverable native device lost error.
// We can't even do a graceful shutdown because the Device is gone.
#define DAWN_DEVICE_LOST_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::DeviceLost, MESSAGE)
@@ -99,18 +113,24 @@ namespace dawn_native {
// the current function.
#define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
-#define DAWN_TRY_WITH_CLEANUP(EXPR, BODY) \
- { \
- auto DAWN_LOCAL_VAR = EXPR; \
- if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
- {BODY} /* comment to force the formatter to insert a newline */ \
- std::unique_ptr<::dawn_native::ErrorData> \
- error = DAWN_LOCAL_VAR.AcquireError(); \
- error->AppendBacktrace(__FILE__, __func__, __LINE__); \
- return {std::move(error)}; \
- } \
- } \
- for (;;) \
+#define DAWN_TRY_CONTEXT(EXPR, ...) \
+ DAWN_TRY_WITH_CLEANUP(EXPR, { \
+ if (error->GetType() == InternalErrorType::Validation) { \
+ error->AppendContext(absl::StrFormat(__VA_ARGS__)); \
+ } \
+ })
+
+#define DAWN_TRY_WITH_CLEANUP(EXPR, BODY) \
+ { \
+ auto DAWN_LOCAL_VAR = EXPR; \
+ if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
+ std::unique_ptr<::dawn_native::ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+ {BODY} /* comment to force the formatter to insert a newline */ \
+ error->AppendBacktrace(__FILE__, __func__, __LINE__); \
+ return {std::move(error)}; \
+ } \
+ } \
+ for (;;) \
break
// DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
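
Aside: the trailing for (;;) break in DAWN_INVALID_IF and DAWN_TRY_WITH_CLEANUP does nothing at runtime; its job is to make the caller's trailing semicolon terminate the macro, so invocations read like ordinary statements and a missing ';' fails to compile. A reduced sketch of the idiom, with a bool return standing in for Dawn's MaybeError:

// Reduced sketch of the "if (...) { return ...; } for (;;) break" macro idiom.
#include <cstdio>

#define SKETCH_INVALID_IF(EXPR, MSG) \
    if (EXPR) {                      \
        std::printf("%s\n", MSG);    \
        return false;                \
    }                                \
    for (;;)                         \
    break

bool ValidateMipLevelCount(int mipLevelCount) {
    SKETCH_INVALID_IF(mipLevelCount < 1, "mip level count must be at least 1");
    SKETCH_INVALID_IF(mipLevelCount > 16, "mip level count must be at most 16");
    return true;
}

int main() {
    std::printf("%d\n", ValidateMipLevelCount(0));  // prints the message, then 0
    std::printf("%d\n", ValidateMipLevelCount(4));  // prints 1
    return 0;
}
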
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
index 41d0c297021..8c9bbf6ed97 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/ErrorData.h"
#include "dawn_native/Error.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
namespace dawn_native {
@@ -42,6 +43,14 @@ namespace dawn_native {
mBacktrace.push_back(std::move(record));
}
+ void ErrorData::AppendContext(std::string context) {
+ mContexts.push_back(std::move(context));
+ }
+
+ void ErrorData::AppendDebugGroup(std::string label) {
+ mDebugGroups.push_back(std::move(label));
+ }
+
InternalErrorType ErrorData::GetType() const {
return mType;
}
@@ -54,4 +63,37 @@ namespace dawn_native {
return mBacktrace;
}
+ const std::vector<std::string>& ErrorData::GetContexts() const {
+ return mContexts;
+ }
+
+ const std::vector<std::string>& ErrorData::GetDebugGroups() const {
+ return mDebugGroups;
+ }
+
+ std::string ErrorData::GetFormattedMessage() const {
+ std::ostringstream ss;
+ ss << mMessage;
+
+ if (!mContexts.empty()) {
+ for (auto context : mContexts) {
+ ss << "\n - While " << context;
+ }
+ } else {
+ for (const auto& callsite : mBacktrace) {
+ ss << "\n at " << callsite.function << " (" << callsite.file << ":"
+ << callsite.line << ")";
+ }
+ }
+
+ if (!mDebugGroups.empty()) {
+ ss << "\n\nDebug group stack: ";
+ for (auto label : mDebugGroups) {
+ ss << "\n > \"" << label << "\"";
+ }
+ }
+
+ return ss.str();
+ }
+
} // namespace dawn_native
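
Aside: GetFormattedMessage above layers the accumulated contexts and the debug-group stack onto the base message. The standalone mirror below reproduces that layout (the backtrace fallback used when there are no contexts is omitted); the message and label strings are made up for illustration.

// Standalone mirror of the GetFormattedMessage layout.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string FormatMessage(const std::string& message,
                          const std::vector<std::string>& contexts,
                          const std::vector<std::string>& debugGroups) {
    std::ostringstream ss;
    ss << message;
    for (const std::string& context : contexts) {
        ss << "\n - While " << context;  // most recent context first, like a call stack
    }
    if (!debugGroups.empty()) {
        ss << "\n\nDebug group stack: ";
        for (const std::string& label : debugGroups) {
            ss << "\n > \"" << label << "\"";
        }
    }
    return ss.str();
}

int main() {
    std::cout << FormatMessage("Destroyed external texture is used in a submit.",
                               {"validating the external texture",
                                "encoding the render pass"},
                               {"Frame 42"})
              << "\n";
    return 0;
}
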
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.h b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
index 02486020e07..477a7e721b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.h
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
@@ -48,15 +48,23 @@ namespace dawn_native {
int line;
};
void AppendBacktrace(const char* file, const char* function, int line);
+ void AppendContext(std::string context);
+ void AppendDebugGroup(std::string label);
InternalErrorType GetType() const;
const std::string& GetMessage() const;
const std::vector<BacktraceRecord>& GetBacktrace() const;
+ const std::vector<std::string>& GetContexts() const;
+ const std::vector<std::string>& GetDebugGroups() const;
+
+ std::string GetFormattedMessage() const;
private:
InternalErrorType mType;
std::string mMessage;
std::vector<BacktraceRecord> mBacktrace;
+ std::vector<std::string> mContexts;
+ std::vector<std::string> mDebugGroups;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
index 01c88ed5737..d021de113a6 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
@@ -29,6 +29,7 @@ namespace dawn_native {
case wgpu::ErrorFilter::OutOfMemory:
return wgpu::ErrorType::OutOfMemory;
}
+ UNREACHABLE();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
deleted file mode 100644
index bd11c82d618..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <array>
-
-#include "common/Assert.h"
-#include "common/BitSetIterator.h"
-#include "dawn_native/Extensions.h"
-
-namespace dawn_native {
- namespace {
-
- struct ExtensionEnumAndInfo {
- Extension extension;
- ExtensionInfo info;
- bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
- };
-
- using ExtensionEnumAndInfoList =
- std::array<ExtensionEnumAndInfo, static_cast<size_t>(Extension::EnumCount)>;
-
- static constexpr ExtensionEnumAndInfoList kExtensionNameAndInfoList = {
- {{Extension::TextureCompressionBC,
- {"texture_compression_bc", "Support Block Compressed (BC) texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
- &WGPUDeviceProperties::textureCompressionBC},
- {Extension::ShaderFloat16,
- {"shader_float16",
- "Support 16bit float arithmetic and declarations in uniform and storage buffers",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
- &WGPUDeviceProperties::shaderFloat16},
- {Extension::PipelineStatisticsQuery,
- {"pipeline_statistics_query", "Support Pipeline Statistics Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::pipelineStatisticsQuery},
- {Extension::TimestampQuery,
- {"timestamp_query", "Support Timestamp Query",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
- &WGPUDeviceProperties::timestampQuery},
- {Extension::MultiPlanarFormats,
- {"multiplanar_formats",
- "Import and use multi-planar texture formats with per plane views",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
- &WGPUDeviceProperties::multiPlanarFormats},
- {Extension::DepthClamping,
- {"depth_clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
- &WGPUDeviceProperties::depthClamping},
- {Extension::DawnInternalUsages,
- {"dawn-internal-usages",
- "Add internal usages to resources to affect how the texture is allocated, but not "
- "frontend validation. Other internal commands may access this usage.",
- "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/extensions/"
- "dawn_internal_usages.md"},
- &WGPUDeviceProperties::dawnInternalUsages}}};
-
- } // anonymous namespace
-
- void ExtensionsSet::EnableExtension(Extension extension) {
- ASSERT(extension != Extension::InvalidEnum);
- const size_t extensionIndex = static_cast<size_t>(extension);
- extensionsBitSet.set(extensionIndex);
- }
-
- bool ExtensionsSet::IsEnabled(Extension extension) const {
- ASSERT(extension != Extension::InvalidEnum);
- const size_t extensionIndex = static_cast<size_t>(extension);
- return extensionsBitSet[extensionIndex];
- }
-
- std::vector<const char*> ExtensionsSet::GetEnabledExtensionNames() const {
- std::vector<const char*> enabledExtensionNames(extensionsBitSet.count());
-
- uint32_t index = 0;
- for (uint32_t i : IterateBitSet(extensionsBitSet)) {
- const char* extensionName = ExtensionEnumToName(static_cast<Extension>(i));
- enabledExtensionNames[index] = extensionName;
- ++index;
- }
- return enabledExtensionNames;
- }
-
- void ExtensionsSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
- ASSERT(properties != nullptr);
-
- for (uint32_t i : IterateBitSet(extensionsBitSet)) {
- properties->*(kExtensionNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
- }
- }
-
- const char* ExtensionEnumToName(Extension extension) {
- ASSERT(extension != Extension::InvalidEnum);
-
- const ExtensionEnumAndInfo& extensionNameAndInfo =
- kExtensionNameAndInfoList[static_cast<size_t>(extension)];
- ASSERT(extensionNameAndInfo.extension == extension);
- return extensionNameAndInfo.info.name;
- }
-
- ExtensionsInfo::ExtensionsInfo() {
- for (size_t index = 0; index < kExtensionNameAndInfoList.size(); ++index) {
- const ExtensionEnumAndInfo& extensionNameAndInfo = kExtensionNameAndInfoList[index];
- ASSERT(index == static_cast<size_t>(extensionNameAndInfo.extension));
- mExtensionNameToEnumMap[extensionNameAndInfo.info.name] =
- extensionNameAndInfo.extension;
- }
- }
-
- const ExtensionInfo* ExtensionsInfo::GetExtensionInfo(const char* extensionName) const {
- ASSERT(extensionName);
-
- const auto& iter = mExtensionNameToEnumMap.find(extensionName);
- if (iter != mExtensionNameToEnumMap.cend()) {
- return &kExtensionNameAndInfoList[static_cast<size_t>(iter->second)].info;
- }
- return nullptr;
- }
-
- Extension ExtensionsInfo::ExtensionNameToEnum(const char* extensionName) const {
- ASSERT(extensionName);
-
- const auto& iter = mExtensionNameToEnumMap.find(extensionName);
- if (iter != mExtensionNameToEnumMap.cend()) {
- return kExtensionNameAndInfoList[static_cast<size_t>(iter->second)].extension;
- }
- return Extension::InvalidEnum;
- }
-
- ExtensionsSet ExtensionsInfo::ExtensionNamesToExtensionsSet(
- const std::vector<const char*>& requiredExtensions) const {
- ExtensionsSet extensionsSet;
-
- for (const char* extensionName : requiredExtensions) {
- Extension extensionEnum = ExtensionNameToEnum(extensionName);
- ASSERT(extensionEnum != Extension::InvalidEnum);
- extensionsSet.EnableExtension(extensionEnum);
- }
- return extensionsSet;
- }
-
-} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.h b/chromium/third_party/dawn/src/dawn_native/Extensions.h
deleted file mode 100644
index 16ac80783be..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_EXTENSIONS_H_
-#define DAWNNATIVE_EXTENSIONS_H_
-
-#include <bitset>
-#include <unordered_map>
-#include <vector>
-
-#include "dawn_native/DawnNative.h"
-
-namespace dawn_native {
-
- enum class Extension {
- TextureCompressionBC,
- ShaderFloat16,
- PipelineStatisticsQuery,
- TimestampQuery,
- MultiPlanarFormats,
- DepthClamping,
-
- // Dawn-specific
- DawnInternalUsages,
-
- EnumCount,
- InvalidEnum = EnumCount,
- ExtensionMin = TextureCompressionBC,
- };
-
- // A wrapper of the bitset to store if an extension is enabled or not. This wrapper provides the
- // convenience to convert the enums of enum class Extension to the indices of a bitset.
- struct ExtensionsSet {
- std::bitset<static_cast<size_t>(Extension::EnumCount)> extensionsBitSet;
-
- void EnableExtension(Extension extension);
- bool IsEnabled(Extension extension) const;
- std::vector<const char*> GetEnabledExtensionNames() const;
- void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
- };
-
- const char* ExtensionEnumToName(Extension extension);
-
- class ExtensionsInfo {
- public:
- ExtensionsInfo();
-
- // Used to query the details of an extension. Return nullptr if extensionName is not a valid
- // name of an extension supported in Dawn
- const ExtensionInfo* GetExtensionInfo(const char* extensionName) const;
- Extension ExtensionNameToEnum(const char* extensionName) const;
- ExtensionsSet ExtensionNamesToExtensionsSet(
- const std::vector<const char*>& requiredExtensions) const;
-
- private:
- std::unordered_map<std::string, Extension> mExtensionNameToEnumMap;
- };
-
-} // namespace dawn_native
-
-#endif // DAWNNATIVE_EXTENSIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
index 1ff5330c001..148e24de3d0 100644
--- a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/ExternalTexture.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Texture.h"
#include "dawn_native/dawn_platform.h"
@@ -29,23 +30,22 @@ namespace dawn_native {
"at least one of the passed texture views.");
}
- if ((textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) !=
- wgpu::TextureUsage::TextureBinding) {
- return DAWN_VALIDATION_ERROR(
- "The external texture descriptor specifies a texture that was not created with "
- "TextureUsage::TextureBinding.");
- }
+ DAWN_INVALID_IF(
+ (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
+ "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
+ textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
- if (textureView->GetDimension() != wgpu::TextureViewDimension::e2D) {
- return DAWN_VALIDATION_ERROR(
- "The external texture descriptor contains a texture view with a non-2D dimension.");
- }
+ DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
+ "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
+ textureView->GetDimension());
- if (textureView->GetLevelCount() > 1) {
- return DAWN_VALIDATION_ERROR(
- "The external texture descriptor contains a texture view with a level count "
- "greater than 1.");
- }
+ DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
+ "The external texture plane (%s) mip level count (%u) is not 1.",
+ textureView, textureView->GetLevelCount());
+
+ DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
+ "The external texture plane (%s) sample count (%u) is not one.",
+ textureView, textureView->GetTexture()->GetSampleCount());
return {};
}
@@ -65,11 +65,14 @@ namespace dawn_native {
case wgpu::TextureFormat::RGBA8Unorm:
case wgpu::TextureFormat::BGRA8Unorm:
case wgpu::TextureFormat::RGBA16Float:
- DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0, descriptor->format));
+ DAWN_TRY_CONTEXT(
+ ValidateExternalTexturePlane(descriptor->plane0, descriptor->format),
+ "validating plane0 against the external texture format (%s)",
+ descriptor->format);
break;
default:
- return DAWN_VALIDATION_ERROR(
- "The external texture descriptor specifies an unsupported format.");
+ return DAWN_FORMAT_VALIDATION_ERROR(
+ "Format (%s) is not a supported external texture format.", descriptor->format);
}
return {};
@@ -86,12 +89,12 @@ namespace dawn_native {
ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
const ExternalTextureDescriptor* descriptor)
- : ObjectBase(device), mState(ExternalTextureState::Alive) {
+ : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
textureViews[0] = descriptor->plane0;
}
ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : ApiObjectBase(device, tag) {
}
const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
@@ -101,9 +104,8 @@ namespace dawn_native {
MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
ASSERT(!IsError());
- if (mState == ExternalTextureState::Destroyed) {
- return DAWN_VALIDATION_ERROR("Destroyed external texture used in a submit");
- }
+ DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
+ "Destroyed external texture %s is used in a submit.", this);
return {};
}
@@ -120,4 +122,8 @@ namespace dawn_native {
return new ExternalTextureBase(device, ObjectBase::kError);
}
-} // namespace dawn_native
\ No newline at end of file
+ ObjectType ExternalTextureBase::GetType() const {
+ return ObjectType::ExternalTexture;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
index b197b295ecd..d16147472cd 100644
--- a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
+++ b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_EXTERNALTEXTURE_H_
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/ObjectBase.h"
#include "dawn_native/Subresource.h"
@@ -29,7 +30,7 @@ namespace dawn_native {
MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
const ExternalTextureDescriptor* descriptor);
- class ExternalTextureBase : public ObjectBase {
+ class ExternalTextureBase : public ApiObjectBase {
public:
static ResultOrError<Ref<ExternalTextureBase>> Create(
DeviceBase* device,
@@ -41,6 +42,8 @@ namespace dawn_native {
static ExternalTextureBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
void APIDestroy();
private:
@@ -52,4 +55,4 @@ namespace dawn_native {
};
} // namespace dawn_native
-#endif // DAWNNATIVE_EXTERNALTEXTURE_H_
\ No newline at end of file
+#endif // DAWNNATIVE_EXTERNALTEXTURE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Features.cpp b/chromium/third_party/dawn/src/dawn_native/Features.cpp
new file mode 100644
index 00000000000..7b0e8a99f3e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Features.cpp
@@ -0,0 +1,163 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+
+#include "common/Assert.h"
+#include "common/BitSetIterator.h"
+#include "dawn_native/Features.h"
+
+namespace dawn_native {
+ namespace {
+
+ struct FeatureEnumAndInfo {
+ Feature feature;
+ FeatureInfo info;
+ bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
+ };
+
+ using FeatureEnumAndInfoList =
+ std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
+
+ static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
+ {{Feature::TextureCompressionBC,
+ {"texture_compression_bc", "Support Block Compressed (BC) texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+ &WGPUDeviceProperties::textureCompressionBC},
+ {Feature::TextureCompressionETC2,
+ {"texture-compression-etc2",
+ "Support Ericsson Texture Compressed (ETC2/EAC) texture "
+ "formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionETC2},
+ {Feature::TextureCompressionASTC,
+ {"texture-compression-astc",
+ "Support Adaptable Scalable Texture Compressed (ASTC) "
+ "texture formats",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+ &WGPUDeviceProperties::textureCompressionASTC},
+ {Feature::ShaderFloat16,
+ {"shader_float16",
+ "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+ &WGPUDeviceProperties::shaderFloat16},
+ {Feature::PipelineStatisticsQuery,
+ {"pipeline_statistics_query", "Support Pipeline Statistics Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::pipelineStatisticsQuery},
+ {Feature::TimestampQuery,
+ {"timestamp_query", "Support Timestamp Query",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+ &WGPUDeviceProperties::timestampQuery},
+ {Feature::DepthClamping,
+ {"depth_clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+ &WGPUDeviceProperties::depthClamping},
+ {Feature::DawnInternalUsages,
+ {"dawn-internal-usages",
+ "Add internal usages to resources to affect how the texture is allocated, but not "
+ "frontend validation. Other internal commands may access this usage.",
+ "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/features/"
+ "dawn_internal_usages.md"},
+ &WGPUDeviceProperties::dawnInternalUsages},
+ {Feature::MultiPlanarFormats,
+ {"multiplanar_formats",
+ "Import and use multi-planar texture formats with per plane views",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
+ &WGPUDeviceProperties::multiPlanarFormats}}};
+
+ } // anonymous namespace
+
+ void FeaturesSet::EnableFeature(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ featuresBitSet.set(featureIndex);
+ }
+
+ bool FeaturesSet::IsEnabled(Feature feature) const {
+ ASSERT(feature != Feature::InvalidEnum);
+ const size_t featureIndex = static_cast<size_t>(feature);
+ return featuresBitSet[featureIndex];
+ }
+
+ std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
+ std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
+
+ uint32_t index = 0;
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ const char* featureName = FeatureEnumToName(static_cast<Feature>(i));
+ enabledFeatureNames[index] = featureName;
+ ++index;
+ }
+ return enabledFeatureNames;
+ }
+
+ void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+ ASSERT(properties != nullptr);
+
+ for (uint32_t i : IterateBitSet(featuresBitSet)) {
+ properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+ }
+ }
+
+ const char* FeatureEnumToName(Feature feature) {
+ ASSERT(feature != Feature::InvalidEnum);
+
+ const FeatureEnumAndInfo& featureNameAndInfo =
+ kFeatureNameAndInfoList[static_cast<size_t>(feature)];
+ ASSERT(featureNameAndInfo.feature == feature);
+ return featureNameAndInfo.info.name;
+ }
+
+ FeaturesInfo::FeaturesInfo() {
+ for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
+ const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
+ ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
+ mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
+ }
+ }
+
+ const FeatureInfo* FeaturesInfo::GetFeatureInfo(const char* featureName) const {
+ ASSERT(featureName);
+
+ const auto& iter = mFeatureNameToEnumMap.find(featureName);
+ if (iter != mFeatureNameToEnumMap.cend()) {
+ return &kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].info;
+ }
+ return nullptr;
+ }
+
+ Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
+ ASSERT(featureName);
+
+ const auto& iter = mFeatureNameToEnumMap.find(featureName);
+ if (iter != mFeatureNameToEnumMap.cend()) {
+ return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
+ }
+ return Feature::InvalidEnum;
+ }
+
+ FeaturesSet FeaturesInfo::FeatureNamesToFeaturesSet(
+ const std::vector<const char*>& requiredFeatures) const {
+ FeaturesSet featuresSet;
+
+ for (const char* featureName : requiredFeatures) {
+ Feature featureEnum = FeatureNameToEnum(featureName);
+ ASSERT(featureEnum != Feature::InvalidEnum);
+ featuresSet.EnableFeature(featureEnum);
+ }
+ return featuresSet;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Features.h b/chromium/third_party/dawn/src/dawn_native/Features.h
new file mode 100644
index 00000000000..35bdf4f8b96
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Features.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_FEATURES_H_
+#define DAWNNATIVE_FEATURES_H_
+
+#include <bitset>
+#include <unordered_map>
+#include <vector>
+
+#include "dawn_native/DawnNative.h"
+
+namespace dawn_native {
+
+ enum class Feature {
+ TextureCompressionBC,
+ TextureCompressionETC2,
+ TextureCompressionASTC,
+ ShaderFloat16,
+ PipelineStatisticsQuery,
+ TimestampQuery,
+ DepthClamping,
+
+ // Dawn-specific
+ DawnInternalUsages,
+ MultiPlanarFormats,
+
+ EnumCount,
+ InvalidEnum = EnumCount,
+ FeatureMin = TextureCompressionBC,
+ };
+
+ // A wrapper of the bitset to store whether a feature is enabled. This wrapper provides the
+ // convenience of converting the enums of enum class Feature to the indices of a bitset.
+ struct FeaturesSet {
+ std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
+
+ void EnableFeature(Feature feature);
+ bool IsEnabled(Feature feature) const;
+ std::vector<const char*> GetEnabledFeatureNames() const;
+ void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
+ };
+
+ const char* FeatureEnumToName(Feature feature);
+
+ class FeaturesInfo {
+ public:
+ FeaturesInfo();
+
+ // Used to query the details of a feature. Returns nullptr if featureName is not a valid
+ // name of a feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(const char* featureName) const;
+ Feature FeatureNameToEnum(const char* featureName) const;
+ FeaturesSet FeatureNamesToFeaturesSet(
+ const std::vector<const char*>& requiredFeatures) const;
+
+ private:
+ std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_FEATURES_H_
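
Aside: FeaturesSet is a thin enum-indexed bitset wrapper. A standalone sketch of the same enable/query pattern, using a deliberately trimmed-down enum (the real list is the one declared above):

// Sketch of the enum-indexed bitset pattern used by FeaturesSet.
#include <bitset>
#include <cassert>
#include <cstddef>

enum class Feature {
    TextureCompressionBC,
    TimestampQuery,
    DepthClamping,

    EnumCount,
    InvalidEnum = EnumCount,
};

struct FeaturesSetSketch {
    std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;

    void EnableFeature(Feature feature) {
        assert(feature != Feature::InvalidEnum);
        featuresBitSet.set(static_cast<size_t>(feature));
    }
    bool IsEnabled(Feature feature) const {
        assert(feature != Feature::InvalidEnum);
        return featuresBitSet[static_cast<size_t>(feature)];
    }
};

int main() {
    FeaturesSetSketch features;
    features.EnableFeature(Feature::TimestampQuery);
    assert(features.IsEnabled(Feature::TimestampQuery));
    assert(!features.IsEnabled(Feature::DepthClamping));
    return 0;
}
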
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
index 12198b2bb62..e9cde211779 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Format.cpp
@@ -16,7 +16,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/EnumMaskIterator.h"
-#include "dawn_native/Extensions.h"
+#include "dawn_native/Features.h"
#include "dawn_native/Texture.h"
#include <bitset>
@@ -37,6 +37,7 @@ namespace dawn_native {
case wgpu::TextureComponentType::DepthComparison:
return SampleTypeBit::Depth;
}
+ UNREACHABLE();
}
SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
@@ -121,7 +122,7 @@ namespace dawn_native {
// Implementation details of the format table of the DeviceBase
- // For the enum for formats are packed but this might change when we have a broader extension
+ // For now the enum values for formats are packed, but this might change when we have a broader feature
// mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
size_t ComputeFormatIndex(wgpu::TextureFormat format) {
// This takes advantage of overflows to make the index of TextureFormat::Undefined outside
@@ -195,12 +196,13 @@ namespace dawn_native {
AddFormat(internalFormat);
};
- auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize) {
+ auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
+ bool isSupported) {
Format internalFormat;
internalFormat.format = format;
internalFormat.isRenderable = true;
internalFormat.isCompressed = false;
- internalFormat.isSupported = true;
+ internalFormat.isSupported = isSupported;
internalFormat.supportsStorageUsage = false;
internalFormat.aspects = Aspect::Depth;
internalFormat.componentCount = 1;
@@ -214,12 +216,12 @@ namespace dawn_native {
AddFormat(internalFormat);
};
- auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format) {
+ auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
Format internalFormat;
internalFormat.format = format;
internalFormat.isRenderable = true;
internalFormat.isCompressed = false;
- internalFormat.isSupported = false;
+ internalFormat.isSupported = isSupported;
internalFormat.supportsStorageUsage = false;
internalFormat.aspects = Aspect::Stencil;
internalFormat.componentCount = 1;
@@ -324,19 +326,21 @@ namespace dawn_native {
AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, true, 16, SampleTypeBit::UnfilterableFloat, 4);
// Depth-stencil formats
- AddDepthFormat(wgpu::TextureFormat::Depth32Float, 4);
+ // TODO(dawn:666): Implement the stencil8 format
+ AddStencilFormat(wgpu::TextureFormat::Stencil8, false);
+ // TODO(dawn:570): Implement the depth16unorm format
+ AddDepthFormat(wgpu::TextureFormat::Depth16Unorm, 2, false);
// TODO(crbug.com/dawn/843): This is 4 because we read this to perform zero initialization,
// and textures always use depth32float. We should improve this to be more robust. Perhaps,
// using 0 here to mean "unsized" and adding a backend-specific query for the block size.
- AddDepthFormat(wgpu::TextureFormat::Depth24Plus, 4);
- // TODO(dawn:666): Implement the stencil8 format
- AddStencilFormat(wgpu::TextureFormat::Stencil8);
+ AddDepthFormat(wgpu::TextureFormat::Depth24Plus, 4, true);
AddMultiAspectFormat(wgpu::TextureFormat::Depth24PlusStencil8,
Aspect::Depth | Aspect::Stencil, wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Stencil8, true, true, 2);
- // TODO(dawn:690): Implement Depth16Unorm, Depth24UnormStencil8, Depth32FloatStencil8.
+ AddDepthFormat(wgpu::TextureFormat::Depth32Float, 4, true);
+ // TODO(dawn:690): Implement Depth24UnormStencil8, Depth32FloatStencil8.
// BC compressed formats
- bool isBCFormatSupported = device->IsExtensionEnabled(Extension::TextureCompressionBC);
+ bool isBCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionBC);
AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported, 4);
AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported, 4);
AddCompressedFormat(wgpu::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported, 1);
@@ -352,8 +356,52 @@ namespace dawn_native {
AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported, 4);
AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported, 4);
+ // ETC2/EAC compressed formats
+ bool isETC2FormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionETC2);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8Unorm, 8, 4, 4, isETC2FormatSupported, 3);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8UnormSrgb, 8, 4, 4, isETC2FormatSupported, 3);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1Unorm, 8, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGB8A1UnormSrgb, 8, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8Unorm, 16, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ETC2RGBA8UnormSrgb, 16, 4, 4, isETC2FormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::EACR11Unorm, 8, 4, 4, isETC2FormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::EACR11Snorm, 8, 4, 4, isETC2FormatSupported, 1);
+ AddCompressedFormat(wgpu::TextureFormat::EACRG11Unorm, 16, 4, 4, isETC2FormatSupported, 2);
+ AddCompressedFormat(wgpu::TextureFormat::EACRG11Snorm, 16, 4, 4, isETC2FormatSupported, 2);
+
+ // ASTC compressed formats
+ bool isASTCFormatSupported = device->IsFeatureEnabled(Feature::TextureCompressionASTC);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC4x4Unorm, 16, 4, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC4x4UnormSrgb, 16, 4, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x4Unorm, 16, 5, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x4UnormSrgb, 16, 5, 4, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x5Unorm, 16, 5, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC5x5UnormSrgb, 16, 5, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x5Unorm, 16, 6, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x5UnormSrgb, 16, 6, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x6Unorm, 16, 6, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC6x6UnormSrgb, 16, 6, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x5Unorm, 16, 8, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x5UnormSrgb, 16, 8, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x6Unorm, 16, 8, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x6UnormSrgb, 16, 8, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x8Unorm, 16, 8, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC8x8UnormSrgb, 16, 8, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x5Unorm, 16, 10, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x5UnormSrgb, 16, 10, 5, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x6Unorm, 16, 10, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x6UnormSrgb, 16, 10, 6, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x8Unorm, 16, 10, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x8UnormSrgb, 16, 10, 8, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x10Unorm, 16, 10, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC10x10UnormSrgb, 16, 10, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x10Unorm, 16, 12, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x10UnormSrgb, 16, 12, 10, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x12Unorm, 16, 12, 12, isASTCFormatSupported, 4);
+ AddCompressedFormat(wgpu::TextureFormat::ASTC12x12UnormSrgb, 16, 12, 12, isASTCFormatSupported, 4);
+
// multi-planar formats
- const bool isMultiPlanarFormatSupported = device->IsExtensionEnabled(Extension::MultiPlanarFormats);
+ const bool isMultiPlanarFormatSupported = device->IsFeatureEnabled(Feature::MultiPlanarFormats);
AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, 3);
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
index f03f37d35f5..f509cfe9e4d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ b/chromium/third_party/dawn/src/dawn_native/Format.h
@@ -77,7 +77,7 @@ namespace dawn_native {
// The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
// exact number of known formats.
- static constexpr size_t kKnownFormatCount = 55;
+ static constexpr size_t kKnownFormatCount = 94;
struct Format;
using FormatTable = std::array<Format, kKnownFormatCount>;
diff --git a/chromium/third_party/dawn/src/dawn_native/Forward.h b/chromium/third_party/dawn/src/dawn_native/Forward.h
index 9ee495d0a40..61b628cb41c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/Forward.h
@@ -22,6 +22,8 @@ class Ref;
namespace dawn_native {
+ enum class ObjectType : uint32_t;
+
class AdapterBase;
class BindGroupBase;
class BindGroupLayoutBase;
@@ -31,7 +33,6 @@ namespace dawn_native {
class CommandEncoder;
class ComputePassEncoder;
class ExternalTextureBase;
- class Fence;
class InstanceBase;
class PipelineBase;
class PipelineLayoutBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp
new file mode 100644
index 00000000000..235935f08ee
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.cpp
@@ -0,0 +1,193 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/IndirectDrawMetadata.h"
+
+#include "common/Constants.h"
+#include "common/RefCounted.h"
+#include "dawn_native/IndirectDrawValidationEncoder.h"
+#include "dawn_native/RenderBundle.h"
+
+#include <algorithm>
+#include <utility>
+
+namespace dawn_native {
+
+ namespace {
+
+ // In the unlikely scenario that the indirect offsets used with a single buffer span a range
+ // of the buffer larger than this length, we split the validation work into multiple batches.
+ constexpr uint64_t kMaxBatchOffsetRange = kMaxStorageBufferBindingSize -
+ kMinStorageBufferOffsetAlignment -
+ kDrawIndexedIndirectSize;
+
+ } // namespace
+
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
+ BufferBase* indirectBuffer)
+ : mIndirectBuffer(indirectBuffer) {
+ }
+
+ void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndexedIndirectDraw(
+ IndexedIndirectDraw draw) {
+ const uint64_t newOffset = draw.clientBufferOffset;
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndexedIndirectValidationBatch& batch = *it;
+ if (batch.draws.size() >= kMaxDrawCallsPerIndirectValidationBatch) {
+ // This batch is full. If its minOffset is to the right of the new offset, we can
+ // just insert a new batch here.
+ if (newOffset < batch.minOffset) {
+ break;
+ }
+
+ // Otherwise keep looking.
+ ++it;
+ continue;
+ }
+
+ if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset < batch.minOffset &&
+ batch.maxOffset - newOffset <= kMaxBatchOffsetRange) {
+ // We can extend this batch to the left in order to fit the new offset.
+ batch.minOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset > batch.maxOffset &&
+ newOffset - batch.minOffset <= kMaxBatchOffsetRange) {
+ // We can extend this batch to the right in order to fit the new offset.
+ batch.maxOffset = newOffset;
+ batch.draws.push_back(std::move(draw));
+ return;
+ }
+
+ if (newOffset < batch.minOffset) {
+ // We want to insert a new batch just before this one.
+ break;
+ }
+
+ ++it;
+ }
+
+ IndexedIndirectValidationBatch newBatch;
+ newBatch.minOffset = newOffset;
+ newBatch.maxOffset = newOffset;
+ newBatch.draws.push_back(std::move(draw));
+
+ mBatches.insert(it, std::move(newBatch));
+ }
+
+ void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+ const IndexedIndirectValidationBatch& newBatch) {
+ auto it = mBatches.begin();
+ while (it != mBatches.end()) {
+ IndexedIndirectValidationBatch& batch = *it;
+ uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
+ uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
+ if (max - min <= kMaxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
+ kMaxDrawCallsPerIndirectValidationBatch) {
+ // This batch fits within the limits of an existing batch. Merge it.
+ batch.minOffset = min;
+ batch.maxOffset = max;
+ batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
+ return;
+ }
+
+ if (newBatch.minOffset < batch.minOffset) {
+ break;
+ }
+
+ ++it;
+ }
+ mBatches.push_back(newBatch);
+ }
+
+ const std::vector<IndirectDrawMetadata::IndexedIndirectValidationBatch>&
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
+ return mBatches;
+ }
+
+ IndirectDrawMetadata::IndirectDrawMetadata() = default;
+
+ IndirectDrawMetadata::~IndirectDrawMetadata() = default;
+
+ IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
+
+ IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
+
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
+ IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
+ return &mIndexedIndirectBufferValidationInfo;
+ }
+
+ void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
+ auto result = mAddedBundles.insert(bundle);
+ if (!result.second) {
+ return;
+ }
+
+ for (const auto& entry :
+ bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
+ const IndexedIndirectConfig& config = entry.first;
+ auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
+ if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
+ // We already have batches for the same config. Merge the new ones in.
+ for (const IndexedIndirectValidationBatch& batch : entry.second.GetBatches()) {
+ it->second.AddBatch(batch);
+ }
+ } else {
+ mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, entry.second);
+ }
+ }
+ }
+
+ void IndirectDrawMetadata::AddIndexedIndirectDraw(
+ wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ BufferLocation* drawCmdIndirectBufferLocation) {
+ uint64_t numIndexBufferElements;
+ switch (indexFormat) {
+ case wgpu::IndexFormat::Uint16:
+ numIndexBufferElements = indexBufferSize / 2;
+ break;
+ case wgpu::IndexFormat::Uint32:
+ numIndexBufferElements = indexBufferSize / 4;
+ break;
+ case wgpu::IndexFormat::Undefined:
+ UNREACHABLE();
+ }
+
+ const IndexedIndirectConfig config(indirectBuffer, numIndexBufferElements);
+ auto it = mIndexedIndirectBufferValidationInfo.find(config);
+ if (it == mIndexedIndirectBufferValidationInfo.end()) {
+ auto result = mIndexedIndirectBufferValidationInfo.emplace(
+ config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+ it = result.first;
+ }
+
+ IndexedIndirectDraw draw;
+ draw.clientBufferOffset = indirectOffset;
+ draw.bufferLocation = drawCmdIndirectBufferLocation;
+ it->second.AddIndexedIndirectDraw(std::move(draw));
+ }
+
+} // namespace dawn_native
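The batching policy in AddIndexedIndirectDraw above keeps mBatches sorted by minOffset and either absorbs a new offset into an existing batch or inserts a fresh batch next to it. Below is a minimal standalone sketch of the same policy; `kMaxRange` and `kMaxDraws` are illustrative stand-ins for kMaxBatchOffsetRange and kMaxDrawCallsPerIndirectValidationBatch, not Dawn's real constants.

```cpp
#include <cstdint>
#include <vector>

// Illustrative stand-ins for Dawn's kMaxBatchOffsetRange and
// kMaxDrawCallsPerIndirectValidationBatch; the real values are much larger.
constexpr uint64_t kMaxRange = 1024;
constexpr size_t kMaxDraws = 4;

struct Batch {
    uint64_t minOffset;
    uint64_t maxOffset;
    std::vector<uint64_t> offsets;  // client indirect offsets of the batched draws
};

// Same shape as AddIndexedIndirectDraw: extend an existing batch when the new
// offset fits its offset range and draw-count budget, otherwise insert a new
// batch so the list stays sorted by minOffset.
void AddOffset(std::vector<Batch>* batches, uint64_t offset) {
    auto it = batches->begin();
    while (it != batches->end()) {
        Batch& b = *it;
        if (b.offsets.size() >= kMaxDraws) {
            if (offset < b.minOffset) {
                break;  // insert a fresh batch before this full one
            }
            ++it;
            continue;
        }
        if (offset >= b.minOffset && offset <= b.maxOffset) {
            b.offsets.push_back(offset);  // already covered by this batch
            return;
        }
        if (offset < b.minOffset && b.maxOffset - offset <= kMaxRange) {
            b.minOffset = offset;  // extend the batch to the left
            b.offsets.push_back(offset);
            return;
        }
        if (offset > b.maxOffset && offset - b.minOffset <= kMaxRange) {
            b.maxOffset = offset;  // extend the batch to the right
            b.offsets.push_back(offset);
            return;
        }
        if (offset < b.minOffset) {
            break;  // the new offset belongs in a batch of its own, just before this one
        }
        ++it;
    }
    batches->insert(it, Batch{offset, offset, {offset}});
}
```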
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h
new file mode 100644
index 00000000000..04c38e326b1
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/IndirectDrawMetadata.h
@@ -0,0 +1,112 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+#define DAWNNATIVE_INDIRECTDRAWMETADATA_H_
+
+#include "common/NonCopyable.h"
+#include "common/RefCounted.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/BufferLocation.h"
+#include "dawn_native/CommandBufferStateTracker.h"
+#include "dawn_native/Commands.h"
+
+#include <cstdint>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace dawn_native {
+
+ class RenderBundleBase;
+
+ // Metadata corresponding to the validation requirements of a single render pass. This metadata
+ // is accumulated while its corresponding render pass is encoded, and is later used to encode
+ // validation commands to be inserted into the command buffer just before the render pass's own
+ // commands.
+ class IndirectDrawMetadata : public NonCopyable {
+ public:
+ struct IndexedIndirectDraw {
+ uint64_t clientBufferOffset;
+ Ref<BufferLocation> bufferLocation;
+ };
+
+ struct IndexedIndirectValidationBatch {
+ uint64_t minOffset;
+ uint64_t maxOffset;
+ std::vector<IndexedIndirectDraw> draws;
+ };
+
+ // Tracks information about every draw call in this render pass which uses the same indirect
+ // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
+ // that validation work can be chunked efficiently if necessary.
+ class IndexedIndirectBufferValidationInfo {
+ public:
+ explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
+
+ // Records a new drawIndexedIndirect call for the render pass. The draw's buffer location
+ // is assigned a validated buffer ref and relative offset later, via a deferred update.
+ void AddIndexedIndirectDraw(IndexedIndirectDraw draw);
+
+ // Adds draw calls from an already-computed batch, e.g. from a previously encoded
+ // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
+ // it's added to mBatches.
+ void AddBatch(const IndexedIndirectValidationBatch& batch);
+
+ const std::vector<IndexedIndirectValidationBatch>& GetBatches() const;
+
+ private:
+ Ref<BufferBase> mIndirectBuffer;
+
+ // A list of information about validation batches that will need to be executed for the
+ // corresponding indirect buffer prior to a single render pass. These are kept sorted by
+ // minOffset and may overlap iff the number of offsets in one batch would otherwise
+ // exceed some large upper bound (roughly ~33M draw calls).
+ //
+ // Since the most common expected cases will overwhelmingly require only a single
+ // validation pass per render pass, this is optimized for efficient updates to a single
+ // batch rather than for efficient manipulation of a large number of batches.
+ std::vector<IndexedIndirectValidationBatch> mBatches;
+ };
+
+ // Combination of an indirect buffer reference, and the number of addressable index buffer
+ // elements at the time of a draw call.
+ using IndexedIndirectConfig = std::pair<BufferBase*, uint64_t>;
+ using IndexedIndirectBufferValidationInfoMap =
+ std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
+
+ IndirectDrawMetadata();
+ ~IndirectDrawMetadata();
+
+ IndirectDrawMetadata(IndirectDrawMetadata&&);
+ IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
+
+ IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
+
+ void AddBundle(RenderBundleBase* bundle);
+ void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+ uint64_t indexBufferSize,
+ BufferBase* indirectBuffer,
+ uint64_t indirectOffset,
+ BufferLocation* drawCmdIndirectBufferLocation);
+
+ private:
+ IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
+ std::set<RenderBundleBase*> mAddedBundles;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_INDIRECTDRAWMETADATA_H_
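IndexedIndirectConfig keys the validation map by both the indirect buffer and the number of addressable index buffer elements, so the same indirect buffer used with differently sized index buffers is tracked and validated separately. A small self-contained illustration of that keying, with a stand-in type in place of BufferBase:

```cpp
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Stand-in for BufferBase*; only pointer identity matters for the key.
struct FakeBuffer {};

// Mirrors IndexedIndirectConfig: (indirect buffer, addressable index count).
using Config = std::pair<const FakeBuffer*, uint64_t>;

int main() {
    FakeBuffer indirect;
    std::map<Config, std::vector<uint64_t>> drawOffsetsPerConfig;

    // Two draws through the same indirect buffer, but with index buffers of
    // different sizes, land in two distinct entries.
    drawOffsetsPerConfig[{&indirect, 1024}].push_back(0);
    drawOffsetsPerConfig[{&indirect, 2048}].push_back(64);

    return drawOffsetsPerConfig.size() == 2 ? 0 : 1;
}
```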
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp
new file mode 100644
index 00000000000..c3c2a043a96
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.cpp
@@ -0,0 +1,397 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/IndirectDrawValidationEncoder.h"
+
+#include "common/Constants.h"
+#include "common/Math.h"
+#include "dawn_native/BindGroup.h"
+#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/ComputePassEncoder.h"
+#include "dawn_native/ComputePipeline.h"
+#include "dawn_native/Device.h"
+#include "dawn_native/InternalPipelineStore.h"
+#include "dawn_native/Queue.h"
+
+#include <cstdlib>
+#include <limits>
+
+namespace dawn_native {
+
+ namespace {
+ // NOTE: This must match the workgroup_size attribute on the compute entry point below.
+ constexpr uint64_t kWorkgroupSize = 64;
+
+ // Equivalent to the BatchInfo struct defined in the shader below.
+ struct BatchInfo {
+ uint64_t numIndexBufferElements;
+ uint32_t numDraws;
+ uint32_t padding;
+ };
+
+ // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
+ // various failure modes.
+ static const char sRenderValidationShaderSource[] = R"(
+ let kNumIndirectParamsPerDrawCall = 5u;
+
+ let kIndexCountEntry = 0u;
+ let kInstanceCountEntry = 1u;
+ let kFirstIndexEntry = 2u;
+ let kBaseVertexEntry = 3u;
+ let kFirstInstanceEntry = 4u;
+
+ [[block]] struct BatchInfo {
+ numIndexBufferElementsLow: u32;
+ numIndexBufferElementsHigh: u32;
+ numDraws: u32;
+ padding: u32;
+ indirectOffsets: array<u32>;
+ };
+
+ [[block]] struct IndirectParams {
+ data: array<u32>;
+ };
+
+ [[group(0), binding(0)]] var<storage, read> batch: BatchInfo;
+ [[group(0), binding(1)]] var<storage, read_write> clientParams: IndirectParams;
+ [[group(0), binding(2)]] var<storage, write> validatedParams: IndirectParams;
+
+ fn fail(drawIndex: u32) {
+ let index = drawIndex * kNumIndirectParamsPerDrawCall;
+ validatedParams.data[index + kIndexCountEntry] = 0u;
+ validatedParams.data[index + kInstanceCountEntry] = 0u;
+ validatedParams.data[index + kFirstIndexEntry] = 0u;
+ validatedParams.data[index + kBaseVertexEntry] = 0u;
+ validatedParams.data[index + kFirstInstanceEntry] = 0u;
+ }
+
+ fn pass(drawIndex: u32) {
+ let vIndex = drawIndex * kNumIndirectParamsPerDrawCall;
+ let cIndex = batch.indirectOffsets[drawIndex];
+ validatedParams.data[vIndex + kIndexCountEntry] =
+ clientParams.data[cIndex + kIndexCountEntry];
+ validatedParams.data[vIndex + kInstanceCountEntry] =
+ clientParams.data[cIndex + kInstanceCountEntry];
+ validatedParams.data[vIndex + kFirstIndexEntry] =
+ clientParams.data[cIndex + kFirstIndexEntry];
+ validatedParams.data[vIndex + kBaseVertexEntry] =
+ clientParams.data[cIndex + kBaseVertexEntry];
+ validatedParams.data[vIndex + kFirstInstanceEntry] =
+ clientParams.data[cIndex + kFirstInstanceEntry];
+ }
+
+ [[stage(compute), workgroup_size(64, 1, 1)]]
+ fn main([[builtin(global_invocation_id)]] id : vec3<u32>) {
+ if (id.x >= batch.numDraws) {
+ return;
+ }
+
+ let clientIndex = batch.indirectOffsets[id.x];
+ let firstInstance = clientParams.data[clientIndex + kFirstInstanceEntry];
+ if (firstInstance != 0u) {
+ fail(id.x);
+ return;
+ }
+
+ if (batch.numIndexBufferElementsHigh >= 2u) {
+ // firstIndex and indexCount are both u32. The maximum possible sum of these
+ // values is 0x1fffffffe, which is less than 0x200000000. Nothing to validate.
+ pass(id.x);
+ return;
+ }
+
+ let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
+ if (batch.numIndexBufferElementsHigh == 0u &&
+ batch.numIndexBufferElementsLow < firstIndex) {
+ fail(id.x);
+ return;
+ }
+
+ // Note that this subtraction may underflow, but only when
+ // numIndexBufferElementsHigh is 1u. The result is still correct in that case.
+ let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
+ let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
+ if (indexCount > maxIndexCount) {
+ fail(id.x);
+ return;
+ }
+ pass(id.x);
+ }
+ )";
+
+ ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
+ DeviceBase* device) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+ if (store->renderValidationPipeline == nullptr) {
+ // Create compute shader module if not cached before.
+ if (store->renderValidationShader == nullptr) {
+ ShaderModuleDescriptor descriptor;
+ ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = sRenderValidationShaderSource;
+ descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
+ DAWN_TRY_ASSIGN(store->renderValidationShader,
+ device->CreateShaderModule(&descriptor));
+ }
+
+ BindGroupLayoutEntry entries[3];
+ entries[0].binding = 0;
+ entries[0].visibility = wgpu::ShaderStage::Compute;
+ entries[0].buffer.type = wgpu::BufferBindingType::ReadOnlyStorage;
+ entries[1].binding = 1;
+ entries[1].visibility = wgpu::ShaderStage::Compute;
+ entries[1].buffer.type = kInternalStorageBufferBinding;
+ entries[2].binding = 2;
+ entries[2].visibility = wgpu::ShaderStage::Compute;
+ entries[2].buffer.type = wgpu::BufferBindingType::Storage;
+
+ BindGroupLayoutDescriptor bindGroupLayoutDescriptor;
+ bindGroupLayoutDescriptor.entryCount = 3;
+ bindGroupLayoutDescriptor.entries = entries;
+ Ref<BindGroupLayoutBase> bindGroupLayout;
+ DAWN_TRY_ASSIGN(bindGroupLayout,
+ device->CreateBindGroupLayout(&bindGroupLayoutDescriptor, true));
+
+ PipelineLayoutDescriptor pipelineDescriptor;
+ pipelineDescriptor.bindGroupLayoutCount = 1;
+ pipelineDescriptor.bindGroupLayouts = &bindGroupLayout.Get();
+ Ref<PipelineLayoutBase> pipelineLayout;
+ DAWN_TRY_ASSIGN(pipelineLayout, device->CreatePipelineLayout(&pipelineDescriptor));
+
+ ComputePipelineDescriptor computePipelineDescriptor = {};
+ computePipelineDescriptor.layout = pipelineLayout.Get();
+ computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
+ computePipelineDescriptor.compute.entryPoint = "main";
+
+ DAWN_TRY_ASSIGN(store->renderValidationPipeline,
+ device->CreateComputePipeline(&computePipelineDescriptor));
+ }
+
+ return store->renderValidationPipeline.Get();
+ }
+
+ size_t GetBatchDataSize(uint32_t numDraws) {
+ return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+ }
+
+ } // namespace
+
+ const uint32_t kBatchDrawCallLimitByDispatchSize =
+ kMaxComputePerDimensionDispatchSize * kWorkgroupSize;
+ const uint32_t kBatchDrawCallLimitByStorageBindingSize =
+ (kMaxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+ const uint32_t kMaxDrawCallsPerIndirectValidationBatch =
+ std::min(kBatchDrawCallLimitByDispatchSize, kBatchDrawCallLimitByStorageBindingSize);
+
+ MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata) {
+ struct Batch {
+ const IndirectDrawMetadata::IndexedIndirectValidationBatch* metadata;
+ uint64_t numIndexBufferElements;
+ uint64_t dataBufferOffset;
+ uint64_t dataSize;
+ uint64_t clientIndirectOffset;
+ uint64_t clientIndirectSize;
+ uint64_t validatedParamsOffset;
+ uint64_t validatedParamsSize;
+ BatchInfo* batchInfo;
+ };
+
+ struct Pass {
+ BufferBase* clientIndirectBuffer;
+ uint64_t validatedParamsSize = 0;
+ uint64_t batchDataSize = 0;
+ std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
+ std::vector<Batch> batches;
+ };
+
+ // The first stage groups all batches into passes. We try to pack as many batches into a
+ // single pass as possible. Batches can be grouped together as long as they're validating
+ // data from the same indirect buffer, but they may still be split into multiple passes if
+ // the number of draw calls in a pass would exceed some (very high) upper bound.
+ uint64_t numTotalDrawCalls = 0;
+ size_t validatedParamsSize = 0;
+ std::vector<Pass> passes;
+ IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
+ *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
+ if (bufferInfoMap.empty()) {
+ return {};
+ }
+
+ for (auto& entry : bufferInfoMap) {
+ const IndirectDrawMetadata::IndexedIndirectConfig& config = entry.first;
+ BufferBase* clientIndirectBuffer = config.first;
+ for (const IndirectDrawMetadata::IndexedIndirectValidationBatch& batch :
+ entry.second.GetBatches()) {
+ const uint64_t minOffsetFromAlignedBoundary =
+ batch.minOffset % kMinStorageBufferOffsetAlignment;
+ const uint64_t minOffsetAlignedDown =
+ batch.minOffset - minOffsetFromAlignedBoundary;
+
+ Batch newBatch;
+ newBatch.metadata = &batch;
+ newBatch.numIndexBufferElements = config.second;
+ newBatch.dataSize = GetBatchDataSize(batch.draws.size());
+ newBatch.clientIndirectOffset = minOffsetAlignedDown;
+ newBatch.clientIndirectSize =
+ batch.maxOffset + kDrawIndexedIndirectSize - minOffsetAlignedDown;
+ numTotalDrawCalls += batch.draws.size();
+
+ newBatch.validatedParamsSize = batch.draws.size() * kDrawIndexedIndirectSize;
+ newBatch.validatedParamsOffset =
+ Align(validatedParamsSize, kMinStorageBufferOffsetAlignment);
+ validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
+ if (validatedParamsSize > kMaxStorageBufferBindingSize) {
+ return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
+ }
+
+ Pass* currentPass = passes.empty() ? nullptr : &passes.back();
+ if (currentPass && currentPass->clientIndirectBuffer == clientIndirectBuffer) {
+ uint64_t nextBatchDataOffset =
+ Align(currentPass->batchDataSize, kMinStorageBufferOffsetAlignment);
+ uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
+ if (newPassBatchDataSize <= kMaxStorageBufferBindingSize) {
+ // We can fit this batch in the current pass.
+ newBatch.dataBufferOffset = nextBatchDataOffset;
+ currentPass->batchDataSize = newPassBatchDataSize;
+ currentPass->batches.push_back(newBatch);
+ continue;
+ }
+ }
+
+ // We need to start a new pass for this batch.
+ newBatch.dataBufferOffset = 0;
+
+ Pass newPass;
+ newPass.clientIndirectBuffer = clientIndirectBuffer;
+ newPass.batchDataSize = newBatch.dataSize;
+ newPass.batches.push_back(newBatch);
+ passes.push_back(std::move(newPass));
+ }
+ }
+
+ auto* const store = device->GetInternalPipelineStore();
+ ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
+ ScratchBuffer& batchDataBuffer = store->scratchStorage;
+
+ uint64_t requiredBatchDataBufferSize = 0;
+ for (const Pass& pass : passes) {
+ requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
+ }
+ DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
+ usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
+
+ DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
+ usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
+
+ // Now we allocate and populate host-side batch data to be copied to the GPU, and prepare to
+ // update all DrawIndexedIndirectCmd buffer references.
+ std::vector<DeferredBufferLocationUpdate> deferredBufferLocationUpdates;
+ deferredBufferLocationUpdates.reserve(numTotalDrawCalls);
+ for (Pass& pass : passes) {
+ // We use std::malloc here because it guarantees maximal scalar alignment.
+ pass.batchData = {std::malloc(pass.batchDataSize), std::free};
+ memset(pass.batchData.get(), 0, pass.batchDataSize);
+ uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
+ for (Batch& batch : pass.batches) {
+ batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
+ batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
+ batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
+
+ uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
+ uint64_t validatedParamsOffset = batch.validatedParamsOffset;
+ for (const auto& draw : batch.metadata->draws) {
+ // The shader uses this to index an array of u32, hence the division by 4 bytes.
+ *indirectOffsets++ = static_cast<uint32_t>(
+ (draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
+
+ DeferredBufferLocationUpdate deferredUpdate;
+ deferredUpdate.location = draw.bufferLocation;
+ deferredUpdate.buffer = validatedParamsBuffer.GetBuffer();
+ deferredUpdate.offset = validatedParamsOffset;
+ deferredBufferLocationUpdates.push_back(std::move(deferredUpdate));
+
+ validatedParamsOffset += kDrawIndexedIndirectSize;
+ }
+ }
+ }
+
+ ComputePipelineBase* pipeline;
+ DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
+
+ Ref<BindGroupLayoutBase> layout;
+ DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+ BindGroupEntry bindings[3];
+ BindGroupEntry& bufferDataBinding = bindings[0];
+ bufferDataBinding.binding = 0;
+ bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
+
+ BindGroupEntry& clientIndirectBinding = bindings[1];
+ clientIndirectBinding.binding = 1;
+
+ BindGroupEntry& validatedParamsBinding = bindings[2];
+ validatedParamsBinding.binding = 2;
+ validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
+
+ BindGroupDescriptor bindGroupDescriptor = {};
+ bindGroupDescriptor.layout = layout.Get();
+ bindGroupDescriptor.entryCount = 3;
+ bindGroupDescriptor.entries = bindings;
+
+ // Finally, we can now encode our validation passes. Each pass first does a single
+ // WriteBuffer to get batch data over to the GPU, followed by a single compute pass. The
+ // compute pass encodes a separate SetBindGroup and Dispatch command for each batch.
+ commandEncoder->EncodeSetValidatedBufferLocationsInternal(
+ std::move(deferredBufferLocationUpdates));
+ for (const Pass& pass : passes) {
+ commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
+ static_cast<const uint8_t*>(pass.batchData.get()),
+ pass.batchDataSize);
+
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ ComputePassDescriptor descriptor = {};
+ Ref<ComputePassEncoder> passEncoder =
+ AcquireRef(commandEncoder->APIBeginComputePass(&descriptor));
+ passEncoder->APISetPipeline(pipeline);
+
+ clientIndirectBinding.buffer = pass.clientIndirectBuffer;
+
+ for (const Batch& batch : pass.batches) {
+ bufferDataBinding.offset = batch.dataBufferOffset;
+ bufferDataBinding.size = batch.dataSize;
+ clientIndirectBinding.offset = batch.clientIndirectOffset;
+ clientIndirectBinding.size = batch.clientIndirectSize;
+ validatedParamsBinding.offset = batch.validatedParamsOffset;
+ validatedParamsBinding.size = batch.validatedParamsSize;
+
+ Ref<BindGroupBase> bindGroup;
+ DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
+
+ const uint32_t numDrawsRoundedUp =
+ (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APIDispatch(numDrawsRoundedUp);
+ }
+
+ passEncoder->APIEndPass();
+ }
+
+ return {};
+ }
+
+} // namespace dawn_native
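The per-batch data written to the scratch buffer above is a fixed-size BatchInfo header followed by one u32 indirect offset per draw, and each batch is dispatched with ceil(numDraws / 64) workgroups. A quick sketch of that size and dispatch arithmetic; the names are illustrative, not Dawn API:

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors the host-side BatchInfo layout above: an 8-byte element count,
// a 4-byte draw count and 4 bytes of padding, followed by u32 offsets.
struct BatchInfoSketch {
    uint64_t numIndexBufferElements;
    uint32_t numDraws;
    uint32_t padding;
};

constexpr uint32_t kWorkgroupSizeSketch = 64;  // matches workgroup_size(64, 1, 1)

size_t BatchDataSize(uint32_t numDraws) {
    return sizeof(BatchInfoSketch) + numDraws * sizeof(uint32_t);
}

uint32_t WorkgroupCount(uint32_t numDraws) {
    return (numDraws + kWorkgroupSizeSketch - 1) / kWorkgroupSizeSketch;
}

int main() {
    // A batch of 100 draws needs 16 + 400 = 416 bytes of metadata and 2 workgroups.
    std::printf("%zu bytes, %u workgroups\n", BatchDataSize(100), WorkgroupCount(100));
    return 0;
}
```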
diff --git a/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h
new file mode 100644
index 00000000000..bc62bf09b8c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/IndirectDrawValidationEncoder.h
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+#define DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/IndirectDrawMetadata.h"
+
+namespace dawn_native {
+
+ class CommandEncoder;
+ class DeviceBase;
+ class RenderPassResourceUsageTracker;
+
+ // The maximum number of draw calls we can fit into a single validation batch. This is
+ // essentially limited by the number of indirect parameter blocks that can fit into the maximum
+ // allowed storage binding size (about 6.7M).
+ extern const uint32_t kMaxDrawCallsPerIndirectValidationBatch;
+
+ MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ RenderPassResourceUsageTracker* usageTracker,
+ IndirectDrawMetadata* indirectDrawMetadata);
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_INDIRECTDRAWVALIDATIONENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
index b400f4081d6..936471e6c81 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
@@ -105,17 +105,17 @@ namespace dawn_native {
return mTogglesInfo.ToggleNameToEnum(toggleName);
}
- const ExtensionInfo* InstanceBase::GetExtensionInfo(const char* extensionName) {
- return mExtensionsInfo.GetExtensionInfo(extensionName);
+ const FeatureInfo* InstanceBase::GetFeatureInfo(const char* featureName) {
+ return mFeaturesInfo.GetFeatureInfo(featureName);
}
- Extension InstanceBase::ExtensionNameToEnum(const char* extensionName) {
- return mExtensionsInfo.ExtensionNameToEnum(extensionName);
+ Feature InstanceBase::FeatureNameToEnum(const char* featureName) {
+ return mFeaturesInfo.FeatureNameToEnum(featureName);
}
- ExtensionsSet InstanceBase::ExtensionNamesToExtensionsSet(
- const std::vector<const char*>& requiredExtensions) {
- return mExtensionsInfo.ExtensionNamesToExtensionsSet(requiredExtensions);
+ FeaturesSet InstanceBase::FeatureNamesToFeaturesSet(
+ const std::vector<const char*>& requiredFeatures) {
+ return mFeaturesInfo.FeatureNamesToFeaturesSet(requiredFeatures);
}
const std::vector<std::unique_ptr<AdapterBase>>& InstanceBase::GetAdapters() const {
@@ -196,7 +196,7 @@ namespace dawn_native {
std::unique_ptr<ErrorData> error = maybeError.AcquireError();
ASSERT(error != nullptr);
- dawn::InfoLog() << error->GetMessage();
+ dawn::InfoLog() << error->GetFormattedMessage();
return true;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.h b/chromium/third_party/dawn/src/dawn_native/Instance.h
index 4684e12fd6d..3451967bc6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.h
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.h
@@ -18,7 +18,7 @@
#include "common/RefCounted.h"
#include "dawn_native/Adapter.h"
#include "dawn_native/BackendConnection.h"
-#include "dawn_native/Extensions.h"
+#include "dawn_native/Features.h"
#include "dawn_native/Toggles.h"
#include "dawn_native/dawn_platform.h"
@@ -55,12 +55,11 @@ namespace dawn_native {
const ToggleInfo* GetToggleInfo(const char* toggleName);
Toggle ToggleNameToEnum(const char* toggleName);
- // Used to query the details of an extension. Return nullptr if extensionName is not a valid
- // name of an extension supported in Dawn.
- const ExtensionInfo* GetExtensionInfo(const char* extensionName);
- Extension ExtensionNameToEnum(const char* extensionName);
- ExtensionsSet ExtensionNamesToExtensionsSet(
- const std::vector<const char*>& requiredExtensions);
+ // Used to query the details of a feature. Returns nullptr if featureName is not a valid
+ // name of a feature supported in Dawn.
+ const FeatureInfo* GetFeatureInfo(const char* featureName);
+ Feature FeatureNameToEnum(const char* featureName);
+ FeaturesSet FeatureNamesToFeaturesSet(const std::vector<const char*>& requiredFeatures);
bool IsBackendValidationEnabled() const;
void SetBackendValidationLevel(BackendValidationLevel level);
@@ -104,7 +103,7 @@ namespace dawn_native {
std::vector<std::unique_ptr<BackendConnection>> mBackends;
std::vector<std::unique_ptr<AdapterBase>> mAdapters;
- ExtensionsInfo mExtensionsInfo;
+ FeaturesInfo mFeaturesInfo;
TogglesInfo mTogglesInfo;
#if defined(DAWN_USE_X11)
diff --git a/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h b/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h
index 55d9edf43e6..fbbaf4ed1af 100644
--- a/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h
+++ b/chromium/third_party/dawn/src/dawn_native/IntegerTypes.h
@@ -64,6 +64,12 @@ namespace dawn_native {
using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
+ // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
+ // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
+ // token, which prevents them (and any BindGroups created with them) from being used with any
+ // other pipelines.
+ using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
+
} // namespace dawn_native
#endif // DAWNNATIVE_INTEGERTYPES_H_
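PipelineCompatibilityToken is another TypedInteger, i.e. a strong typedef over uint64_t whose values can only be compared against tokens with the same tag. A rough sketch of the idea follows (not Dawn's actual TypedInteger implementation); the check it enables appears later in this patch in ValidatePipelineLayoutDescriptor, which rejects bind group layouts whose token does not match the one the new pipeline layout is created with.

```cpp
#include <cstdint>

// A minimal strong typedef: tokens with different Tag types are distinct
// types, so they cannot be mixed accidentally.
template <typename Tag, typename T = uint64_t>
class StrongToken {
  public:
    constexpr explicit StrongToken(T value) : mValue(value) {}
    constexpr bool operator==(StrongToken other) const { return mValue == other.mValue; }
    constexpr bool operator!=(StrongToken other) const { return mValue != other.mValue; }

  private:
    T mValue;
};

using CompatToken = StrongToken<struct CompatTokenTag>;

// Layouts created explicitly (outside a pipeline's default layout) carry the
// zero token; default-layout pipelines stamp theirs with a unique non-zero one.
constexpr CompatToken kExplicitLayoutToken{0};

bool IsCompatible(CompatToken bindGroupLayoutToken, CompatToken pipelineToken) {
    return bindGroupLayoutToken == pipelineToken;
}
```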
diff --git a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp
new file mode 100644
index 00000000000..edfd115f5ea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/InternalPipelineStore.h"
+
+#include "dawn_native/ComputePipeline.h"
+#include "dawn_native/Device.h"
+#include "dawn_native/RenderPipeline.h"
+#include "dawn_native/ShaderModule.h"
+
+#include <unordered_map>
+
+namespace dawn_native {
+
+ class RenderPipelineBase;
+ class ShaderModuleBase;
+
+ InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
+ : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
+ scratchIndirectStorage(device,
+ wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
+ wgpu::BufferUsage::Storage) {
+ }
+
+ InternalPipelineStore::~InternalPipelineStore() = default;
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
index 3066a9a940e..acf3b13dcea 100644
--- a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
+++ b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
@@ -16,15 +16,23 @@
#define DAWNNATIVE_INTERNALPIPELINESTORE_H_
#include "dawn_native/ObjectBase.h"
+#include "dawn_native/ScratchBuffer.h"
#include "dawn_native/dawn_platform.h"
#include <unordered_map>
namespace dawn_native {
+
+ class DeviceBase;
class RenderPipelineBase;
class ShaderModuleBase;
+ // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
+ // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
struct InternalPipelineStore {
+ explicit InternalPipelineStore(DeviceBase* device);
+ ~InternalPipelineStore();
+
std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
copyTextureForBrowserPipelines;
@@ -32,7 +40,20 @@ namespace dawn_native {
Ref<ComputePipelineBase> timestampComputePipeline;
Ref<ShaderModuleBase> timestampCS;
+
+ Ref<ShaderModuleBase> dummyFragmentShader;
+
+ // A scratch buffer suitable for use as a copy destination and storage binding.
+ ScratchBuffer scratchStorage;
+
+ // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
+ // buffer for indirect dispatch or draw calls.
+ ScratchBuffer scratchIndirectStorage;
+
+ Ref<ComputePipelineBase> renderValidationPipeline;
+ Ref<ShaderModuleBase> renderValidationShader;
};
+
} // namespace dawn_native
#endif // DAWNNATIVE_INTERNALPIPELINESTORE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Limits.cpp b/chromium/third_party/dawn/src/dawn_native/Limits.cpp
new file mode 100644
index 00000000000..5c55a55b250
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Limits.cpp
@@ -0,0 +1,212 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/Limits.h"
+
+#include "common/Assert.h"
+
+#include <array>
+
+// clang-format off
+// TODO(crbug.com/dawn/685):
+// For now, only expose these tiers until metrics can determine better ones.
+#define LIMITS_WORKGROUP_STORAGE_SIZE(X) \
+ X(Higher, maxComputeWorkgroupStorageSize, 16352, 32768, 49152, 65536)
+
+#define LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
+ X(Higher, maxStorageBufferBindingSize, 134217728, 1073741824, 2147483647, 4294967295)
+
+// TODO(crbug.com/dawn/685):
+// These limits don't have tiers yet. Define two tiers with the same values since the macros
+// in this file expect more than one tier.
+#define LIMITS_OTHER(X) \
+ X(Higher, maxTextureDimension1D, 8192, 8192) \
+ X(Higher, maxTextureDimension2D, 8192, 8192) \
+ X(Higher, maxTextureDimension3D, 2048, 2048) \
+ X(Higher, maxTextureArrayLayers, 256, 256) \
+ X(Higher, maxBindGroups, 4, 4) \
+ X(Higher, maxDynamicUniformBuffersPerPipelineLayout, 8, 8) \
+ X(Higher, maxDynamicStorageBuffersPerPipelineLayout, 4, 4) \
+ X(Higher, maxSampledTexturesPerShaderStage, 16, 16) \
+ X(Higher, maxSamplersPerShaderStage, 16, 16) \
+ X(Higher, maxStorageBuffersPerShaderStage, 8, 8) \
+ X(Higher, maxStorageTexturesPerShaderStage, 4, 4) \
+ X(Higher, maxUniformBuffersPerShaderStage, 12, 12) \
+ X(Higher, maxUniformBufferBindingSize, 16384, 16384) \
+ X( Lower, minUniformBufferOffsetAlignment, 256, 256) \
+ X( Lower, minStorageBufferOffsetAlignment, 256, 256) \
+ X(Higher, maxVertexBuffers, 8, 8) \
+ X(Higher, maxVertexAttributes, 16, 16) \
+ X(Higher, maxVertexBufferArrayStride, 2048, 2048) \
+ X(Higher, maxInterStageShaderComponents, 60, 60) \
+ X(Higher, maxComputeInvocationsPerWorkgroup, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeX, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeY, 256, 256) \
+ X(Higher, maxComputeWorkgroupSizeZ, 64, 64) \
+ X(Higher, maxComputeWorkgroupsPerDimension, 65535, 65535)
+// clang-format on
+
+#define LIMITS_EACH_GROUP(X) \
+ X(LIMITS_WORKGROUP_STORAGE_SIZE) \
+ X(LIMITS_STORAGE_BUFFER_BINDING_SIZE) \
+ X(LIMITS_OTHER)
+
+#define LIMITS(X) \
+ LIMITS_WORKGROUP_STORAGE_SIZE(X) \
+ LIMITS_STORAGE_BUFFER_BINDING_SIZE(X) \
+ LIMITS_OTHER(X)
+
+namespace dawn_native {
+ namespace {
+ template <uint32_t A, uint32_t B>
+ constexpr void StaticAssertSame() {
+ static_assert(A == B, "Mismatching tier count in limit group.");
+ }
+
+ template <uint32_t I, uint32_t... Is>
+ constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
+ int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
+ DAWN_UNUSED(unused);
+ return I;
+ }
+
+ enum class LimitBetterDirection {
+ Lower,
+ Higher,
+ };
+
+ template <LimitBetterDirection Better>
+ struct CheckLimit;
+
+ template <>
+ struct CheckLimit<LimitBetterDirection::Lower> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs < rhs;
+ }
+
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ if (IsBetter(required, supported)) {
+ return DAWN_VALIDATION_ERROR("requiredLimit lower than supported limit");
+ }
+ return {};
+ }
+ };
+
+ template <>
+ struct CheckLimit<LimitBetterDirection::Higher> {
+ template <typename T>
+ static bool IsBetter(T lhs, T rhs) {
+ return lhs > rhs;
+ }
+
+ template <typename T>
+ static MaybeError Validate(T supported, T required) {
+ if (IsBetter(required, supported)) {
+ return DAWN_VALIDATION_ERROR("requiredLimit greater than supported limit");
+ }
+ return {};
+ }
+ };
+
+ template <typename T>
+ bool IsLimitUndefined(T value) {
+ static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
+ return false;
+ }
+
+ template <>
+ bool IsLimitUndefined<uint32_t>(uint32_t value) {
+ return value == wgpu::kLimitU32Undefined;
+ }
+
+ template <>
+ bool IsLimitUndefined<uint64_t>(uint64_t value) {
+ return value == wgpu::kLimitU64Undefined;
+ }
+
+ } // namespace
+
+ void GetDefaultLimits(Limits* limits) {
+ ASSERT(limits != nullptr);
+#define X(Better, limitName, base, ...) limits->limitName = base;
+ LIMITS(X)
+#undef X
+ }
+
+ Limits ReifyDefaultLimits(const Limits& limits) {
+ Limits out;
+#define X(Better, limitName, base, ...) \
+ if (IsLimitUndefined(limits.limitName) || \
+ CheckLimit<LimitBetterDirection::Better>::IsBetter( \
+ static_cast<decltype(limits.limitName)>(base), limits.limitName)) { \
+ /* If the limit is undefined or the default is better, use the default */ \
+ out.limitName = base; \
+ } else { \
+ out.limitName = limits.limitName; \
+ }
+ LIMITS(X)
+#undef X
+ return out;
+ }
+
+ MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
+#define X(Better, limitName, ...) \
+ if (!IsLimitUndefined(requiredLimits.limitName)) { \
+ DAWN_TRY(CheckLimit<LimitBetterDirection::Better>::Validate(supportedLimits.limitName, \
+ requiredLimits.limitName)); \
+ }
+ LIMITS(X)
+#undef X
+ return {};
+ }
+
+ Limits ApplyLimitTiers(Limits limits) {
+#define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
+#define GET_TIER_COUNT(LIMIT_GROUP) \
+ ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
+
+#define X_EACH_GROUP(LIMIT_GROUP) \
+ { \
+ constexpr uint32_t kTierCount = GET_TIER_COUNT(LIMIT_GROUP); \
+ for (uint32_t i = kTierCount; i != 0; --i) { \
+ LIMIT_GROUP(X_CHECK_BETTER_AND_CLAMP) \
+ /* Limits fit in tier and have been clamped. Break. */ \
+ break; \
+ } \
+ }
+
+#define X_CHECK_BETTER_AND_CLAMP(Better, limitName, ...) \
+ { \
+ constexpr std::array<decltype(Limits::limitName), kTierCount> tiers{__VA_ARGS__}; \
+ decltype(Limits::limitName) tierValue = tiers[i - 1]; \
+ if (CheckLimit<LimitBetterDirection::Better>::IsBetter(tierValue, limits.limitName)) { \
+ /* The tier is better. Go to the next tier. */ \
+ continue; \
+ } else if (tierValue != limits.limitName) { \
+ /* Better than the tier. Degrade |limits| to the tier. */ \
+ limits.limitName = tiers[i - 1]; \
+ } \
+ }
+
+ LIMITS_EACH_GROUP(X_EACH_GROUP)
+#undef X_CHECK_BETTER_AND_CLAMP
+#undef X_EACH_GROUP
+#undef GET_TIER_COUNT
+#undef X_TIER_COUNT
+ return limits;
+ }
+
+} // namespace dawn_native
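ApplyLimitTiers above walks each tier group from the best tier downwards and degrades every limit in the group to the first tier all of them satisfy; a supported value below the lowest tier is left unchanged. A hedged sketch of that clamping for the single-limit LIMITS_WORKGROUP_STORAGE_SIZE group, with the tiers copied from the macro above:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Tiers from LIMITS_WORKGROUP_STORAGE_SIZE; the helper itself only
// illustrates the macro's behavior, it is not Dawn's implementation.
constexpr std::array<uint32_t, 4> kWorkgroupStorageTiers = {16352, 32768, 49152, 65536};

uint32_t ClampToTier(uint32_t supported) {
    for (size_t i = kWorkgroupStorageTiers.size(); i != 0; --i) {
        if (supported >= kWorkgroupStorageTiers[i - 1]) {
            return kWorkgroupStorageTiers[i - 1];  // best tier the value satisfies
        }
    }
    return supported;  // below every tier: leave it as-is, like the macro does
}

int main() {
    std::printf("%u\n", ClampToTier(40000));  // prints 32768
    std::printf("%u\n", ClampToTier(70000));  // prints 65536
    return 0;
}
```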
diff --git a/chromium/third_party/dawn/src/dawn_native/Limits.h b/chromium/third_party/dawn/src/dawn_native/Limits.h
new file mode 100644
index 00000000000..4beed2e780e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Limits.h
@@ -0,0 +1,43 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_LIMITS_H_
+#define DAWNNATIVE_LIMITS_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native {
+
+ struct CombinedLimits {
+ Limits v1;
+ };
+
+ // Populate |limits| with the default limits.
+ void GetDefaultLimits(Limits* limits);
+
+ // Returns a copy of |limits| where all undefined values are replaced
+ // with their defaults. Also clamps to the defaults if the provided limits
+ // are worse.
+ Limits ReifyDefaultLimits(const Limits& limits);
+
+ // Validate that |requiredLimits| are no better than |supportedLimits|.
+ MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
+
+ // Returns a copy of |limits| where limit tiers are applied.
+ Limits ApplyLimitTiers(Limits limits);
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_LIMITS_H_
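These four helpers are meant to be combined by adapter/device code elsewhere in the patch. The snippet below is only a plausible combination under that assumption, not code from this change; `ResolveRequestedLimits` is a hypothetical name.

```cpp
#include "dawn_native/Limits.h"

namespace dawn_native {

    // Hypothetical glue, not part of this patch: tier the adapter's limits,
    // check the caller's required limits against them, then fill in defaults.
    MaybeError ResolveRequestedLimits(const Limits& adapterLimits,
                                      const Limits& requiredLimits,
                                      Limits* out) {
        Limits supported = ApplyLimitTiers(adapterLimits);
        DAWN_TRY(ValidateLimits(supported, requiredLimits));
        *out = ReifyDefaultLimits(requiredLimits);
        return {};
    }

}  // namespace dawn_native
```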
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp b/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
index 8b4731f338d..e33722fd732 100644
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
@@ -13,6 +13,9 @@
// limitations under the License.
#include "dawn_native/ObjectBase.h"
+#include "dawn_native/Device.h"
+
+#include <mutex>
namespace dawn_native {
@@ -34,4 +37,37 @@ namespace dawn_native {
return GetRefCountPayload() == kErrorPayload;
}
+ bool ObjectBase::IsAlive() const {
+ return mDevice != nullptr;
+ }
+
+ void ObjectBase::DestroyObject() {
+ mDevice = nullptr;
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
+ if (label) {
+ mLabel = label;
+ }
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
+ }
+
+ ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
+ : ObjectBase(device) {
+ }
+
+ void ApiObjectBase::APISetLabel(const char* label) {
+ mLabel = label;
+ SetLabelImpl();
+ }
+
+ const std::string& ApiObjectBase::GetLabel() const {
+ return mLabel;
+ }
+
+ void ApiObjectBase::SetLabelImpl() {
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h b/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
index 544ce1a4bb6..17d32f8ff44 100644
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
@@ -15,7 +15,11 @@
#ifndef DAWNNATIVE_OBJECTBASE_H_
#define DAWNNATIVE_OBJECTBASE_H_
+#include "common/LinkedList.h"
#include "common/RefCounted.h"
+#include "dawn_native/Forward.h"
+
+#include <string>
namespace dawn_native {
@@ -26,19 +30,41 @@ namespace dawn_native {
struct ErrorTag {};
static constexpr ErrorTag kError = {};
- ObjectBase(DeviceBase* device);
+ explicit ObjectBase(DeviceBase* device);
ObjectBase(DeviceBase* device, ErrorTag tag);
DeviceBase* GetDevice() const;
bool IsError() const;
-
- protected:
- ~ObjectBase() override = default;
+ bool IsAlive() const;
+ void DestroyObject();
private:
+ // Pointer to the owning device. If it is nullptr, the object is no longer alive or
+ // valid.
DeviceBase* mDevice;
};
+ class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
+ public:
+ struct LabelNotImplementedTag {};
+ static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
+
+ ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
+ ApiObjectBase(DeviceBase* device, const char* label);
+ ApiObjectBase(DeviceBase* device, ErrorTag tag);
+
+ virtual ObjectType GetType() const = 0;
+ const std::string& GetLabel() const;
+
+ // Dawn API
+ void APISetLabel(const char* label);
+
+ private:
+ virtual void SetLabelImpl();
+
+ std::string mLabel;
+ };
+
} // namespace dawn_native
#endif // DAWNNATIVE_OBJECTBASE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index 2ebdc0238cf..470eee17fcb 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -115,9 +115,6 @@ namespace dawn_native {
case BindingInfoType::StorageTexture: {
TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly:
- TextureViewUsedAs(view, kReadOnlyStorageTexture);
- break;
case wgpu::StorageTextureAccess::WriteOnly:
TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index c650afa1a4f..49590aa453b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -16,33 +16,69 @@
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/ObjectContentHasher.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/ShaderModule.h"
namespace dawn_native {
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ SingleShaderStage value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case SingleShaderStage::Compute:
+ s->Append("Compute");
+ break;
+ case SingleShaderStage::Vertex:
+ s->Append("Vertex");
+ break;
+ case SingleShaderStage::Fragment:
+ s->Append("Fragment");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
MaybeError ValidateProgrammableStage(DeviceBase* device,
const ShaderModuleBase* module,
const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
const PipelineLayoutBase* layout,
SingleShaderStage stage) {
DAWN_TRY(device->ValidateObject(module));
- if (!module->HasEntryPoint(entryPoint)) {
- return DAWN_VALIDATION_ERROR("Entry point doesn't exist in the module");
- }
+ DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
+ "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+ module);
const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
- if (metadata.stage != stage) {
- return DAWN_VALIDATION_ERROR("Entry point isn't for the correct stage");
- }
+ DAWN_INVALID_IF(metadata.stage != stage,
+ "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
+ metadata.stage, entryPoint, stage);
if (layout != nullptr) {
DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
}
+ if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "Pipeline overridable constants are disallowed because they are partially "
+ "implemented.");
+ }
+
+ // Validate that the overridable constants exist in the shader module. The PipelineBase is
+ // not constructed yet at this point, so iterate over the constants from the descriptor.
+ for (uint32_t i = 0; i < constantCount; i++) {
+ DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
+ "Pipeline overridable constant \"%s\" not found in shader module %s.",
+ constants[i].key, module);
+ }
+
return {};
}
@@ -50,8 +86,9 @@ namespace dawn_native {
PipelineBase::PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
+ const char* label,
std::vector<StageAndDescriptor> stages)
- : CachedObject(device), mLayout(layout) {
+ : ApiObjectBase(device, label), mLayout(layout) {
ASSERT(!stages.empty());
for (const StageAndDescriptor& stage : stages) {
@@ -66,7 +103,12 @@ namespace dawn_native {
// Record them internally.
bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
mStageMask |= StageBit(shaderStage);
- mStages[shaderStage] = {module, entryPointName, &metadata};
+ mStages[shaderStage] = {module, entryPointName, &metadata,
+ std::vector<PipelineConstantEntry>()};
+ auto& constants = mStages[shaderStage].constants;
+ for (uint32_t i = 0; i < stage.constantCount; i++) {
+ constants.emplace_back(stage.constants[i].key, stage.constants[i].value);
+ }
// Compute the max() of all minBufferSizes across all stages.
RequiredBufferSizes stageMinBufferSizes =
@@ -88,7 +130,7 @@ namespace dawn_native {
}
PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag) {
+ : ApiObjectBase(device, tag) {
}
PipelineLayoutBase* PipelineBase::GetLayout() {
@@ -115,13 +157,18 @@ namespace dawn_native {
return mStages;
}
+ wgpu::ShaderStage PipelineBase::GetStageMask() const {
+ return mStageMask;
+ }
+
MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
- if (groupIndex >= kMaxBindGroups) {
- return DAWN_VALIDATION_ERROR("Bind group layout index out of bounds");
- }
+ DAWN_INVALID_IF(
+ groupIndex >= kMaxBindGroups,
+ "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+ groupIndex, kMaxBindGroups);
return {};
}
@@ -139,7 +186,9 @@ namespace dawn_native {
BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
Ref<BindGroupLayoutBase> result;
- if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result)) {
+ if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
+ "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
+ this)) {
return BindGroupLayoutBase::MakeError(GetDevice());
}
return result.Detach();
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index 3ed80f1148d..c73d38968e7 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -17,6 +17,7 @@
#include "dawn_native/CachedObject.h"
#include "dawn_native/Forward.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/PerStage.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/ShaderModule.h"
@@ -31,24 +32,30 @@ namespace dawn_native {
MaybeError ValidateProgrammableStage(DeviceBase* device,
const ShaderModuleBase* module,
const std::string& entryPoint,
+ uint32_t constantCount,
+ const ConstantEntry* constants,
const PipelineLayoutBase* layout,
SingleShaderStage stage);
+ using PipelineConstantEntry = std::pair<std::string, double>;
struct ProgrammableStage {
Ref<ShaderModuleBase> module;
std::string entryPoint;
// The metadata lives as long as module, that's ref-ed in the same structure.
const EntryPointMetadata* metadata = nullptr;
+
+ std::vector<PipelineConstantEntry> constants;
};
- class PipelineBase : public CachedObject {
+ class PipelineBase : public ApiObjectBase, public CachedObject {
public:
PipelineLayoutBase* GetLayout();
const PipelineLayoutBase* GetLayout() const;
const RequiredBufferSizes& GetMinBufferSizes() const;
const ProgrammableStage& GetStage(SingleShaderStage stage) const;
const PerStage<ProgrammableStage>& GetAllStages() const;
+ wgpu::ShaderStage GetStageMask() const;
ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
@@ -62,6 +69,7 @@ namespace dawn_native {
protected:
PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
+ const char* label,
std::vector<StageAndDescriptor> stages);
PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index a3c2eae2580..2f96eddf01b 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -20,12 +20,15 @@
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/ShaderModule.h"
namespace dawn_native {
- MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor) {
+ MaybeError ValidatePipelineLayoutDescriptor(
+ DeviceBase* device,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
@@ -37,6 +40,12 @@ namespace dawn_native {
BindingCounts bindingCounts = {};
for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
+ if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
+ pipelineCompatibilityToken) {
+ return DAWN_VALIDATION_ERROR(
+ "cannot create a pipeline layout using a bind group layout that was created as "
+ "part of a pipeline's default layout");
+ }
AccumulateBindingCounts(&bindingCounts,
descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
}
@@ -49,7 +58,7 @@ namespace dawn_native {
PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
const PipelineLayoutDescriptor* descriptor)
- : CachedObject(device) {
+ : ApiObjectBase(device, kLabelNotImplemented) {
ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
++group) {
@@ -59,7 +68,7 @@ namespace dawn_native {
}
PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag) {
+ : ApiObjectBase(device, tag) {
}
PipelineLayoutBase::~PipelineLayoutBase() {
@@ -144,7 +153,7 @@ namespace dawn_native {
// Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
auto ConvertMetadataToEntry =
- [](const EntryPointMetadata::ShaderBindingInfo& shaderBinding,
+ [](const ShaderBindingInfo& shaderBinding,
const ExternalTextureBindingLayout* externalTextureBindingEntry)
-> BindGroupLayoutEntry {
BindGroupLayoutEntry entry = {};
@@ -203,9 +212,13 @@ namespace dawn_native {
return entry;
};
+ PipelineCompatibilityToken pipelineCompatibilityToken =
+ device->GetNextPipelineCompatibilityToken();
+
// Creates the BGL from the entries for a stage, checking it is valid.
- auto CreateBGL = [](DeviceBase* device,
- const EntryMap& entries) -> ResultOrError<Ref<BindGroupLayoutBase>> {
+ auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ -> ResultOrError<Ref<BindGroupLayoutBase>> {
std::vector<BindGroupLayoutEntry> entryVec;
entryVec.reserve(entries.size());
for (auto& it : entries) {
@@ -219,7 +232,7 @@ namespace dawn_native {
if (device->IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupLayoutDescriptor(device, &desc));
}
- return device->GetOrCreateBindGroupLayout(&desc);
+ return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
};
ASSERT(!stages.empty());
@@ -242,7 +255,7 @@ namespace dawn_native {
for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
for (const auto& bindingIt : metadata.bindings[group]) {
BindingNumber bindingNumber = bindingIt.first;
- const EntryPointMetadata::ShaderBindingInfo& shaderBinding = bindingIt.second;
+ const ShaderBindingInfo& shaderBinding = bindingIt.second;
// Create the BindGroupLayoutEntry
BindGroupLayoutEntry entry =
@@ -276,7 +289,8 @@ namespace dawn_native {
BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
- DAWN_TRY_ASSIGN(bindGroupLayouts[group], CreateBGL(device, entryData[group]));
+ DAWN_TRY_ASSIGN(bindGroupLayouts[group],
+ CreateBGL(device, entryData[group], pipelineCompatibilityToken));
if (entryData[group].size() != 0) {
pipelineBGLCount = group + BindGroupIndex(1);
}
@@ -292,7 +306,7 @@ namespace dawn_native {
desc.bindGroupLayouts = bgls.data();
desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
- DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc));
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
Ref<PipelineLayoutBase> result;
DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
@@ -309,6 +323,10 @@ namespace dawn_native {
return std::move(result);
}
+ ObjectType PipelineLayoutBase::GetType() const {
+ return ObjectType::PipelineLayout;
+ }
+
const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
ASSERT(!IsError());
ASSERT(group < kMaxBindGroupsTyped);
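
The PipelineCompatibilityToken plumbing above tags every bind group layout created for a pipeline's default layout, and ValidatePipelineLayoutDescriptor rejects explicit pipeline layouts that reuse those layouts. A simplified standalone sketch of the token comparison, assuming a plain integer token and a hypothetical Layout type rather than Dawn's classes:

    #include <cstdint>
    #include <vector>

    // Stand-in for Dawn's typed PipelineCompatibilityToken; 0 plays the role of
    // "not created for any pipeline's default layout".
    using Token = uint64_t;

    struct Layout {
        Token token = 0;
    };

    // Mirrors the new descriptor check: every bind group layout in an explicitly
    // created pipeline layout must carry the expected token.
    bool LayoutsMatchToken(const std::vector<const Layout*>& bindGroupLayouts, Token expected) {
        for (const Layout* bgl : bindGroupLayouts) {
            if (bgl->token != expected) {
                return false;  // was created as part of a pipeline's default layout
            }
        }
        return true;
    }
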
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
index 0c1b5d74daa..7371dab46df 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
@@ -22,6 +22,7 @@
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -30,8 +31,10 @@
namespace dawn_native {
- MaybeError ValidatePipelineLayoutDescriptor(DeviceBase*,
- const PipelineLayoutDescriptor* descriptor);
+ MaybeError ValidatePipelineLayoutDescriptor(
+ DeviceBase*,
+ const PipelineLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
using BindGroupLayoutArray =
ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
@@ -41,9 +44,11 @@ namespace dawn_native {
SingleShaderStage shaderStage;
ShaderModuleBase* module;
std::string entryPoint;
+ uint32_t constantCount = 0u;
+ ConstantEntry const* constants = nullptr;
};
- class PipelineLayoutBase : public CachedObject {
+ class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
public:
PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
~PipelineLayoutBase() override;
@@ -53,6 +58,8 @@ namespace dawn_native {
DeviceBase* device,
std::vector<StageAndDescriptor> stages);
+ ObjectType GetType() const override;
+
const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index 47d095368ec..5f7ed7710fe 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -21,6 +21,7 @@
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <cstring>
@@ -29,7 +30,7 @@ namespace dawn_native {
ProgrammablePassEncoder::ProgrammablePassEncoder(DeviceBase* device,
EncodingContext* encodingContext)
- : ObjectBase(device),
+ : ApiObjectBase(device, kLabelNotImplemented),
mEncodingContext(encodingContext),
mValidationEnabled(device->IsValidationEnabled()) {
}
@@ -37,7 +38,7 @@ namespace dawn_native {
ProgrammablePassEncoder::ProgrammablePassEncoder(DeviceBase* device,
EncodingContext* encodingContext,
ErrorTag errorTag)
- : ObjectBase(device, errorTag),
+ : ApiObjectBase(device, errorTag),
mEncodingContext(encodingContext),
mValidationEnabled(device->IsValidationEnabled()) {
}
@@ -54,45 +55,57 @@ namespace dawn_native {
}
void ProgrammablePassEncoder::APIInsertDebugMarker(const char* groupLabel) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- InsertDebugMarkerCmd* cmd =
- allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- return {};
- });
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ InsertDebugMarkerCmd* cmd =
+ allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ return {};
+ },
+ "encoding InsertDebugMarker(\"%s\")", groupLabel);
}
void ProgrammablePassEncoder::APIPopDebugGroup() {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- if (mDebugGroupStackSize == 0) {
- return DAWN_VALIDATION_ERROR("Pop must be balanced by a corresponding Push.");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ if (mDebugGroupStackSize == 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Pop must be balanced by a corresponding Push.");
+ }
}
- }
- allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
- mDebugGroupStackSize--;
+ allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+ mDebugGroupStackSize--;
+ mEncodingContext->PopDebugGroupLabel();
- return {};
- });
+ return {};
+ },
+ "encoding PopDebugGroup()");
}
void ProgrammablePassEncoder::APIPushDebugGroup(const char* groupLabel) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- PushDebugGroupCmd* cmd =
- allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
- cmd->length = strlen(groupLabel);
-
- char* label = allocator->AllocateData<char>(cmd->length + 1);
- memcpy(label, groupLabel, cmd->length + 1);
-
- mDebugGroupStackSize++;
-
- return {};
- });
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ PushDebugGroupCmd* cmd =
+ allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+ cmd->length = strlen(groupLabel);
+
+ char* label = allocator->AllocateData<char>(cmd->length + 1);
+ memcpy(label, groupLabel, cmd->length + 1);
+
+ mDebugGroupStackSize++;
+ mEncodingContext->PushDebugGroupLabel(groupLabel);
+
+ return {};
+ },
+ "encoding PushDebugGroup(\"%s\")", groupLabel);
}
MaybeError ProgrammablePassEncoder::ValidateSetBindGroup(
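
The rewritten debug-group methods keep the same balance rule (every Pop needs a prior Push) while also forwarding labels to the encoding context and passing a printf-style context string to TryEncode. A small standalone sketch of the balance and label bookkeeping, using a hypothetical tracker rather than the encoder class:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Tracks debug-group nesting the way the pass encoder does: Push increments the
    // depth and records a label, Pop is only valid when a matching Push happened.
    struct DebugGroupTracker {
        uint64_t depth = 0;
        std::vector<std::string> labels;

        void Push(const char* label) {
            depth++;
            labels.emplace_back(label);
        }

        bool Pop() {
            if (depth == 0) {
                return false;  // "Pop must be balanced by a corresponding Push."
            }
            depth--;
            labels.pop_back();
            return true;
        }
    };
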
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
index 4cea69628b3..f58a9090acb 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
@@ -17,6 +17,7 @@
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/IntegerTypes.h"
#include "dawn_native/ObjectBase.h"
@@ -27,7 +28,7 @@ namespace dawn_native {
class DeviceBase;
// Base class for shared functionality between ComputePassEncoder and RenderPassEncoder.
- class ProgrammablePassEncoder : public ObjectBase {
+ class ProgrammablePassEncoder : public ApiObjectBase {
public:
ProgrammablePassEncoder(DeviceBase* device, EncodingContext* encodingContext);
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
index 598bac24d04..2ad7cd75ae1 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
@@ -15,7 +15,8 @@
#include "dawn_native/QuerySet.h"
#include "dawn_native/Device.h"
-#include "dawn_native/Extensions.h"
+#include "dawn_native/Features.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <set>
@@ -39,43 +40,34 @@ namespace dawn_native {
MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
const QuerySetDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
- if (descriptor->count > kMaxQueryCount) {
- return DAWN_VALIDATION_ERROR("Max query count exceeded");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
DAWN_TRY(ValidateQueryType(descriptor->type));
+ DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
+ "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
+ kMaxQueryCount);
+
switch (descriptor->type) {
case wgpu::QueryType::Occlusion:
- if (descriptor->pipelineStatisticsCount != 0) {
- return DAWN_VALIDATION_ERROR(
- "The pipeline statistics should not be set if query type is Occlusion");
- }
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+ "Pipeline statistics specified for a query of type %s.",
+ descriptor->type);
break;
case wgpu::QueryType::PipelineStatistics: {
// TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
                // Disallow it as unsafe until the implementation is completed.
- if (device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline statistics query is disallowed because it's not fully "
- "implemented");
- }
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Pipeline statistics queries are disallowed because they are not "
+ "fully implemented");
- if (!device->IsExtensionEnabled(Extension::PipelineStatisticsQuery)) {
- return DAWN_VALIDATION_ERROR(
- "The pipeline statistics query feature is not supported");
- }
+ DAWN_INVALID_IF(
+ !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
+ "Pipeline statistics query set created without the feature being enabled.");
- if (descriptor->pipelineStatisticsCount == 0) {
- return DAWN_VALIDATION_ERROR(
- "At least one pipeline statistics is set if query type is "
- "PipelineStatistics");
- }
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
+ "Pipeline statistics query set created with 0 statistics.");
std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
@@ -83,27 +75,22 @@ namespace dawn_native {
std::pair<std::set<wgpu::PipelineStatisticName>::iterator, bool> res =
pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
- if (!res.second) {
- return DAWN_VALIDATION_ERROR("Duplicate pipeline statistics found");
- }
+ DAWN_INVALID_IF(!res.second, "Statistic %s is specified more than once.",
+ descriptor->pipelineStatistics[i]);
}
} break;
case wgpu::QueryType::Timestamp:
- if (!device->IsExtensionEnabled(Extension::TimestampQuery)) {
- return DAWN_VALIDATION_ERROR("The timestamp query feature is not supported");
- }
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Timestamp queries are disallowed because they may expose precise "
+ "timing information.");
- if (device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "The timestamp query is disallowed because it may expose precise timing "
- "information");
- }
+ DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
+ "Timestamp query set created without the feature being enabled.");
- if (descriptor->pipelineStatisticsCount != 0) {
- return DAWN_VALIDATION_ERROR(
- "The pipeline statistics should not be set if query type is Timestamp");
- }
+ DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+ "Pipeline statistics specified for a query of type %s.",
+ descriptor->type);
break;
default:
@@ -114,7 +101,7 @@ namespace dawn_native {
}
QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
- : ObjectBase(device),
+ : ApiObjectBase(device, kLabelNotImplemented),
mQueryType(descriptor->type),
mQueryCount(descriptor->count),
mState(QuerySetState::Available) {
@@ -126,7 +113,7 @@ namespace dawn_native {
}
QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : ApiObjectBase(device, tag) {
}
QuerySetBase::~QuerySetBase() {
@@ -139,6 +126,10 @@ namespace dawn_native {
return new ErrorQuerySet(device);
}
+ ObjectType QuerySetBase::GetType() const {
+ return ObjectType::QuerySet;
+ }
+
wgpu::QueryType QuerySetBase::GetQueryType() const {
return mQueryType;
}
@@ -161,9 +152,7 @@ namespace dawn_native {
MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
ASSERT(!IsError());
- if (mState == QuerySetState::Destroyed) {
- return DAWN_VALIDATION_ERROR("Destroyed query set used in a submit");
- }
+ DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
return {};
}
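
Summarizing the rewritten ValidateQuerySetDescriptor: the count must not exceed kMaxQueryCount, pipeline statistics may only accompany PipelineStatistics queries, PipelineStatistics and Timestamp query sets require their features and are blocked while DisallowUnsafeAPIs is enabled, and no statistic may be listed twice. A standalone sketch of the duplicate-statistic rule, with a hypothetical enum standing in for wgpu::PipelineStatisticName:

    #include <set>
    #include <vector>

    // Hypothetical enum standing in for wgpu::PipelineStatisticName.
    enum class Statistic { VertexShaderInvocations, ClipperInvocations, FragmentShaderInvocations };

    // Returns false if any statistic is listed more than once, matching the
    // duplicate check in ValidateQuerySetDescriptor.
    bool StatisticsAreUnique(const std::vector<Statistic>& stats) {
        std::set<Statistic> seen;
        for (Statistic s : stats) {
            if (!seen.insert(s).second) {
                return false;
            }
        }
        return true;
    }
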
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.h b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
index 32b75c9edcc..3ad4e7ca4cd 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.h
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
@@ -25,12 +25,14 @@ namespace dawn_native {
MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
- class QuerySetBase : public ObjectBase {
+ class QuerySetBase : public ApiObjectBase {
public:
QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
static QuerySetBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
wgpu::QueryType GetQueryType() const;
uint32_t GetQueryCount() const;
const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 9aac22b73d6..e6af98b2028 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -24,6 +24,7 @@
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ExternalTexture.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderPassEncoder.h"
#include "dawn_native/RenderPipeline.h"
@@ -161,10 +162,11 @@ namespace dawn_native {
QueueBase::TaskInFlight::~TaskInFlight() {
}
- QueueBase::QueueBase(DeviceBase* device) : ObjectBase(device) {
+ QueueBase::QueueBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
}
- QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ObjectBase(device, tag) {
+ QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ApiObjectBase(device, tag) {
}
QueueBase::~QueueBase() {
@@ -176,6 +178,10 @@ namespace dawn_native {
return new ErrorQueue(device);
}
+ ObjectType QueueBase::GetType() const {
+ return ObjectType::Queue;
+ }
+
void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
SubmitInternal(commandCount, commands);
@@ -244,7 +250,10 @@ namespace dawn_native {
uint64_t bufferOffset,
const void* data,
size_t size) {
- DAWN_TRY(ValidateWriteBuffer(buffer, bufferOffset, size));
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
return WriteBufferImpl(buffer, bufferOffset, data, size);
}
@@ -357,8 +366,10 @@ namespace dawn_native {
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options) {
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(
- ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options));
+ DAWN_TRY_CONTEXT(
+ ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
+ "validating CopyTextureForBrowser from %s to %s", source->texture,
+ destination->texture);
}
return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
@@ -430,34 +441,6 @@ namespace dawn_native {
return {};
}
- MaybeError QueueBase::ValidateWriteBuffer(const BufferBase* buffer,
- uint64_t bufferOffset,
- size_t size) const {
- DAWN_TRY(GetDevice()->ValidateIsAlive());
- DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
-
- if (bufferOffset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Queue::WriteBuffer bufferOffset must be a multiple of 4");
- }
- if (size % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Queue::WriteBuffer size must be a multiple of 4");
- }
-
- uint64_t bufferSize = buffer->GetSize();
- if (bufferOffset > bufferSize || size > (bufferSize - bufferOffset)) {
- return DAWN_VALIDATION_ERROR("Queue::WriteBuffer out of range");
- }
-
- if (!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst)) {
- return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
- }
-
- DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
-
- return {};
- }
-
MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
size_t dataSize,
const TextureDataLayout& dataLayout,
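
The checks that lived in the removed QueueBase::ValidateWriteBuffer (4-byte alignment of offset and size, the range fitting in the buffer, CopyDst usage) now go through the shared ValidateWriteBuffer free function, with the queue keeping only the liveness/object checks and buffer->ValidateCanUseOnQueueNow(). A standalone sketch of those numeric rules, using a hypothetical FakeBuffer struct in place of BufferBase:

    #include <cstdint>

    struct FakeBuffer {
        uint64_t size;
        bool hasCopyDstUsage;
    };

    // Mirrors the alignment, range, and usage rules checked before Queue::WriteBuffer.
    bool CanWriteBuffer(const FakeBuffer& buffer, uint64_t offset, uint64_t size) {
        if (offset % 4 != 0 || size % 4 != 0) {
            return false;  // offset and size must both be multiples of 4
        }
        if (offset > buffer.size || size > buffer.size - offset) {
            return false;  // write range does not fit in the buffer
        }
        return buffer.hasCopyDstUsage;  // destination buffer needs CopyDst usage
    }
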
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index f0178d27382..4c76f181c92 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -25,7 +25,7 @@
namespace dawn_native {
- class QueueBase : public ObjectBase {
+ class QueueBase : public ApiObjectBase {
public:
struct TaskInFlight {
virtual ~TaskInFlight();
@@ -33,9 +33,12 @@ namespace dawn_native {
virtual void HandleDeviceLoss() = 0;
};
- static QueueBase* MakeError(DeviceBase* device);
~QueueBase() override;
+ static QueueBase* MakeError(DeviceBase* device);
+
+ ObjectType GetType() const override;
+
// Dawn API
void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
void APIOnSubmittedWorkDone(uint64_t signalValue,
@@ -92,9 +95,6 @@ namespace dawn_native {
MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
WGPUQueueWorkDoneStatus* status) const;
- MaybeError ValidateWriteBuffer(const BufferBase* buffer,
- uint64_t bufferOffset,
- size_t size) const;
MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
size_t dataSize,
const TextureDataLayout& dataLayout,
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
index f4e0a8c0e09..cb81dab75ed 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
@@ -17,6 +17,7 @@
#include "common/BitSetIterator.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/RenderBundleEncoder.h"
namespace dawn_native {
@@ -24,9 +25,11 @@ namespace dawn_native {
RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
Ref<AttachmentState> attachmentState,
- RenderPassResourceUsage resourceUsage)
- : ObjectBase(encoder->GetDevice()),
+ RenderPassResourceUsage resourceUsage,
+ IndirectDrawMetadata indirectDrawMetadata)
+ : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
mCommands(encoder->AcquireCommands()),
+ mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
mAttachmentState(std::move(attachmentState)),
mResourceUsage(std::move(resourceUsage)) {
}
@@ -41,7 +44,11 @@ namespace dawn_native {
}
RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
- : ObjectBase(device, errorTag) {
+ : ApiObjectBase(device, errorTag) {
+ }
+
+ ObjectType RenderBundleBase::GetType() const {
+ return ObjectType::RenderBundle;
}
CommandIterator* RenderBundleBase::GetCommands() {
@@ -58,4 +65,8 @@ namespace dawn_native {
return mResourceUsage;
}
+ const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
+ return mIndirectDrawMetadata;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
index f971ed6a369..37517d9e60e 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
@@ -19,6 +19,8 @@
#include "dawn_native/AttachmentState.h"
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
+#include "dawn_native/IndirectDrawMetadata.h"
#include "dawn_native/ObjectBase.h"
#include "dawn_native/PassResourceUsage.h"
@@ -31,19 +33,23 @@ namespace dawn_native {
struct RenderBundleDescriptor;
class RenderBundleEncoder;
- class RenderBundleBase : public ObjectBase {
+ class RenderBundleBase : public ApiObjectBase {
public:
RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
Ref<AttachmentState> attachmentState,
- RenderPassResourceUsage resourceUsage);
+ RenderPassResourceUsage resourceUsage,
+ IndirectDrawMetadata indirectDrawMetadata);
static RenderBundleBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
CommandIterator* GetCommands();
const AttachmentState* GetAttachmentState() const;
const RenderPassResourceUsage& GetResourceUsage() const;
+ const IndirectDrawMetadata& GetIndirectDrawMetadata();
protected:
~RenderBundleBase() override;
@@ -52,6 +58,7 @@ namespace dawn_native {
RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
CommandIterator mCommands;
+ IndirectDrawMetadata mIndirectDrawMetadata;
Ref<AttachmentState> mAttachmentState;
RenderPassResourceUsage mResourceUsage;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
index daff3eb33bc..7a0fc5dc97f 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
@@ -18,6 +18,7 @@
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/Format.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_platform/DawnPlatform.h"
@@ -30,10 +31,8 @@ namespace dawn_native {
DAWN_TRY(ValidateTextureFormat(textureFormat));
const Format* format = nullptr;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- if (!format->IsColor() || !format->isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "The color attachment texture format is not color renderable");
- }
+ DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+ "Texture format %s is not color renderable.", textureFormat);
return {};
}
@@ -42,36 +41,35 @@ namespace dawn_native {
DAWN_TRY(ValidateTextureFormat(textureFormat));
const Format* format = nullptr;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
- if (!format->HasDepthOrStencil() || !format->isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "The depth stencil attachment texture format is not a renderable depth/stencil "
- "format");
- }
+ DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+ "Texture format %s is not depth/stencil renderable.", textureFormat);
return {};
}
MaybeError ValidateRenderBundleEncoderDescriptor(
const DeviceBase* device,
const RenderBundleEncoderDescriptor* descriptor) {
- if (!IsValidSampleCount(descriptor->sampleCount)) {
- return DAWN_VALIDATION_ERROR("Sample count is not supported");
- }
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+ "Sample count (%u) is not supported.", descriptor->sampleCount);
- if (descriptor->colorFormatsCount > kMaxColorAttachments) {
- return DAWN_VALIDATION_ERROR("Color formats count exceeds maximum");
- }
+ DAWN_INVALID_IF(
+ descriptor->colorFormatsCount > kMaxColorAttachments,
+            "Color formats count (%u) exceeds maximum number of color attachments (%u).",
+ descriptor->colorFormatsCount, kMaxColorAttachments);
- if (descriptor->colorFormatsCount == 0 &&
- descriptor->depthStencilFormat == wgpu::TextureFormat::Undefined) {
- return DAWN_VALIDATION_ERROR("Should have at least one attachment format");
- }
+ DAWN_INVALID_IF(descriptor->colorFormatsCount == 0 &&
+ descriptor->depthStencilFormat == wgpu::TextureFormat::Undefined,
+ "No color or depth/stencil attachment formats specified.");
for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
- DAWN_TRY(ValidateColorAttachmentFormat(device, descriptor->colorFormats[i]));
+ DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, descriptor->colorFormats[i]),
+ "validating colorFormats[%u]", i);
}
if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
- DAWN_TRY(ValidateDepthStencilAttachmentFormat(device, descriptor->depthStencilFormat));
+ DAWN_TRY_CONTEXT(
+ ValidateDepthStencilAttachmentFormat(device, descriptor->depthStencilFormat),
+ "validating depthStencilFormat");
}
return {};
@@ -102,6 +100,10 @@ namespace dawn_native {
return new RenderBundleEncoder(device, ObjectBase::kError);
}
+ ObjectType RenderBundleEncoder::GetType() const {
+ return ObjectType::RenderBundleEncoder;
+ }
+
CommandIterator RenderBundleEncoder::AcquireCommands() {
return mBundleEncodingContext.AcquireCommands();
}
@@ -130,7 +132,8 @@ namespace dawn_native {
DAWN_TRY(ValidateFinish(usages));
}
- return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), std::move(usages));
+ return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), std::move(usages),
+ std::move(mIndirectDrawMetadata));
}
MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
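
With this change the indexed-indirect draw metadata recorded by the encoder travels with the finished bundle: APIFinish moves mIndirectDrawMetadata into the new RenderBundleBase constructor argument, and it can later be read back through GetIndirectDrawMetadata(). A simplified sketch of that ownership transfer, with hypothetical Encoder/Bundle/DrawMetadata types:

    #include <utility>
    #include <vector>

    // Simplified stand-ins: the encoder accumulates per-draw metadata and hands
    // ownership to the bundle when it is finished.
    struct DrawMetadata {
        std::vector<int> indexedIndirectDraws;
    };

    struct Bundle {
        explicit Bundle(DrawMetadata metadata) : mMetadata(std::move(metadata)) {}
        const DrawMetadata& GetMetadata() const { return mMetadata; }

      private:
        DrawMetadata mMetadata;
    };

    struct Encoder {
        DrawMetadata mPending;

        Bundle Finish() {
            // Mirrors APIFinish moving its accumulated metadata into the new bundle.
            return Bundle(std::move(mPending));
        }
    };
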
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
index 13439b7ef73..0c7ab447be7 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
@@ -17,6 +17,7 @@
#include "dawn_native/EncodingContext.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderEncoderBase.h"
@@ -32,6 +33,8 @@ namespace dawn_native {
const RenderBundleEncoderDescriptor* descriptor);
static RenderBundleEncoder* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
CommandIterator AcquireCommands();
@@ -45,6 +48,10 @@ namespace dawn_native {
EncodingContext mBundleEncodingContext;
};
+
+ // For the benefit of template generation.
+ using RenderBundleEncoderBase = RenderBundleEncoder;
+
} // namespace dawn_native
#endif // DAWNNATIVE_RENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index e98c45a2b5e..240fc294360 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -17,6 +17,7 @@
#include "common/Constants.h"
#include "common/Log.h"
#include "dawn_native/Buffer.h"
+#include "dawn_native/BufferLocation.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
@@ -60,28 +61,31 @@ namespace dawn_native {
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- if (mDisableBaseInstance && firstInstance != 0) {
- return DAWN_VALIDATION_ERROR("Non-zero first instance not supported");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
+ firstVertex));
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+ instanceCount, firstInstance));
}
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
- firstVertex));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
- firstInstance));
- }
-
- DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
- draw->vertexCount = vertexCount;
- draw->instanceCount = instanceCount;
- draw->firstVertex = firstVertex;
- draw->firstInstance = firstInstance;
+ DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
+ draw->vertexCount = vertexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstVertex = firstVertex;
+ draw->firstInstance = firstInstance;
- return {};
- });
+ return {};
+ },
+ "encoding Draw(%u, %u, %u, %u).", vertexCount, instanceCount, firstVertex,
+ firstInstance);
}
void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
@@ -89,248 +93,307 @@ namespace dawn_native {
uint32_t firstIndex,
int32_t baseVertex,
uint32_t firstInstance) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- if (mDisableBaseInstance && firstInstance != 0) {
- return DAWN_VALIDATION_ERROR("Non-zero first instance not supported");
- }
- if (mDisableBaseVertex && baseVertex != 0) {
- return DAWN_VALIDATION_ERROR("Non-zero base vertex not supported");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+ "First instance (%u) must be zero.", firstInstance);
+
+ DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
+ "Base vertex (%u) must be zero.", baseVertex);
+
+ DAWN_TRY(
+ mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
+
+                    // Although we don't know the actual vertex access range on the CPU, we
+                    // still call ValidateBufferInRangeForVertexBuffer in order to deal with
+                    // vertex-step-mode vertex buffers that have an array stride of zero.
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
+ DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
+ instanceCount, firstInstance));
}
- DAWN_TRY(mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
-
- // Although we don't know actual vertex access range in CPU, we still call the
- // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step mode
- // vertex buffer with an array stride of zero.
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
- DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
- firstInstance));
- }
-
- DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
- draw->indexCount = indexCount;
- draw->instanceCount = instanceCount;
- draw->firstIndex = firstIndex;
- draw->baseVertex = baseVertex;
- draw->firstInstance = firstInstance;
-
- return {};
- });
+ DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
+ draw->indexCount = indexCount;
+ draw->instanceCount = instanceCount;
+ draw->firstIndex = firstIndex;
+ draw->baseVertex = baseVertex;
+ draw->firstInstance = firstInstance;
+
+ return {};
+ },
+ "encoding DrawIndexed(%u, %u, %u, %i, %u).", indexCount, instanceCount, firstIndex,
+ baseVertex, firstInstance);
}
void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDraw());
-
- if (indirectOffset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Indirect offset must be a multiple of 4");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+ DAWN_INVALID_IF(
+ indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
}
- if (indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset) {
- return DAWN_VALIDATION_ERROR("Indirect offset out of bounds");
- }
- }
-
- DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
- cmd->indirectBuffer = indirectBuffer;
- cmd->indirectOffset = indirectOffset;
+ DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+ cmd->indirectBuffer = indirectBuffer;
+ cmd->indirectOffset = indirectOffset;
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
- return {};
- });
+ return {};
+ },
+ "encoding DrawIndirect(%s, %u).", indirectBuffer, indirectOffset);
}
void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
uint64_t indirectOffset) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
- DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
- DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
- // Indexed indirect draws need a compute-shader based validation check that the
- // range of indices is contained inside the index buffer on Metal. Disallow them as
- // unsafe until the validation is implemented.
- if (GetDevice()->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- return DAWN_VALIDATION_ERROR(
- "DrawIndexedIndirect is disallowed because it doesn't validate that the "
- "index "
- "range is valid yet.");
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+ DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+ DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+ // Disallow draw indexed indirect until the validation is correctly implemented.
+ if (GetDevice()->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "DrawIndexedIndirect is disallowed because it doesn't correctly "
+ "validate that "
+ "the index range is valid yet.");
+ }
- if (indirectOffset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Indirect offset must be a multiple of 4");
- }
+ DAWN_INVALID_IF(indirectOffset % 4 != 0,
+ "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
- if ((indirectOffset >= indirectBuffer->GetSize() ||
- kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset)) {
- return DAWN_VALIDATION_ERROR("Indirect offset out of bounds");
+ DAWN_INVALID_IF(
+ (indirectOffset >= indirectBuffer->GetSize() ||
+ kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
+ "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+ indirectOffset, indirectBuffer, indirectBuffer->GetSize());
}
- }
- DrawIndexedIndirectCmd* cmd =
- allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
- cmd->indirectBuffer = indirectBuffer;
- cmd->indirectOffset = indirectOffset;
+ DrawIndexedIndirectCmd* cmd =
+ allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+ if (IsValidationEnabled()) {
+ cmd->indirectBufferLocation = BufferLocation::New();
+ mIndirectDrawMetadata.AddIndexedIndirectDraw(
+ mCommandBufferState.GetIndexFormat(),
+ mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
+ cmd->indirectBufferLocation.Get());
+ } else {
+ cmd->indirectBufferLocation =
+ BufferLocation::New(indirectBuffer, indirectOffset);
+ }
- mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
- return {};
- });
+ return {};
+ },
+ "encoding DrawIndexedIndirect(%s, %u).", indirectBuffer, indirectOffset);
}
void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(pipeline));
-
- if (pipeline->GetAttachmentState() != mAttachmentState.Get()) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline attachment state is not compatible with render encoder "
- "attachment state");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+
+ // TODO(dawn:563): More detail about why the states are incompatible would be
+ // nice.
+ DAWN_INVALID_IF(
+ pipeline->GetAttachmentState() != mAttachmentState.Get(),
+ "Attachment state of %s is not compatible with the attachment state of %s",
+ pipeline, this);
}
- }
- mCommandBufferState.SetRenderPipeline(pipeline);
+ mCommandBufferState.SetRenderPipeline(pipeline);
- SetRenderPipelineCmd* cmd =
- allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
- cmd->pipeline = pipeline;
+ SetRenderPipelineCmd* cmd =
+ allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
+ cmd->pipeline = pipeline;
- return {};
- });
+ return {};
+ },
+ "encoding SetPipeline(%s).", pipeline);
}
void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
wgpu::IndexFormat format,
uint64_t offset,
uint64_t size) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
-
- DAWN_TRY(ValidateIndexFormat(format));
- if (format == wgpu::IndexFormat::Undefined) {
- return DAWN_VALIDATION_ERROR("Index format must be specified");
- }
-
- if (offset % uint64_t(IndexFormatSize(format)) != 0) {
- return DAWN_VALIDATION_ERROR(
- "Offset must be a multiple of the index format size");
- }
-
- uint64_t bufferSize = buffer->GetSize();
- if (offset > bufferSize) {
- return DAWN_VALIDATION_ERROR("Offset larger than the buffer size");
- }
- uint64_t remainingSize = bufferSize - offset;
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
+
+ DAWN_TRY(ValidateIndexFormat(format));
+
+ DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
+ "Index format must be specified");
+
+ DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
+                                    "Index buffer offset (%u) is not a multiple of the size "
+                                    "(%u) of %s.",
+ offset, IndexFormatSize(format), format);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Index buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ // Temporarily treat 0 as undefined for size, and give a warning
+ // TODO(dawn:1058): Remove this if block
+ if (size == 0) {
+ size = wgpu::kWholeSize;
+ GetDevice()->EmitDeprecationWarning(
+ "Using size=0 to indicate default binding size for setIndexBuffer "
+ "is deprecated. In the future it will result in a zero-size binding. "
+ "Use `undefined` (wgpu::kWholeSize) or just omit the parameter "
+ "instead.");
+ }
- if (size == 0) {
- size = remainingSize;
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
+ } else {
+ DAWN_INVALID_IF(size > remainingSize,
+ "Index buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) of "
+ "%s.",
+ offset, size, bufferSize, buffer);
+ }
} else {
- if (size > remainingSize) {
- return DAWN_VALIDATION_ERROR("Size + offset larger than the buffer size");
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
}
}
- } else {
- if (size == 0) {
- size = buffer->GetSize() - offset;
- }
- }
- mCommandBufferState.SetIndexBuffer(format, size);
+ mCommandBufferState.SetIndexBuffer(format, size);
- SetIndexBufferCmd* cmd =
- allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
- cmd->buffer = buffer;
- cmd->format = format;
- cmd->offset = offset;
- cmd->size = size;
+ SetIndexBufferCmd* cmd =
+ allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
+ cmd->buffer = buffer;
+ cmd->format = format;
+ cmd->offset = offset;
+ cmd->size = size;
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
- return {};
- });
+ return {};
+ },
+ "encoding SetIndexBuffer(%s, %s, %u, %u).", buffer, format, offset, size);
}
void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
BufferBase* buffer,
uint64_t offset,
uint64_t size) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(buffer));
- DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
-
- if (slot >= kMaxVertexBuffers) {
- return DAWN_VALIDATION_ERROR("Vertex buffer slot out of bounds");
- }
-
- if (offset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Offset must be a multiple of 4");
- }
-
- uint64_t bufferSize = buffer->GetSize();
- if (offset > bufferSize) {
- return DAWN_VALIDATION_ERROR("Offset larger than the buffer size");
- }
- uint64_t remainingSize = bufferSize - offset;
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
+
+ DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
+                                    "Vertex buffer slot (%u) is larger than the maximum (%u)", slot,
+ kMaxVertexBuffers - 1);
+
+ DAWN_INVALID_IF(offset % 4 != 0,
+ "Vertex buffer offset (%u) is not a multiple of 4", offset);
+
+ uint64_t bufferSize = buffer->GetSize();
+ DAWN_INVALID_IF(offset > bufferSize,
+ "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
+ offset, bufferSize, buffer);
+
+ uint64_t remainingSize = bufferSize - offset;
+
+ // Temporarily treat 0 as undefined for size, and give a warning
+ // TODO(dawn:1058): Remove this if block
+ if (size == 0) {
+ size = wgpu::kWholeSize;
+ GetDevice()->EmitDeprecationWarning(
+ "Using size=0 to indicate default binding size for setVertexBuffer "
+ "is deprecated. In the future it will result in a zero-size binding. "
+ "Use `undefined` (wgpu::kWholeSize) or just omit the parameter "
+ "instead.");
+ }
- if (size == 0) {
- size = remainingSize;
+ if (size == wgpu::kWholeSize) {
+ size = remainingSize;
+ } else {
+ DAWN_INVALID_IF(size > remainingSize,
+ "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
+ "the size (%u) "
+ "of %s.",
+ offset, size, bufferSize, buffer);
+ }
} else {
- if (size > remainingSize) {
- return DAWN_VALIDATION_ERROR("Size + offset larger than the buffer size");
+ if (size == wgpu::kWholeSize) {
+ DAWN_ASSERT(buffer->GetSize() >= offset);
+ size = buffer->GetSize() - offset;
}
}
- } else {
- if (size == 0) {
- size = buffer->GetSize() - offset;
- }
- }
- mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
+ mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
- SetVertexBufferCmd* cmd =
- allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
- cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
- cmd->buffer = buffer;
- cmd->offset = offset;
- cmd->size = size;
+ SetVertexBufferCmd* cmd =
+ allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+ cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
+ cmd->buffer = buffer;
+ cmd->offset = offset;
+ cmd->size = size;
- mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
- return {};
- });
+ return {};
+ },
+ "encoding SetVertexBuffer(%u, %s, %u, %u).", slot, buffer, offset, size);
}
void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
BindGroupBase* group,
uint32_t dynamicOffsetCount,
const uint32_t* dynamicOffsets) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- BindGroupIndex groupIndex(groupIndexIn);
-
- if (IsValidationEnabled()) {
- DAWN_TRY(
- ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ BindGroupIndex groupIndex(groupIndexIn);
+
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets));
+ }
- RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
- mCommandBufferState.SetBindGroup(groupIndex, group);
- mUsageTracker.AddBindGroup(group);
+ RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
+ dynamicOffsets);
+ mCommandBufferState.SetBindGroup(groupIndex, group);
+ mUsageTracker.AddBindGroup(group);
- return {};
- });
+ return {};
+ },
+ "encoding SetBindGroup(%u, %s, %u).", groupIndexIn, group, dynamicOffsetCount);
}
} // namespace dawn_native
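
Both APISetIndexBuffer and APISetVertexBuffer above now treat size == 0 as a deprecated alias for wgpu::kWholeSize (emitting a deprecation warning) and validate the binding against the remaining buffer range. A standalone sketch of that size resolution, assuming kWholeSize equals the maximum uint64_t value as in the WebGPU headers:

    #include <cstdint>
    #include <limits>
    #include <optional>

    // kWholeSize sentinel, assumed equal to the maximum uint64_t value.
    constexpr uint64_t kWholeSize = std::numeric_limits<uint64_t>::max();

    // Returns the effective binding size, or nullopt when the range does not fit.
    std::optional<uint64_t> ResolveBindingSize(uint64_t bufferSize, uint64_t offset, uint64_t size) {
        if (offset > bufferSize) {
            return std::nullopt;  // offset past the end of the buffer
        }
        const uint64_t remaining = bufferSize - offset;
        if (size == 0) {
            size = kWholeSize;  // deprecated alias, see the EmitDeprecationWarning above
        }
        if (size == kWholeSize) {
            return remaining;
        }
        if (size > remaining) {
            return std::nullopt;  // requested range does not fit in the buffer
        }
        return size;
    }
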
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
index 4976ee20cd0..30b7a3ce889 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
@@ -18,6 +18,7 @@
#include "dawn_native/AttachmentState.h"
#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Error.h"
+#include "dawn_native/IndirectDrawMetadata.h"
#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/ProgrammablePassEncoder.h"
@@ -64,6 +65,7 @@ namespace dawn_native {
CommandBufferStateTracker mCommandBufferState;
RenderPassResourceUsageTracker mUsageTracker;
+ IndirectDrawMetadata mIndirectDrawMetadata;
private:
Ref<AttachmentState> mAttachmentState;
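
When validation is enabled, APIDrawIndexedIndirect no longer stores the indirect buffer and offset directly in the command; it allocates a BufferLocation placeholder and registers the draw with mIndirectDrawMetadata so a later pass can validate it and patch in the checked location. A simplified standalone sketch of that record-now, resolve-later pattern, with hypothetical types in place of Dawn's BufferLocation and IndirectDrawMetadata:

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Placeholder that a later validation pass fills in with the real offset.
    struct Location {
        uint64_t offset = 0;
        bool resolved = false;
    };

    struct PendingIndexedIndirectDraw {
        uint64_t requestedOffset;
        std::shared_ptr<Location> location;
    };

    struct DeferredDrawValidation {
        std::vector<PendingIndexedIndirectDraw> pending;

        // Called at encode time: record the draw and hand back the placeholder.
        std::shared_ptr<Location> Record(uint64_t offset) {
            auto location = std::make_shared<Location>();
            pending.push_back({offset, location});
            return location;
        }

        // Called later (e.g. when the pass or bundle is finalized): check each draw
        // and resolve its placeholder so the backend reads a validated location.
        void ResolveAll() {
            for (PendingIndexedIndirectDraw& draw : pending) {
                draw.location->offset = draw.requestedOffset;
                draw.location->resolved = true;
            }
        }
    };
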
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
index 7c1a81a7d6c..aa86702e0b1 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
@@ -20,6 +20,7 @@
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
@@ -35,10 +36,9 @@ namespace dawn_native {
uint32_t queryIndex,
const QueryAvailabilityMap& queryAvailabilityMap) {
auto it = queryAvailabilityMap.find(querySet);
- if (it != queryAvailabilityMap.end() && it->second[queryIndex]) {
- return DAWN_VALIDATION_ERROR(
- "The same query cannot be written twice in same render pass.");
- }
+ DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
+ "Query index %u of %s is written to twice in a render pass.",
+ queryIndex, querySet);
return {};
}
@@ -77,6 +77,10 @@ namespace dawn_native {
return new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
}
+ ObjectType RenderPassEncoder::GetType() const {
+ return ObjectType::RenderPassEncoder;
+ }
+
void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
DAWN_ASSERT(querySet != nullptr);
@@ -89,46 +93,52 @@ namespace dawn_native {
}
void RenderPassEncoder::APIEndPass() {
- if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(ValidateProgrammableEncoderEnd());
- if (mOcclusionQueryActive) {
- return DAWN_VALIDATION_ERROR(
- "The occlusion query must be ended before endPass.");
+ if (mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateProgrammableEncoderEnd());
+
+ DAWN_INVALID_IF(
+ mOcclusionQueryActive,
+ "Render pass %s ended with incomplete occlusion query index %u of %s.",
+ this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
}
- }
- allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
- return {};
- })) {
- mEncodingContext->ExitPass(this, mUsageTracker.AcquireResourceUsage());
+ allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
+ DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
+ mCommandEncoder.Get(),
+ std::move(mIndirectDrawMetadata)));
+ return {};
+ },
+ "encoding EndPass().")) {
}
}
void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- SetStencilReferenceCmd* cmd =
- allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
- cmd->reference = reference;
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetStencilReferenceCmd* cmd =
+ allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
+ cmd->reference = reference;
- return {};
- });
+ return {};
+ },
+ "encoding SetStencilReference(%u)", reference);
}
void RenderPassEncoder::APISetBlendConstant(const Color* color) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- SetBlendConstantCmd* cmd =
- allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
- cmd->color = *color;
-
- return {};
- });
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ SetBlendConstantCmd* cmd =
+ allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
+ cmd->color = *color;
- void RenderPassEncoder::APISetBlendColor(const Color* color) {
- GetDevice()->EmitDeprecationWarning(
- "SetBlendColor has been deprecated in favor of SetBlendConstant.");
- APISetBlendConstant(color);
+ return {};
+ },
+ "encoding SetBlendConstant(%s).", color);
}
void RenderPassEncoder::APISetViewport(float x,
@@ -137,183 +147,214 @@ namespace dawn_native {
float height,
float minDepth,
float maxDepth) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- if ((isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
- isnan(maxDepth))) {
- return DAWN_VALIDATION_ERROR("NaN is not allowed.");
- }
-
- if (x < 0 || y < 0 || width < 0 || height < 0) {
- return DAWN_VALIDATION_ERROR("X, Y, width and height must be non-negative.");
- }
-
- if (x + width > mRenderTargetWidth || y + height > mRenderTargetHeight) {
- return DAWN_VALIDATION_ERROR(
- "The viewport must be contained in the render targets");
- }
-
- // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
- if (minDepth < 0 || minDepth > maxDepth || maxDepth > 1) {
- return DAWN_VALIDATION_ERROR(
- "minDepth and maxDepth must be in [0, 1] and minDepth <= maxDepth.");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
+ isnan(maxDepth)),
+ "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
+ "minDepth: %f, maxDepth: %f) is NaN.",
+ x, y, width, height, minDepth, maxDepth);
+
+ DAWN_INVALID_IF(
+ x < 0 || y < 0 || width < 0 || height < 0,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
+ "value.",
+ x, y, width, height);
+
+ DAWN_INVALID_IF(
+ x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
+ "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
+ "in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+
+ // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
+ DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
+ "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
+ "minDepth was "
+ "greater than maxDepth.",
+ minDepth, maxDepth);
}
- }
- SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
- cmd->minDepth = minDepth;
- cmd->maxDepth = maxDepth;
+ SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
+ cmd->minDepth = minDepth;
+ cmd->maxDepth = maxDepth;
- return {};
- });
+ return {};
+ },
+ "encoding SetViewport(%f, %f, %f, %f, %f, %f).", x, y, width, height, minDepth,
+ maxDepth);
}
void RenderPassEncoder::APISetScissorRect(uint32_t x,
uint32_t y,
uint32_t width,
uint32_t height) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- if (width > mRenderTargetWidth || height > mRenderTargetHeight ||
- x > mRenderTargetWidth - width || y > mRenderTargetHeight - height) {
- return DAWN_VALIDATION_ERROR(
- "The scissor rect must be contained in the render targets");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(
+ width > mRenderTargetWidth || height > mRenderTargetHeight ||
+ x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
+ "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
+ "the render target dimensions (%u x %u).",
+ x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
}
- }
- SetScissorRectCmd* cmd =
- allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
- cmd->x = x;
- cmd->y = y;
- cmd->width = width;
- cmd->height = height;
+ SetScissorRectCmd* cmd =
+ allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
+ cmd->x = x;
+ cmd->y = y;
+ cmd->width = width;
+ cmd->height = height;
- return {};
- });
+ return {};
+ },
+ "encoding SetScissorRect(%u, %u, %u, %u).", x, y, width, height);
}
void RenderPassEncoder::APIExecuteBundles(uint32_t count,
RenderBundleBase* const* renderBundles) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- for (uint32_t i = 0; i < count; ++i) {
- DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
-
- if (GetAttachmentState() != renderBundles[i]->GetAttachmentState()) {
- return DAWN_VALIDATION_ERROR(
- "Render bundle attachment state is not compatible with render pass "
- "attachment state");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ for (uint32_t i = 0; i < count; ++i) {
+ DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
+
+ // TODO(dawn:563): Give more detail about why the states are incompatible.
+ DAWN_INVALID_IF(
+ GetAttachmentState() != renderBundles[i]->GetAttachmentState(),
+ "Attachment state of renderBundles[%i] (%s) is not compatible with "
+ "attachment state of %s.",
+ i, renderBundles[i], this);
}
}
- }
- mCommandBufferState = CommandBufferStateTracker{};
+ mCommandBufferState = CommandBufferStateTracker{};
+
+ ExecuteBundlesCmd* cmd =
+ allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
+ cmd->count = count;
- ExecuteBundlesCmd* cmd =
- allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
- cmd->count = count;
+ Ref<RenderBundleBase>* bundles =
+ allocator->AllocateData<Ref<RenderBundleBase>>(count);
+ for (uint32_t i = 0; i < count; ++i) {
+ bundles[i] = renderBundles[i];
- Ref<RenderBundleBase>* bundles = allocator->AllocateData<Ref<RenderBundleBase>>(count);
- for (uint32_t i = 0; i < count; ++i) {
- bundles[i] = renderBundles[i];
+ const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
+ for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+ mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
+ }
- const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
- for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
- mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
- }
+ for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+ mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
+ usages.textureUsages[i]);
+ }
- for (uint32_t i = 0; i < usages.textures.size(); ++i) {
- mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
- usages.textureUsages[i]);
+ if (IsValidationEnabled()) {
+ mIndirectDrawMetadata.AddBundle(renderBundles[i]);
+ }
}
- }
- return {};
- });
+ return {};
+ },
+ "encoding ExecuteBundles(%u, ...)", count);
}
void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- if (mOcclusionQuerySet.Get() == nullptr) {
- return DAWN_VALIDATION_ERROR(
- "The occlusionQuerySet in RenderPassDescriptor must be set.");
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
+ "The occlusionQuerySet in RenderPassDescriptor is not set.");
- // The type of querySet has been validated by ValidateRenderPassDescriptor
+ // The type of querySet has been validated by ValidateRenderPassDescriptor
- if (queryIndex >= mOcclusionQuerySet->GetQueryCount()) {
- return DAWN_VALIDATION_ERROR(
- "Query index exceeds the number of queries in query set.");
- }
+ DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
+ "Query index (%u) exceeds the number of queries (%u) in %s.",
+ queryIndex, mOcclusionQuerySet->GetQueryCount(),
+ mOcclusionQuerySet.Get());
- if (mOcclusionQueryActive) {
- return DAWN_VALIDATION_ERROR(
- "Only a single occlusion query can be begun at a time.");
- }
+ DAWN_INVALID_IF(mOcclusionQueryActive,
+ "An occlusion query (%u) in %s is already active.",
+ mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
- DAWN_TRY(ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()));
- }
+ DAWN_TRY_CONTEXT(
+ ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the occlusion query index (%u) in %s", queryIndex,
+ mOcclusionQuerySet.Get());
+ }
- // Record the current query index for endOcclusionQuery.
- mCurrentOcclusionQueryIndex = queryIndex;
- mOcclusionQueryActive = true;
+ // Record the current query index for endOcclusionQuery.
+ mCurrentOcclusionQueryIndex = queryIndex;
+ mOcclusionQueryActive = true;
- BeginOcclusionQueryCmd* cmd =
- allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = queryIndex;
+ BeginOcclusionQueryCmd* cmd =
+ allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = queryIndex;
- return {};
- });
+ return {};
+ },
+ "encoding BeginOcclusionQuery(%u)", queryIndex);
}
void RenderPassEncoder::APIEndOcclusionQuery() {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- if (!mOcclusionQueryActive) {
- return DAWN_VALIDATION_ERROR(
- "EndOcclusionQuery cannot be called without corresponding "
- "BeginOcclusionQuery.");
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
}
- }
- TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+ TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
- mOcclusionQueryActive = false;
+ mOcclusionQueryActive = false;
- EndOcclusionQueryCmd* cmd =
- allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
- cmd->querySet = mOcclusionQuerySet.Get();
- cmd->queryIndex = mCurrentOcclusionQueryIndex;
+ EndOcclusionQueryCmd* cmd =
+ allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
+ cmd->querySet = mOcclusionQuerySet.Get();
+ cmd->queryIndex = mCurrentOcclusionQueryIndex;
- return {};
- });
+ return {};
+ },
+ "encoding EndOcclusionQuery()");
}
void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
- mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- if (IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(querySet));
- DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- DAWN_TRY(ValidateQueryIndexOverwrite(querySet, queryIndex,
- mUsageTracker.GetQueryAvailabilityMap()));
- }
+ mEncodingContext->TryEncode(
+ this,
+ [&](CommandAllocator* allocator) -> MaybeError {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ DAWN_TRY_CONTEXT(
+ ValidateQueryIndexOverwrite(querySet, queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()),
+ "validating the timestamp query index (%u) of %s", queryIndex, querySet);
+ }
- TrackQueryAvailability(querySet, queryIndex);
+ TrackQueryAvailability(querySet, queryIndex);
- WriteTimestampCmd* cmd =
- allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
- cmd->querySet = querySet;
- cmd->queryIndex = queryIndex;
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
- return {};
- });
+ return {};
+ },
+ "encoding WriteTimestamp(%s, %u).", querySet, queryIndex);
}
} // namespace dawn_native
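The other recurring change in this file is that TryEncode now takes a trailing format string (for example "encoding SetViewport(...)"), and nested validations are wrapped in DAWN_TRY_CONTEXT so a failure reports both the inner error and the operation being encoded. A minimal sketch of that error-context chaining, again using an optional-string error model rather than Dawn's real error types:

#include <cstdint>
#include <optional>
#include <string>
#include "absl/strings/str_format.h"

// Sketch: if the inner validation produced an error, prepend a formatted
// context line describing what the caller was doing and propagate it.
#define SKETCH_TRY_CONTEXT(EXPR, ...)                               \
    do {                                                            \
        if (auto error = (EXPR)) {                                  \
            return absl::StrFormat(__VA_ARGS__) + "\n - " + *error; \
        }                                                           \
    } while (0)

std::optional<std::string> ValidateQueryIndex(uint32_t index, uint32_t count) {
    if (index >= count) {
        return absl::StrFormat("Query index (%u) exceeds the query count (%u).", index, count);
    }
    return std::nullopt;
}

std::optional<std::string> ValidateWriteTimestamp(uint32_t index, uint32_t count) {
    // Mirrors how APIWriteTimestamp above wraps ValidateQueryIndexOverwrite.
    SKETCH_TRY_CONTEXT(ValidateQueryIndex(index, count),
                       "While validating the timestamp query index (%u)", index);
    return std::nullopt;
}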
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
index 19fcc803654..5aaf32eea00 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_RENDERPASSENCODER_H_
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/RenderEncoderBase.h"
namespace dawn_native {
@@ -37,11 +38,12 @@ namespace dawn_native {
CommandEncoder* commandEncoder,
EncodingContext* encodingContext);
+ ObjectType GetType() const override;
+
void APIEndPass();
void APISetStencilReference(uint32_t reference);
void APISetBlendConstant(const Color* color);
- void APISetBlendColor(const Color* color); // Deprecated
void APISetViewport(float x,
float y,
float width,
@@ -78,6 +80,9 @@ namespace dawn_native {
bool mOcclusionQueryActive = false;
};
+ // For the benefit of template generation.
+ using RenderPassEncoderBase = RenderPassEncoder;
+
} // namespace dawn_native
#endif // DAWNNATIVE_RENDERPASSENCODER_H_
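Several classes touched by this patch (RenderPassEncoder here, RenderPipelineBase and SamplerBase below) gain an ObjectType GetType() const override, and objects now appear directly as %s arguments in the new error messages. The formatter itself is not shown in this diff; the following is only a guess at the general pattern, with illustrative names, of how a type tag plus a label can be turned into a printable description:

#include <string>
#include "absl/strings/str_format.h"

// Illustrative only: a type tag and a label rendered as something like
// [Sampler "mySampler"] for use in formatted error messages.
enum class ObjectTypeSketch { RenderPassEncoder, RenderPipeline, Sampler };

const char* ObjectTypeName(ObjectTypeSketch type) {
    switch (type) {
        case ObjectTypeSketch::RenderPassEncoder:
            return "RenderPassEncoder";
        case ObjectTypeSketch::RenderPipeline:
            return "RenderPipeline";
        case ObjectTypeSketch::Sampler:
            return "Sampler";
    }
    return "Unknown";
}

std::string DescribeObject(ObjectTypeSketch type, const std::string& label) {
    return absl::StrFormat("[%s \"%s\"]", ObjectTypeName(type), label);
}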
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index decde9cc384..2ad7a224183 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -18,7 +18,9 @@
#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
+#include "dawn_native/InternalPipelineStore.h"
#include "dawn_native/ObjectContentHasher.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_native/VertexFormat.h"
@@ -26,9 +28,91 @@
#include <sstream>
namespace dawn_native {
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ VertexFormatBaseType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case VertexFormatBaseType::Float:
+ s->Append("Float");
+ break;
+ case VertexFormatBaseType::Uint:
+ s->Append("Uint");
+ break;
+ case VertexFormatBaseType::Sint:
+ s->Append("Sint");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterStageComponentType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterStageComponentType::Float:
+ s->Append("Float");
+ break;
+ case InterStageComponentType::Uint:
+ s->Append("Uint");
+ break;
+ case InterStageComponentType::Sint:
+ s->Append("Sint");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationType value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationType::Perspective:
+ s->Append("Perspective");
+ break;
+ case InterpolationType::Linear:
+ s->Append("Linear");
+ break;
+ case InterpolationType::Flat:
+ s->Append("Flat");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
+ absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+ InterpolationSampling value,
+ const absl::FormatConversionSpec& spec,
+ absl::FormatSink* s) {
+ switch (value) {
+ case InterpolationSampling::None:
+ s->Append("None");
+ break;
+ case InterpolationSampling::Center:
+ s->Append("Center");
+ break;
+ case InterpolationSampling::Centroid:
+ s->Append("Centroid");
+ break;
+ case InterpolationSampling::Sample:
+ s->Append("Sample");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return {true};
+ }
+
// Helper functions
namespace {
-
MaybeError ValidateVertexAttribute(
DeviceBase* device,
const VertexAttribute* attribute,
@@ -38,40 +122,48 @@ namespace dawn_native {
DAWN_TRY(ValidateVertexFormat(attribute->format));
const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
- if (attribute->shaderLocation >= kMaxVertexAttributes) {
- return DAWN_VALIDATION_ERROR("Setting attribute out of bounds");
- }
+ DAWN_INVALID_IF(
+ attribute->shaderLocation >= kMaxVertexAttributes,
+ "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
+ "(%u).",
+ attribute->shaderLocation, kMaxVertexAttributes);
+
VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
// No underflow is possible because the max vertex format size is smaller than
// kMaxVertexBufferArrayStride.
ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
- if (attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize) {
- return DAWN_VALIDATION_ERROR("Setting attribute offset out of bounds");
- }
+ DAWN_INVALID_IF(
+ attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
+ "buffer stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize,
+ kMaxVertexBufferArrayStride);
// No overflow is possible because the offset is already validated to be less
// than kMaxVertexBufferArrayStride.
ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
- if (vertexBufferStride > 0 &&
- attribute->offset + formatInfo.byteSize > vertexBufferStride) {
- return DAWN_VALIDATION_ERROR("Setting attribute offset out of bounds");
- }
-
- if (attribute->offset % std::min(4u, formatInfo.byteSize) != 0) {
- return DAWN_VALIDATION_ERROR(
- "Attribute offset needs to be a multiple of min(4, formatSize)");
- }
-
- if (metadata.usedVertexInputs[location] &&
- formatInfo.baseType != metadata.vertexInputBaseTypes[location]) {
- return DAWN_VALIDATION_ERROR(
- "Attribute base type must match the base type in the shader.");
- }
-
- if ((*attributesSetMask)[location]) {
- return DAWN_VALIDATION_ERROR("Setting already set attribute");
- }
+ DAWN_INVALID_IF(
+ vertexBufferStride > 0 &&
+ attribute->offset + formatInfo.byteSize > vertexBufferStride,
+ "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
+ "stride (%u).",
+ attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
+
+ DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
+ "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
+ std::min(4u, formatInfo.byteSize));
+
+ DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
+ formatInfo.baseType != metadata.vertexInputBaseTypes[location],
+ "Attribute base type (%s) does not match the "
+ "shader's base type (%s) in location (%u).",
+ formatInfo.baseType, metadata.vertexInputBaseTypes[location],
+ attribute->shaderLocation);
+
+ DAWN_INVALID_IF((*attributesSetMask)[location],
+ "Attribute shader location (%u) is used more than once.",
+ attribute->shaderLocation);
attributesSetMask->set(location);
return {};
@@ -83,18 +175,19 @@ namespace dawn_native {
const EntryPointMetadata& metadata,
ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
- if (buffer->arrayStride > kMaxVertexBufferArrayStride) {
- return DAWN_VALIDATION_ERROR("Setting arrayStride out of bounds");
- }
+ DAWN_INVALID_IF(
+ buffer->arrayStride > kMaxVertexBufferArrayStride,
+ "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
+ buffer->arrayStride, kMaxVertexBufferArrayStride);
- if (buffer->arrayStride % 4 != 0) {
- return DAWN_VALIDATION_ERROR(
- "arrayStride of Vertex buffer needs to be a multiple of 4 bytes");
- }
+ DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
+ "Vertex buffer arrayStride (%u) is not a multiple of 4.",
+ buffer->arrayStride);
for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
- DAWN_TRY(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
- buffer->arrayStride, attributesSetMask));
+ DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
+ buffer->arrayStride, attributesSetMask),
+ "validating attributes[%u].", i);
}
return {};
@@ -103,24 +196,28 @@ namespace dawn_native {
MaybeError ValidateVertexState(DeviceBase* device,
const VertexState* descriptor,
const PipelineLayoutBase* layout) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
-
- if (descriptor->bufferCount > kMaxVertexBuffers) {
- return DAWN_VALIDATION_ERROR("Vertex buffer count exceeds maximum");
- }
-
- DAWN_TRY(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- layout, SingleShaderStage::Vertex));
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+ DAWN_INVALID_IF(
+ descriptor->bufferCount > kMaxVertexBuffers,
+ "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
+ descriptor->bufferCount, kMaxVertexBuffers);
+
+ DAWN_TRY_CONTEXT(
+ ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants, layout,
+ SingleShaderStage::Vertex),
+ "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
const EntryPointMetadata& vertexMetadata =
descriptor->module->GetEntryPoint(descriptor->entryPoint);
ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
uint32_t totalAttributesNum = 0;
for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
- DAWN_TRY(ValidateVertexBufferLayout(device, &descriptor->buffers[i], vertexMetadata,
- &attributesSetMask));
+ DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
+ vertexMetadata, &attributesSetMask),
+ "validating buffers[%u].", i);
totalAttributesNum += descriptor->buffers[i].attributeCount;
}
@@ -130,10 +227,9 @@ namespace dawn_native {
// attribute number never exceed kMaxVertexAttributes.
ASSERT(totalAttributesNum <= kMaxVertexAttributes);
- if (!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask)) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline vertex stage uses vertex buffers not in the vertex state");
- }
+ // TODO(dawn:563): Specify which inputs were not used in error message.
+ DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
+ "Pipeline vertex stage uses vertex buffers not in the vertex state");
return {};
}
@@ -144,7 +240,7 @@ namespace dawn_native {
wgpu::SType::PrimitiveDepthClampingState));
const PrimitiveDepthClampingState* clampInfo = nullptr;
FindInChain(descriptor->nextInChain, &clampInfo);
- if (clampInfo && !device->IsExtensionEnabled(Extension::DepthClamping)) {
+ if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
}
DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
@@ -155,14 +251,16 @@ namespace dawn_native {
// Pipeline descriptors must have stripIndexFormat != undefined IFF they are using strip
// topologies.
if (IsStripPrimitiveTopology(descriptor->topology)) {
- if (descriptor->stripIndexFormat == wgpu::IndexFormat::Undefined) {
- return DAWN_VALIDATION_ERROR(
- "stripIndexFormat must not be undefined when using strip primitive "
- "topologies");
- }
- } else if (descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined) {
- return DAWN_VALIDATION_ERROR(
- "stripIndexFormat must be undefined when using non-strip primitive topologies");
+ DAWN_INVALID_IF(
+ descriptor->stripIndexFormat == wgpu::IndexFormat::Undefined,
+ "StripIndexFormat is undefined when using a strip primitive topology (%s).",
+ descriptor->topology);
+ } else {
+ DAWN_INVALID_IF(
+ descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
+ "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
+ "topology (%s).",
+ descriptor->stripIndexFormat, descriptor->topology);
}
return {};
@@ -186,54 +284,31 @@ namespace dawn_native {
const Format* format;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- if (!format->HasDepthOrStencil() || !format->isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "Depth stencil format must be depth-stencil renderable");
- }
+ DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+ "Depth stencil format (%s) is not depth-stencil renderable.",
+ descriptor->format);
- if (std::isnan(descriptor->depthBiasSlopeScale) ||
- std::isnan(descriptor->depthBiasClamp)) {
- return DAWN_VALIDATION_ERROR("Depth bias parameters must not be NaN.");
- }
+ DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
+ std::isnan(descriptor->depthBiasClamp),
+ "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
+ descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
return {};
}
MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
- if (!IsValidSampleCount(descriptor->count)) {
- return DAWN_VALIDATION_ERROR("Multisample count is not supported");
- }
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
+ "Multisample count (%u) is not supported.", descriptor->count);
- if (descriptor->alphaToCoverageEnabled && descriptor->count <= 1) {
- return DAWN_VALIDATION_ERROR("Enabling alphaToCoverage requires sample count > 1");
- }
+ DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
+ "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
+ descriptor->count);
return {};
}
- static constexpr wgpu::BlendFactor kFirstDeprecatedBlendFactor =
- wgpu::BlendFactor::SrcColor;
- static constexpr uint32_t kDeprecatedBlendFactorOffset = 100;
-
- bool IsDeprecatedBlendFactor(wgpu::BlendFactor blendFactor) {
- return blendFactor >= kFirstDeprecatedBlendFactor;
- }
-
- wgpu::BlendFactor NormalizeBlendFactor(wgpu::BlendFactor blendFactor) {
- // If the specified format is from the deprecated range return the corresponding
- // non-deprecated format.
- if (blendFactor >= kFirstDeprecatedBlendFactor) {
- uint32_t blendFactorValue = static_cast<uint32_t>(blendFactor);
- return static_cast<wgpu::BlendFactor>(blendFactorValue -
- kDeprecatedBlendFactorOffset);
- }
- return blendFactor;
- }
-
MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
@@ -241,15 +316,6 @@ namespace dawn_native {
DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
-
- if (IsDeprecatedBlendFactor(descriptor->alpha.srcFactor) ||
- IsDeprecatedBlendFactor(descriptor->alpha.dstFactor) ||
- IsDeprecatedBlendFactor(descriptor->color.srcFactor) ||
- IsDeprecatedBlendFactor(descriptor->color.dstFactor)) {
- device->EmitDeprecationWarning(
- "Blend factor enums have changed and the old enums will be removed soon.");
- }
-
return {};
}
@@ -264,57 +330,58 @@ namespace dawn_native {
const ColorTargetState* descriptor,
bool fragmentWritten,
const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
if (descriptor->blend) {
- DAWN_TRY(ValidateBlendState(device, descriptor->blend));
+ DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
+ "validating blend state.");
}
DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
const Format* format;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- if (!format->IsColor() || !format->isRenderable) {
- return DAWN_VALIDATION_ERROR("Color format must be color renderable");
- }
- if (descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
- SampleTypeBit::Float)) {
- return DAWN_VALIDATION_ERROR(
- "Color format must be blendable when blending is enabled");
- }
- if (fragmentWritten) {
- if (fragmentOutputVariable.baseType !=
- format->GetAspectInfo(Aspect::Color).baseType) {
- return DAWN_VALIDATION_ERROR(
- "Color format must match the fragment stage output type");
- }
+ DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+ "Color format (%s) is not color renderable.", descriptor->format);
- if (fragmentOutputVariable.componentCount < format->componentCount) {
- return DAWN_VALIDATION_ERROR(
- "The fragment stage output components count must be no fewer than the "
- "color format channel count");
- }
+ DAWN_INVALID_IF(
+ descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
+ SampleTypeBit::Float),
+ "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
+
+ if (fragmentWritten) {
+ DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
+ format->GetAspectInfo(Aspect::Color).baseType,
+ "Color format (%s) base type (%s) doesn't match the fragment "
+ "module output type (%s).",
+ descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
+ fragmentOutputVariable.baseType);
+
+ DAWN_INVALID_IF(
+ fragmentOutputVariable.componentCount < format->componentCount,
+ "The fragment stage has fewer output components (%u) than the color format "
+ "(%s) component count (%u).",
+ fragmentOutputVariable.componentCount, descriptor->format,
+ format->componentCount);
if (descriptor->blend) {
if (fragmentOutputVariable.componentCount < 4u) {
// No alpha channel output
// Make sure there's no alpha involved in the blending operation
- if (BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
- BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor)) {
- return DAWN_VALIDATION_ERROR(
- "Color blending factor is reading alpha but it is missing from "
- "fragment output");
- }
+ DAWN_INVALID_IF(
+ BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
+ BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
+ "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
+ "but it is missing from fragment output.",
+ descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
}
}
} else {
- if (descriptor->writeMask != wgpu::ColorWriteMask::None) {
- return DAWN_VALIDATION_ERROR(
- "writeMask must be zero for color targets with no corresponding fragment "
- "stage output");
- }
+ DAWN_INVALID_IF(
+ descriptor->writeMask != wgpu::ColorWriteMask::None,
+ "Color target has no corresponding fragment stage output but writeMask (%s) is "
+ "not zero.",
+ descriptor->writeMask);
}
return {};
@@ -323,25 +390,28 @@ namespace dawn_native {
MaybeError ValidateFragmentState(DeviceBase* device,
const FragmentState* descriptor,
const PipelineLayoutBase* layout) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
- DAWN_TRY(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
- layout, SingleShaderStage::Fragment));
+ DAWN_TRY_CONTEXT(
+ ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ descriptor->constantCount, descriptor->constants, layout,
+ SingleShaderStage::Fragment),
+ "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
+ descriptor->entryPoint);
- if (descriptor->targetCount > kMaxColorAttachments) {
- return DAWN_VALIDATION_ERROR("Number of color targets exceeds maximum");
- }
+ DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
+ "Number of targets (%u) exceeds the maximum (%u).",
+ descriptor->targetCount, kMaxColorAttachments);
const EntryPointMetadata& fragmentMetadata =
descriptor->module->GetEntryPoint(descriptor->entryPoint);
for (ColorAttachmentIndex i(uint8_t(0));
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
- DAWN_TRY(ValidateColorTargetState(device,
- &descriptor->targets[static_cast<uint8_t>(i)],
- fragmentMetadata.fragmentOutputsWritten[i],
- fragmentMetadata.fragmentOutputVariables[i]));
+ DAWN_TRY_CONTEXT(
+ ValidateColorTargetState(device, &descriptor->targets[static_cast<uint8_t>(i)],
+ fragmentMetadata.fragmentOutputsWritten[i],
+ fragmentMetadata.fragmentOutputVariables[i]),
+ "validating targets[%u].", static_cast<uint8_t>(i));
}
return {};
@@ -355,36 +425,41 @@ namespace dawn_native {
const EntryPointMetadata& fragmentMetadata =
fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
- if (vertexMetadata.usedInterStageVariables !=
- fragmentMetadata.usedInterStageVariables) {
- return DAWN_VALIDATION_ERROR(
- "One or more fragment inputs and vertex outputs are not one-to-one matching");
- }
+ // TODO(dawn:563): Can this message give more details?
+ DAWN_INVALID_IF(
+ vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
+ "One or more fragment inputs and vertex outputs are not one-to-one matching");
- auto generateErrorString = [](const char* interStageAttribute, size_t location) {
- std::ostringstream stream;
- stream << "The " << interStageAttribute << " of the vertex output at location "
- << location
- << " is different from the one of the fragment input at the same location";
- return stream.str();
- };
// TODO(dawn:802): Validate interpolation types and interpolation sampling types
for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
- if (vertexOutputInfo.baseType != fragmentInputInfo.baseType) {
- return DAWN_VALIDATION_ERROR(generateErrorString("base type", i));
- }
- if (vertexOutputInfo.componentCount != fragmentInputInfo.componentCount) {
- return DAWN_VALIDATION_ERROR(generateErrorString("componentCount", i));
- }
- if (vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType) {
- return DAWN_VALIDATION_ERROR(generateErrorString("interpolation type", i));
- }
- if (vertexOutputInfo.interpolationSampling !=
- fragmentInputInfo.interpolationSampling) {
- return DAWN_VALIDATION_ERROR(generateErrorString("interpolation sampling", i));
- }
+ DAWN_INVALID_IF(
+ vertexOutputInfo.baseType != fragmentInputInfo.baseType,
+ "The base type (%s) of the vertex output at location %u is different from the "
+ "base type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
+ "The component count (%u) of the vertex output at location %u is different "
+ "from the component count (%u) of the fragment input at location %u.",
+ vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
+ "The interpolation type (%s) of the vertex output at location %u is different "
+ "from the interpolation type (%s) of the fragment input at location %u.",
+ vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
+
+ DAWN_INVALID_IF(
+ vertexOutputInfo.interpolationSampling !=
+ fragmentInputInfo.interpolationSampling,
+ "The interpolation sampling (%s) of the vertex output at location %u is "
+ "different from the interpolation sampling (%s) of the fragment input at "
+ "location %u.",
+ vertexOutputInfo.interpolationSampling, i,
+ fragmentInputInfo.interpolationSampling, i);
}
return {};
@@ -399,8 +474,9 @@ namespace dawn_native {
case wgpu::IndexFormat::Uint32:
return sizeof(uint32_t);
case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
@@ -410,48 +486,59 @@ namespace dawn_native {
MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
const RenderPipelineDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
if (descriptor->layout != nullptr) {
DAWN_TRY(device->ValidateObject(descriptor->layout));
}
- // TODO(crbug.com/dawn/136): Support vertex-only pipelines.
- if (descriptor->fragment == nullptr) {
- return DAWN_VALIDATION_ERROR("Null fragment stage is not supported (yet)");
- }
+ DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
+ "validating vertex state.");
- DAWN_TRY(ValidateVertexState(device, &descriptor->vertex, descriptor->layout));
-
- DAWN_TRY(ValidatePrimitiveState(device, &descriptor->primitive));
+ DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
+ "validating primitive state.");
if (descriptor->depthStencil) {
- DAWN_TRY(ValidateDepthStencilState(device, descriptor->depthStencil));
+ DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
+ "validating depthStencil state.");
}
- DAWN_TRY(ValidateMultisampleState(&descriptor->multisample));
+ DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
+ "validating multisample state.");
+
+ if (descriptor->fragment != nullptr) {
+ DAWN_TRY_CONTEXT(
+ ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
+ "validating fragment state.");
- ASSERT(descriptor->fragment != nullptr);
- DAWN_TRY(ValidateFragmentState(device, descriptor->fragment, descriptor->layout));
+ DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
+ "Must have at least one color or depthStencil target.");
- if (descriptor->fragment->targetCount == 0 && !descriptor->depthStencil) {
- return DAWN_VALIDATION_ERROR("Should have at least one color target or a depthStencil");
+ DAWN_TRY(
+ ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
}
- DAWN_TRY(ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
-
return {};
}
- std::vector<StageAndDescriptor> GetStages(const RenderPipelineDescriptor* descriptor) {
+ std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor) {
std::vector<StageAndDescriptor> stages;
- stages.push_back(
- {SingleShaderStage::Vertex, descriptor->vertex.module, descriptor->vertex.entryPoint});
+ stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
+ descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
+ descriptor->vertex.constants});
if (descriptor->fragment != nullptr) {
stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
- descriptor->fragment->entryPoint});
+ descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
+ descriptor->fragment->constants});
+ } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+ InternalPipelineStore* store = device->GetInternalPipelineStore();
+ // The dummy fragment shader module should already be initialized
+ DAWN_ASSERT(store->dummyFragmentShader != nullptr);
+ ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
+ stages.push_back(
+ {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
}
return stages;
}
@@ -473,10 +560,8 @@ namespace dawn_native {
const RenderPipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
- {{SingleShaderStage::Vertex, descriptor->vertex.module,
- descriptor->vertex.entryPoint},
- {SingleShaderStage::Fragment, descriptor->fragment->module,
- descriptor->fragment->entryPoint}}),
+ descriptor->label,
+ GetRenderStagesAndSetDummyShader(device, descriptor)),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
mVertexBufferCount = descriptor->vertex.bufferCount;
const VertexBufferLayout* buffers = descriptor->vertex.buffers;
@@ -558,6 +643,9 @@ namespace dawn_native {
}
for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+ // Vertex-only render pipelines have no color attachments. For a render pipeline with
+ // color attachments, there must be a valid FragmentState.
+ ASSERT(descriptor->fragment != nullptr);
const ColorTargetState* target =
&descriptor->fragment->targets[static_cast<uint8_t>(i)];
mTargets[i] = *target;
@@ -565,16 +653,10 @@ namespace dawn_native {
if (target->blend != nullptr) {
mTargetBlend[i] = *target->blend;
mTargets[i].blend = &mTargetBlend[i];
- mTargetBlend[i].alpha.srcFactor =
- NormalizeBlendFactor(mTargetBlend[i].alpha.srcFactor);
- mTargetBlend[i].alpha.dstFactor =
- NormalizeBlendFactor(mTargetBlend[i].alpha.dstFactor);
- mTargetBlend[i].color.srcFactor =
- NormalizeBlendFactor(mTargetBlend[i].color.srcFactor);
- mTargetBlend[i].color.dstFactor =
- NormalizeBlendFactor(mTargetBlend[i].color.dstFactor);
}
}
+
+ SetContentHash(ComputeContentHash());
}
RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -583,7 +665,23 @@ namespace dawn_native {
// static
RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
- return new RenderPipelineBase(device, ObjectBase::kError);
+ class ErrorRenderPipeline final : public RenderPipelineBase {
+ public:
+ ErrorRenderPipeline(DeviceBase* device)
+ : RenderPipelineBase(device, ObjectBase::kError) {
+ }
+
+ MaybeError Initialize() override {
+ UNREACHABLE();
+ return {};
+ }
+ };
+
+ return new ErrorRenderPipeline(device);
+ }
+
+ ObjectType RenderPipelineBase::GetType() const {
+ return ObjectType::RenderPipeline;
}
RenderPipelineBase::~RenderPipelineBase() {
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
index 0aebfc2c4ca..464ee5dc2d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
@@ -17,6 +17,7 @@
#include "common/TypedInteger.h"
#include "dawn_native/AttachmentState.h"
+#include "dawn_native/Forward.h"
#include "dawn_native/IntegerTypes.h"
#include "dawn_native/Pipeline.h"
@@ -32,7 +33,9 @@ namespace dawn_native {
MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
const RenderPipelineDescriptor* descriptor);
- std::vector<StageAndDescriptor> GetStages(const RenderPipelineDescriptor* descriptor);
+ std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+ DeviceBase* device,
+ const RenderPipelineDescriptor* descriptor);
size_t IndexFormatSize(wgpu::IndexFormat format);
@@ -60,6 +63,8 @@ namespace dawn_native {
static RenderPipelineBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
GetAttributeLocationsUsed() const;
const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
@@ -100,6 +105,9 @@ namespace dawn_native {
bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
};
+ // Initialize() should only be called once by the frontend.
+ virtual MaybeError Initialize() = 0;
+
private:
RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
index 1637b16da7b..ec3266c50cb 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
@@ -23,33 +23,31 @@
namespace dawn_native {
MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
- if (std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp)) {
- return DAWN_VALIDATION_ERROR("LOD clamp bounds must not be NaN");
- }
+ DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
+ "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
+ descriptor->lodMaxClamp);
- if (descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0) {
- return DAWN_VALIDATION_ERROR("LOD clamp bounds must be positive");
- }
+ DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
+ "LOD clamp bounds [%f, %f] contain contain a negative number.",
+ descriptor->lodMinClamp, descriptor->lodMaxClamp);
- if (descriptor->lodMinClamp > descriptor->lodMaxClamp) {
- return DAWN_VALIDATION_ERROR(
- "Min lod clamp value cannot greater than max lod clamp value");
- }
+ DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
+ "LOD min clamp (%f) is larger than the max clamp (%f).",
+ descriptor->lodMinClamp, descriptor->lodMaxClamp);
if (descriptor->maxAnisotropy > 1) {
- if (descriptor->minFilter != wgpu::FilterMode::Linear ||
- descriptor->magFilter != wgpu::FilterMode::Linear ||
- descriptor->mipmapFilter != wgpu::FilterMode::Linear) {
- return DAWN_VALIDATION_ERROR(
- "min, mag, and mipmap filter should be linear when using anisotropic "
- "filtering");
- }
+ DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
+ descriptor->magFilter != wgpu::FilterMode::Linear ||
+ descriptor->mipmapFilter != wgpu::FilterMode::Linear,
+ "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
+ "while using anisotropic filter (maxAnisotropy is %f)",
+ descriptor->minFilter, descriptor->magFilter, descriptor->mipmapFilter,
+ wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
} else if (descriptor->maxAnisotropy == 0u) {
- return DAWN_VALIDATION_ERROR("max anisotropy cannot be set to 0");
+ return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
+ descriptor->maxAnisotropy);
}
DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
@@ -72,7 +70,7 @@ namespace dawn_native {
// SamplerBase
SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
- : CachedObject(device),
+ : ApiObjectBase(device, kLabelNotImplemented),
mAddressModeU(descriptor->addressModeU),
mAddressModeV(descriptor->addressModeV),
mAddressModeW(descriptor->addressModeW),
@@ -86,7 +84,7 @@ namespace dawn_native {
}
SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag) {
+ : ApiObjectBase(device, tag) {
}
SamplerBase::~SamplerBase() {
@@ -100,6 +98,10 @@ namespace dawn_native {
return new SamplerBase(device, ObjectBase::kError);
}
+ ObjectType SamplerBase::GetType() const {
+ return ObjectType::Sampler;
+ }
+
bool SamplerBase::IsComparison() const {
return mCompareFunction != wgpu::CompareFunction::Undefined;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.h b/chromium/third_party/dawn/src/dawn_native/Sampler.h
index aa74966e488..3e7d1fbae48 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.h
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.h
@@ -17,6 +17,8 @@
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -26,13 +28,15 @@ namespace dawn_native {
MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
- class SamplerBase : public CachedObject {
+ class SamplerBase : public ApiObjectBase, public CachedObject {
public:
SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
~SamplerBase() override;
static SamplerBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
bool IsComparison() const;
bool IsFiltering() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp
new file mode 100644
index 00000000000..976214cb912
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.cpp
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/ScratchBuffer.h"
+
+#include "dawn_native/Device.h"
+
+namespace dawn_native {
+
+ ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
+ : mDevice(device), mUsage(usage) {
+ }
+
+ ScratchBuffer::~ScratchBuffer() = default;
+
+ void ScratchBuffer::Reset() {
+ mBuffer = nullptr;
+ }
+
+ MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
+ if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
+ BufferDescriptor descriptor;
+ descriptor.size = capacity;
+ descriptor.usage = mUsage;
+ DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
+ mBuffer->SetIsDataInitialized();
+ }
+ return {};
+ }
+
+ BufferBase* ScratchBuffer::GetBuffer() const {
+ ASSERT(mBuffer.Get() != nullptr);
+ return mBuffer.Get();
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h
new file mode 100644
index 00000000000..7bb446dfa29
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ScratchBuffer.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SCRATCHBUFFER_H_
+#define DAWNNATIVE_SCRATCHBUFFER_H_
+
+#include "common/RefCounted.h"
+#include "dawn_native/Buffer.h"
+
+#include <cstdint>
+
+namespace dawn_native {
+
+ class DeviceBase;
+
+ // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
+ // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
+ // be careful not to expose uninitialized bytes to client shaders.
+ class ScratchBuffer {
+ public:
+ // Note that this object does not retain a reference to `device`, so `device` MUST outlive
+ // this object.
+ ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
+ ~ScratchBuffer();
+
+ // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
+ // fresh buffer.
+ void Reset();
+
+ // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
+ // `capacity` bytes of storage.
+ MaybeError EnsureCapacity(uint64_t capacity);
+
+ BufferBase* GetBuffer() const;
+
+ private:
+ DeviceBase* const mDevice;
+ const wgpu::BufferUsage mUsage;
+ Ref<BufferBase> mBuffer;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_SCRATCHBUFFER_H_
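Usage of the new ScratchBuffer helper follows directly from the header above: construct it with a device and a usage, call EnsureCapacity before use, read the backing buffer with GetBuffer, and Reset when done. A hedged usage sketch follows; the function name and usage flags are illustrative and not taken from this patch:

#include "dawn_native/Error.h"
#include "dawn_native/ScratchBuffer.h"

namespace dawn_native {

    // Hypothetical caller: stages some data through a lazily grown scratch buffer.
    MaybeError UseScratchStorage(DeviceBase* device, uint64_t requiredSize) {
        ScratchBuffer scratch(device, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);

        // Allocates (or grows) the underlying buffer only if it is missing or too small.
        DAWN_TRY(scratch.EnsureCapacity(requiredSize));

        BufferBase* buffer = scratch.GetBuffer();  // valid until Reset() or destruction
        // ... record GPU commands that use the buffer; note it is not zero-initialized ...

        scratch.Reset();  // drop the allocation once it is no longer needed
        return {};
    }

}  // namespace dawn_native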
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index 0941922823b..8d6c59b79a5 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/ShaderModule.h"
+#include "absl/strings/str_format.h"
#include "common/Constants.h"
#include "common/HashUtils.h"
#include "dawn_native/BindGroupLayout.h"
@@ -32,13 +33,27 @@
namespace dawn_native {
+ EntryPointMetadata::OverridableConstant::Type GetDawnOverridableConstantType(
+ tint::inspector::OverridableConstant::Type type) {
+ switch (type) {
+ case tint::inspector::OverridableConstant::Type::kBool:
+ return EntryPointMetadata::OverridableConstant::Type::Boolean;
+ case tint::inspector::OverridableConstant::Type::kFloat32:
+ return EntryPointMetadata::OverridableConstant::Type::Float32;
+ case tint::inspector::OverridableConstant::Type::kInt32:
+ return EntryPointMetadata::OverridableConstant::Type::Int32;
+ case tint::inspector::OverridableConstant::Type::kUint32:
+ return EntryPointMetadata::OverridableConstant::Type::Uint32;
+ default:
+ UNREACHABLE();
+ }
+ }
+
namespace {
std::string GetShaderDeclarationString(BindGroupIndex group, BindingNumber binding) {
- std::ostringstream ostream;
- ostream << "the shader module declaration at set " << static_cast<uint32_t>(group)
- << " binding " << static_cast<uint32_t>(binding);
- return ostream.str();
+ return absl::StrFormat("the shader module declaration at set %u, binding %u",
+ static_cast<uint32_t>(group), static_cast<uint32_t>(binding));
}
tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
@@ -117,6 +132,7 @@ namespace dawn_native {
case wgpu::VertexStepMode::Instance:
return tint::transform::VertexStepMode::kInstance;
}
+ UNREACHABLE();
}
ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
@@ -129,8 +145,9 @@ namespace dawn_native {
case tint::ast::PipelineStage::kCompute:
return SingleShaderStage::Compute;
case tint::ast::PipelineStage::kNone:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
BindingInfoType TintResourceTypeToBindingInfoType(
@@ -148,7 +165,6 @@ namespace dawn_native {
case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
return BindingInfoType::Texture;
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageTexture:
case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
return BindingInfoType::StorageTexture;
case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
@@ -236,6 +252,7 @@ namespace dawn_native {
case tint::inspector::ResourceBinding::ImageFormat::kNone:
return wgpu::TextureFormat::Undefined;
}
+ UNREACHABLE();
}
wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
@@ -256,6 +273,7 @@ namespace dawn_native {
case tint::inspector::ResourceBinding::TextureDimension::kNone:
return wgpu::TextureViewDimension::Undefined;
}
+ UNREACHABLE();
}
SampleTypeBit TintSampledKindToSampleTypeBit(
@@ -270,6 +288,7 @@ namespace dawn_native {
case tint::inspector::ResourceBinding::SampledKind::kUnknown:
return SampleTypeBit::None;
}
+ UNREACHABLE();
}
ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
@@ -285,6 +304,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempted to convert 'Unknown' component type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
@@ -300,6 +320,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempted to convert 'Unknown' component type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
@@ -314,19 +335,19 @@ namespace dawn_native {
default:
return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
}
+ UNREACHABLE();
}
ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
tint::inspector::ResourceBinding::ResourceType resource_type) {
switch (resource_type) {
- case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageTexture:
- return wgpu::StorageTextureAccess::ReadOnly;
case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
return wgpu::StorageTextureAccess::WriteOnly;
default:
return DAWN_VALIDATION_ERROR(
"Attempted to convert non-storage texture resource type");
}
+ UNREACHABLE();
}
ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
@@ -342,6 +363,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempted to convert 'Unknown' component type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
@@ -359,6 +381,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempt to convert 'Unknown' composition type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
@@ -374,6 +397,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempted to convert 'Unknown' interpolation type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
@@ -391,6 +415,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Attempted to convert 'Unknown' interpolation sampling type from Tint");
}
+ UNREACHABLE();
}
ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
@@ -431,9 +456,8 @@ namespace dawn_native {
return std::move(program);
}
- std::vector<uint64_t> GetBindGroupMinBufferSizes(
- const EntryPointMetadata::BindingGroupInfoMap& shaderBindings,
- const BindGroupLayoutBase* layout) {
+ std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
+ const BindGroupLayoutBase* layout) {
std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
uint32_t packedIdx = 0;
@@ -471,7 +495,7 @@ namespace dawn_native {
// corresponding binding in the BindGroupLayout, if it exists.
for (const auto& it : entryPoint.bindings[group]) {
BindingNumber bindingNumber = it.first;
- const EntryPointMetadata::ShaderBindingInfo& shaderInfo = it.second;
+ const ShaderBindingInfo& shaderInfo = it.second;
const auto& bindingIt = layoutBindings.find(bindingNumber);
if (bindingIt == layoutBindings.end()) {
@@ -601,7 +625,7 @@ namespace dawn_native {
}
ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
- DeviceBase*,
+ const DeviceBase* device,
const tint::Program* program) {
ASSERT(program->IsValid());
@@ -620,13 +644,26 @@ namespace dawn_native {
for (auto& entryPoint : entryPoints) {
ASSERT(result.count(entryPoint.name) == 0);
+ auto metadata = std::make_unique<EntryPointMetadata>();
+
if (!entryPoint.overridable_constants.empty()) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline overridable constants are not implemented yet");
+ DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+ "Pipeline overridable constants are disallowed because they "
+ "are partially implemented.");
+
+ const auto& name2Id = inspector.GetConstantNameToIdMap();
+
+ for (auto& c : entryPoint.overridable_constants) {
+ EntryPointMetadata::OverridableConstant constant = {
+ name2Id.at(c.name), GetDawnOverridableConstantType(c.type)};
+ metadata->overridableConstants[c.name] = constant;
+ // TODO(tint:1155) tint needs a way to differentiate whether a pipeline
+ // constant id was specified explicitly. For now we store the constant under
+ // both its numeric id and its variable name as keys in the map.
+ metadata->overridableConstants[std::to_string(constant.id)] = constant;
+ }
}
- auto metadata = std::make_unique<EntryPointMetadata>();
-
DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
if (metadata->stage == SingleShaderStage::Compute) {
@@ -825,12 +862,12 @@ namespace dawn_native {
}
const auto& it = metadata->bindings[bindGroupIndex].emplace(
- bindingNumber, EntryPointMetadata::ShaderBindingInfo{});
+ bindingNumber, ShaderBindingInfo{});
if (!it.second) {
return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
}
- EntryPointMetadata::ShaderBindingInfo* info = &it.first->second;
+ ShaderBindingInfo* info = &it.first->second;
info->bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
switch (info->bindingType) {
@@ -1037,31 +1074,36 @@ namespace dawn_native {
return std::move(output.program);
}
- void AddVertexPullingTransformConfig(const VertexState& vertexState,
+ void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet,
tint::transform::DataMap* transformInputs) {
tint::transform::VertexPulling::Config cfg;
cfg.entry_point_name = entryPoint;
cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
- for (uint32_t i = 0; i < vertexState.bufferCount; ++i) {
- const auto& vertexBuffer = vertexState.buffers[i];
- tint::transform::VertexBufferLayoutDescriptor layout;
- layout.array_stride = vertexBuffer.arrayStride;
- layout.step_mode = ToTintVertexStepMode(vertexBuffer.stepMode);
-
- for (uint32_t j = 0; j < vertexBuffer.attributeCount; ++j) {
- const auto& attribute = vertexBuffer.attributes[j];
- tint::transform::VertexAttributeDescriptor attr;
- attr.format = ToTintVertexFormat(attribute.format);
- attr.offset = attribute.offset;
- attr.shader_location = attribute.shaderLocation;
-
- layout.attributes.push_back(std::move(attr));
- }
- cfg.vertex_state.push_back(std::move(layout));
+ cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
+ for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
+ tint::transform::VertexBufferLayoutDescriptor* tintInfo =
+ &cfg.vertex_state[static_cast<uint8_t>(slot)];
+
+ tintInfo->array_stride = dawnInfo.arrayStride;
+ tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
+ }
+
+ for (VertexAttributeLocation location :
+ IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
+ tint::transform::VertexAttributeDescriptor tintInfo;
+ tintInfo.format = ToTintVertexFormat(dawnInfo.format);
+ tintInfo.offset = dawnInfo.offset;
+ tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
+
+ uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
+ cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
}
+
transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
}
@@ -1125,7 +1167,7 @@ namespace dawn_native {
// ShaderModuleBase
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
- : CachedObject(device), mType(Type::Undefined) {
+ : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
ASSERT(descriptor->nextInChain != nullptr);
const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
FindInChain(descriptor->nextInChain, &spirvDesc);
@@ -1143,7 +1185,7 @@ namespace dawn_native {
}
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag), mType(Type::Undefined) {
+ : ApiObjectBase(device, tag), mType(Type::Undefined) {
}
ShaderModuleBase::~ShaderModuleBase() {
@@ -1157,6 +1199,10 @@ namespace dawn_native {
return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
}
+ ObjectType ShaderModuleBase::GetType() const {
+ return ObjectType::ShaderModule;
+ }
+
bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
return mEntryPoints.count(entryPoint) > 0;
}
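
A small pattern worth calling out in the reflection hunks above: duplicate shader bindings are detected purely from the boolean returned by map::emplace, with no separate lookup. A minimal standalone sketch of that idiom (BindingInfo here is an illustrative stand-in, not Dawn's ShaderBindingInfo):

#include <cstdint>
#include <iostream>
#include <map>

// Illustrative stand-in for the per-binding reflection data.
struct BindingInfo { int type = 0; };

int main() {
    std::map<uint32_t, BindingInfo> bindings;

    // emplace() returns {iterator, inserted}; a false 'inserted' means the key
    // already existed, which is how the reflection code flags duplicate bindings.
    auto first = bindings.emplace(3, BindingInfo{});
    auto dup = bindings.emplace(3, BindingInfo{});

    std::cout << std::boolalpha << first.second << " " << dup.second << "\n";  // true false
    if (!dup.second) {
        std::cout << "Shader has duplicate bindings\n";
    }
}
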
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index 6240cecd08a..8f8081cedb1 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -24,6 +24,7 @@
#include "dawn_native/Format.h"
#include "dawn_native/Forward.h"
#include "dawn_native/IntegerTypes.h"
+#include "dawn_native/ObjectBase.h"
#include "dawn_native/PerStage.h"
#include "dawn_native/VertexFormat.h"
#include "dawn_native/dawn_platform.h"
@@ -110,50 +111,51 @@ namespace dawn_native {
OwnedCompilationMessages* messages);
/// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
- void AddVertexPullingTransformConfig(const VertexState& vertexState,
+ void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet,
tint::transform::DataMap* transformInputs);
- // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
- // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
- // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
- // ShaderModuleBase.
- struct EntryPointMetadata {
- // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
- // for isComparison instead of a wgpu::SamplerBindingType enum.
- struct ShaderSamplerBindingInfo {
- bool isComparison;
- };
+ // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
+ // for isComparison instead of a wgpu::SamplerBindingType enum.
+ struct ShaderSamplerBindingInfo {
+ bool isComparison;
+ };
- // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
- // instead of a single enum.
- struct ShaderTextureBindingInfo {
- SampleTypeBit compatibleSampleTypes;
- wgpu::TextureViewDimension viewDimension;
- bool multisampled;
- };
+ // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
+ // instead of a single enum.
+ struct ShaderTextureBindingInfo {
+ SampleTypeBit compatibleSampleTypes;
+ wgpu::TextureViewDimension viewDimension;
+ bool multisampled;
+ };
- // Per-binding shader metadata contains some SPIRV specific information in addition to
- // most of the frontend per-binding information.
- struct ShaderBindingInfo {
- // The SPIRV ID of the resource.
- uint32_t id;
- uint32_t base_type_id;
+ // Per-binding shader metadata contains some SPIRV specific information in addition to
+ // most of the frontend per-binding information.
+ struct ShaderBindingInfo {
+ // The SPIRV ID of the resource.
+ uint32_t id;
+ uint32_t base_type_id;
- BindingNumber binding;
- BindingInfoType bindingType;
+ BindingNumber binding;
+ BindingInfoType bindingType;
- BufferBindingLayout buffer;
- ShaderSamplerBindingInfo sampler;
- ShaderTextureBindingInfo texture;
- StorageTextureBindingLayout storageTexture;
- };
+ BufferBindingLayout buffer;
+ ShaderSamplerBindingInfo sampler;
+ ShaderTextureBindingInfo texture;
+ StorageTextureBindingLayout storageTexture;
+ };
+ using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+ using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+
+ // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
+ // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
+ // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
+ // ShaderModuleBase.
+ struct EntryPointMetadata {
// bindings[G][B] is the reflection data for the binding defined with
// [[group=G, binding=B]] in WGSL / SPIRV.
- using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
- using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
BindingInfoArray bindings;
struct SamplerTexturePair {
@@ -192,15 +194,27 @@ namespace dawn_native {
// The shader stage for this binding.
SingleShaderStage stage;
+
+ struct OverridableConstant {
+ uint32_t id;
+ // Match tint::inspector::OverridableConstant::Type
+ // Bool is defined as a macro on linux X11 and cannot compile
+ enum class Type { Boolean, Float32, Uint32, Int32 } type;
+ };
+
+ // Store overridableConstants from tint program
+ std::unordered_map<std::string, OverridableConstant> overridableConstants;
};
- class ShaderModuleBase : public CachedObject {
+ class ShaderModuleBase : public ApiObjectBase, public CachedObject {
public:
ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
~ShaderModuleBase() override;
static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
// Return true iff the program has an entrypoint called `entryPoint`.
bool HasEntryPoint(const std::string& entryPoint) const;
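
The new EntryPointMetadata::overridableConstants map is keyed by string, and the reflection code inserts each constant twice, once under its WGSL name and once under its stringified numeric id, so later lookups can use either key until Tint can distinguish the two. A standalone sketch of that dual-key scheme (the struct below is a simplified stand-in for the real OverridableConstant):

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

struct OverridableConstant {
    uint32_t id;
    enum class Type { Boolean, Float32, Uint32, Int32 } type;
};

int main() {
    std::unordered_map<std::string, OverridableConstant> constants;

    // Register one constant under both its name and its numeric id,
    // mirroring what ReflectShaderUsingTint does above.
    OverridableConstant c{42, OverridableConstant::Type::Float32};
    constants["brightness"] = c;
    constants[std::to_string(c.id)] = c;

    // Either key resolves to the same constant.
    std::cout << constants.at("brightness").id << "\n";  // 42
    std::cout << constants.at("42").id << "\n";          // 42
}
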
diff --git a/chromium/third_party/dawn/src/dawn_native/Subresource.cpp b/chromium/third_party/dawn/src/dawn_native/Subresource.cpp
index 99b42cb52a5..ff0bd53fd18 100644
--- a/chromium/third_party/dawn/src/dawn_native/Subresource.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Subresource.cpp
@@ -59,6 +59,7 @@ namespace dawn_native {
case wgpu::TextureAspect::Plane1Only:
return format.aspects & Aspect::Plane1;
}
+ UNREACHABLE();
}
uint8_t GetAspectIndex(Aspect aspect) {
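
Many hunks in this patch append UNREACHABLE() after switches that return in every enum case; the call silences "control reaches end of non-void function" warnings while still trapping if an out-of-range value is ever cast into the enum. A minimal sketch of the pattern with a stand-in macro (Dawn's real UNREACHABLE lives elsewhere in common/):

#include <cstdlib>

// Stand-in for Dawn's UNREACHABLE(): abort if control ever gets here.
#define MY_UNREACHABLE() std::abort()

enum class Aspect { Color, Depth, Stencil };

int AspectIndex(Aspect aspect) {
    switch (aspect) {
        case Aspect::Color:
            return 0;
        case Aspect::Depth:
            return 1;
        case Aspect::Stencil:
            return 2;
    }
    // Every enum value returns above; this line keeps the compiler quiet and
    // catches invalid values at runtime.
    MY_UNREACHABLE();
}
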
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.h b/chromium/third_party/dawn/src/dawn_native/Surface.h
index 44864b54d9e..a1377368573 100644
--- a/chromium/third_party/dawn/src/dawn_native/Surface.h
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.h
@@ -100,6 +100,9 @@ namespace dawn_native {
uint32_t mXWindow = 0;
};
+ // For the benefit of template generation.
+ using SurfaceBase = Surface;
+
} // namespace dawn_native
#endif // DAWNNATIVE_SURFACE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index b81d9066406..2634cd83cb3 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -17,6 +17,7 @@
#include "common/Constants.h"
#include "dawn_native/Adapter.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/Surface.h"
#include "dawn_native/Texture.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -112,11 +113,11 @@ namespace dawn_native {
// SwapChainBase
- SwapChainBase::SwapChainBase(DeviceBase* device) : ObjectBase(device) {
+ SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
}
SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : ApiObjectBase(device, tag) {
}
SwapChainBase::~SwapChainBase() {
@@ -127,6 +128,10 @@ namespace dawn_native {
return new ErrorSwapChain(device);
}
+ ObjectType SwapChainBase::GetType() const {
+ return ObjectType::SwapChain;
+ }
+
// OldSwapChainBase
OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index ad67d2679e1..4a70431c3b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -30,12 +30,14 @@ namespace dawn_native {
TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
- class SwapChainBase : public ObjectBase {
+ class SwapChainBase : public ApiObjectBase {
public:
SwapChainBase(DeviceBase* device);
static SwapChainBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
// Dawn API
virtual void APIConfigure(wgpu::TextureFormat format,
wgpu::TextureUsage allowedUsage,
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index bf1ccff5031..508beaa55ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -23,6 +23,7 @@
#include "dawn_native/ChainUtils_autogen.h"
#include "dawn_native/Device.h"
#include "dawn_native/EnumMaskIterator.h"
+#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -62,8 +63,9 @@ namespace dawn_native {
case wgpu::TextureViewDimension::e1D:
case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
// TODO(crbug.com/dawn/814): Implement for 1D texture.
@@ -83,62 +85,47 @@ namespace dawn_native {
case wgpu::TextureViewDimension::e1D:
case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
- }
- }
-
- bool IsTextureSizeValidForTextureViewDimension(
- wgpu::TextureViewDimension textureViewDimension,
- const Extent3D& textureSize) {
- switch (textureViewDimension) {
- case wgpu::TextureViewDimension::Cube:
- case wgpu::TextureViewDimension::CubeArray:
- return textureSize.width == textureSize.height;
- case wgpu::TextureViewDimension::e2D:
- case wgpu::TextureViewDimension::e2DArray:
- case wgpu::TextureViewDimension::e3D:
- return true;
-
- case wgpu::TextureViewDimension::e1D:
- case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
wgpu::TextureUsage usage,
const Format* format) {
- if (!IsValidSampleCount(descriptor->sampleCount)) {
- return DAWN_VALIDATION_ERROR("The sample count of the texture is not supported.");
- }
+ DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+ "The sample count (%u) of the texture is not supported.",
+ descriptor->sampleCount);
if (descriptor->sampleCount > 1) {
- if (descriptor->mipLevelCount > 1) {
- return DAWN_VALIDATION_ERROR(
- "The mipmap level count of a multisampled texture must be 1.");
- }
+ DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
+ "The mip level count (%u) of a multisampled texture is not 1.",
+ descriptor->mipLevelCount);
// Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
// Multisampled 2D array texture is not supported because on Metal it requires the
// version of macOS be greater than 10.14.
- if (descriptor->dimension != wgpu::TextureDimension::e2D ||
- descriptor->size.depthOrArrayLayers > 1) {
- return DAWN_VALIDATION_ERROR("Multisampled texture must be 2D with depth=1");
- }
+ DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+ "The dimension (%s) of a multisampled texture is not 2D.",
+ descriptor->dimension);
+
+ DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
+ "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
+ descriptor->size.depthOrArrayLayers);
// If a format can support multisample, it must be renderable. Because Vulkan
// requires that if the format is not color-renderable or depth/stencil renderable,
// sampleCount must be 1.
- if (!format->isRenderable) {
- return DAWN_VALIDATION_ERROR("This format cannot support multisample.");
- }
+ DAWN_INVALID_IF(!format->isRenderable,
+ "The texture format (%s) does not support multisampling.",
+ format->format);
+
// Compressed formats are not renderable. They cannot support multisample.
ASSERT(!format->isCompressed);
- if (usage & wgpu::TextureUsage::StorageBinding) {
- return DAWN_VALIDATION_ERROR(
- "The sample counts of the storage textures must be 1.");
- }
+ DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
+ "The sample count (%u) of a storage textures is not 1.",
+ descriptor->sampleCount);
}
return {};
@@ -147,24 +134,40 @@ namespace dawn_native {
MaybeError ValidateTextureViewDimensionCompatibility(
const TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- if (!IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
- descriptor->arrayLayerCount)) {
- return DAWN_VALIDATION_ERROR(
- "The dimension of the texture view is not compatible with the layer count");
- }
+ DAWN_INVALID_IF(
+ !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
+ descriptor->arrayLayerCount),
+ "The dimension (%s) of the texture view is not compatible with the layer count "
+ "(%u) of %s.",
+ descriptor->dimension, descriptor->arrayLayerCount, texture);
+
+ DAWN_INVALID_IF(
+ !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
+ texture->GetDimension()),
+ "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
+ "of %s.",
+ descriptor->dimension, texture->GetDimension(), texture);
- if (!IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
- texture->GetDimension())) {
- return DAWN_VALIDATION_ERROR(
- "The dimension of the texture view is not compatible with the dimension of the"
- "original texture");
- }
+ switch (descriptor->dimension) {
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ DAWN_INVALID_IF(
+ texture->GetSize().width != texture->GetSize().height,
+ "A %s texture view is not compatible with %s because the texture's width "
+ "(%u) and height (%u) are not equal.",
+ descriptor->dimension, texture, texture->GetSize().width,
+ texture->GetSize().height);
+ break;
- if (!IsTextureSizeValidForTextureViewDimension(descriptor->dimension,
- texture->GetSize())) {
- return DAWN_VALIDATION_ERROR(
- "The dimension of the texture view is not compatible with the size of the"
- "original texture");
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e3D:
+ break;
+
+ case wgpu::TextureViewDimension::e1D:
+ case wgpu::TextureViewDimension::Undefined:
+ UNREACHABLE();
+ break;
}
return {};
@@ -188,11 +191,11 @@ namespace dawn_native {
default:
UNREACHABLE();
}
- if (descriptor->size.width > maxExtent.width ||
- descriptor->size.height > maxExtent.height ||
- descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers) {
- return DAWN_VALIDATION_ERROR("Texture dimension (width, height or depth) exceeded");
- }
+ DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
+ descriptor->size.height > maxExtent.height ||
+ descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
+ "Texture size (%s) exceeded maximum texture size (%s).",
+ &descriptor->size, &maxExtent);
uint32_t maxMippedDimension = descriptor->size.width;
if (descriptor->dimension != wgpu::TextureDimension::e1D) {
@@ -202,19 +205,22 @@ namespace dawn_native {
maxMippedDimension =
std::max(maxMippedDimension, descriptor->size.depthOrArrayLayers);
}
- if (Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount) {
- return DAWN_VALIDATION_ERROR("Texture has too many mip levels");
- }
+ DAWN_INVALID_IF(
+ Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+ "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+ descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+
ASSERT(descriptor->mipLevelCount <= kMaxTexture2DMipLevels);
if (format->isCompressed) {
const TexelBlockInfo& blockInfo =
format->GetAspectInfo(wgpu::TextureAspect::All).block;
- if (descriptor->size.width % blockInfo.width != 0 ||
- descriptor->size.height % blockInfo.height != 0) {
- return DAWN_VALIDATION_ERROR(
- "The size of the texture is incompatible with the texture format");
- }
+ DAWN_INVALID_IF(
+ descriptor->size.width % blockInfo.width != 0 ||
+ descriptor->size.height % blockInfo.height != 0,
+ "The size (%s) of the texture is not a multiple of the block width (%u) and "
+ "height (%u) of the texture format (%s).",
+ &descriptor->size, blockInfo.width, blockInfo.height, format->format);
}
return {};
@@ -228,25 +234,28 @@ namespace dawn_native {
constexpr wgpu::TextureUsage kValidCompressedUsages =
wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
wgpu::TextureUsage::CopyDst;
- if (format->isCompressed && !IsSubset(usage, kValidCompressedUsages)) {
- return DAWN_VALIDATION_ERROR(
- "Compressed texture format is incompatible with the texture usage");
- }
-
- if (!format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment)) {
- return DAWN_VALIDATION_ERROR(
- "Non-renderable format used with RenderAttachment usage");
- }
-
- if (!format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding)) {
- return DAWN_VALIDATION_ERROR("Format cannot be used in storage textures");
- }
+ DAWN_INVALID_IF(
+ format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
+ "The texture usage (%s) is incompatible with the compressed texture format (%s).",
+ usage, format->format);
+
+ DAWN_INVALID_IF(
+ !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
+ "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
+ "format (%s).",
+ usage, wgpu::TextureUsage::RenderAttachment, format->format);
+
+ DAWN_INVALID_IF(
+ !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
+ "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
+ usage, wgpu::TextureUsage::StorageBinding, format->format);
constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
wgpu::TextureUsage::TextureBinding;
- if (format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages)) {
- return DAWN_VALIDATION_ERROR("Multi-planar format doesn't have valid usage.");
- }
+ DAWN_INVALID_IF(
+ format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+ "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
+ format->format);
return {};
}
@@ -261,14 +270,12 @@ namespace dawn_native {
const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
FindInChain(descriptor->nextInChain, &internalUsageDesc);
- if (descriptor->dimension == wgpu::TextureDimension::e1D) {
- return DAWN_VALIDATION_ERROR("1D textures aren't supported (yet).");
- }
+ DAWN_INVALID_IF(descriptor->dimension == wgpu::TextureDimension::e1D,
+ "1D textures aren't supported (yet).");
- if (internalUsageDesc != nullptr &&
- !device->IsExtensionEnabled(Extension::DawnInternalUsages)) {
- return DAWN_VALIDATION_ERROR("The dawn-internal-usages feature is not enabled");
- }
+ DAWN_INVALID_IF(
+ internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
+ "The dawn-internal-usages feature is not enabled");
const Format* format;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
@@ -282,34 +289,42 @@ namespace dawn_native {
DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
- if (descriptor->size.width == 0 || descriptor->size.height == 0 ||
- descriptor->size.depthOrArrayLayers == 0 || descriptor->mipLevelCount == 0) {
- return DAWN_VALIDATION_ERROR("Cannot create an empty texture");
- }
+ DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
+ descriptor->size.depthOrArrayLayers == 0 ||
+ descriptor->mipLevelCount == 0,
+ "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+ descriptor->mipLevelCount);
- if (descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed) {
- return DAWN_VALIDATION_ERROR("Compressed texture must be 2D");
- }
+ DAWN_INVALID_IF(
+ descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
+ "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
+ descriptor->dimension, format->format);
// Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
// doesn't support depth/stencil formats on 3D textures.
- if (descriptor->dimension != wgpu::TextureDimension::e2D &&
- (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
- return DAWN_VALIDATION_ERROR("Depth/stencil formats are valid for 2D textures only");
- }
+ DAWN_INVALID_IF(
+ descriptor->dimension != wgpu::TextureDimension::e2D &&
+ (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
+ "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
+ descriptor->dimension, format->format);
DAWN_TRY(ValidateTextureSize(descriptor, format));
- if (device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
- descriptor->mipLevelCount > 1 &&
- device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal) {
- // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
- // Readbacks from the non-zero mip of a stencil texture may contain
- // garbage data.
- return DAWN_VALIDATION_ERROR(
- "crbug.com/dawn/838: Stencil textures with more than one mip level are "
- "disabled on Metal.");
- }
+ // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
+ // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
+ descriptor->mipLevelCount > 1 &&
+ device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
+ "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
+ "disabled on Metal.");
+
+ DAWN_INVALID_IF(
+ device->IsToggleEnabled(Toggle::DisableR8RG8Mipmaps) && descriptor->mipLevelCount > 1 &&
+ (descriptor->format == wgpu::TextureFormat::R8Unorm ||
+ descriptor->format == wgpu::TextureFormat::RG8Unorm),
+ "https://crbug.com/dawn/1071: r8unorm and rg8unorm textures with more than one mip "
+ "level are disabled on Metal.");
return {};
}
@@ -317,39 +332,41 @@ namespace dawn_native {
MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
const TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
- }
+ DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
// Parent texture should have been already validated.
ASSERT(texture);
ASSERT(!texture->IsError());
DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
- if (descriptor->dimension == wgpu::TextureViewDimension::e1D) {
- return DAWN_VALIDATION_ERROR("1D texture views aren't supported (yet).");
- }
+ DAWN_INVALID_IF(descriptor->dimension == wgpu::TextureViewDimension::e1D,
+ "1D texture views aren't supported (yet).");
DAWN_TRY(ValidateTextureFormat(descriptor->format));
DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
- if (SelectFormatAspects(texture->GetFormat(), descriptor->aspect) == Aspect::None) {
- return DAWN_VALIDATION_ERROR("Texture does not have selected aspect for texture view.");
- }
-
- if (descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0) {
- return DAWN_VALIDATION_ERROR("Cannot create an empty texture view");
- }
-
- if (uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
- uint64_t(texture->GetArrayLayers())) {
- return DAWN_VALIDATION_ERROR("Texture view array-layer out of range");
- }
-
- if (uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
- uint64_t(texture->GetNumMipLevels())) {
- return DAWN_VALIDATION_ERROR("Texture view mip-level out of range");
- }
+ DAWN_INVALID_IF(
+ SelectFormatAspects(texture->GetFormat(), descriptor->aspect) == Aspect::None,
+ "Texture format (%s) does not have the texture view's selected aspect (%s).",
+ texture->GetFormat().format, descriptor->aspect);
+
+ DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
+ "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
+ descriptor->arrayLayerCount, descriptor->mipLevelCount);
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
+ uint64_t(texture->GetArrayLayers()),
+ "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
+ "texture's array layer count (%u).",
+ descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
+
+ DAWN_INVALID_IF(
+ uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
+ uint64_t(texture->GetNumMipLevels()),
+ "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
+ "texture's mip level count (%u).",
+ descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
DAWN_TRY(ValidateTextureViewFormatCompatibility(texture, descriptor));
DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
@@ -389,7 +406,7 @@ namespace dawn_native {
// TODO(dawn:682): Use GetAspectInfo(aspect).
desc.format = texture->GetFormat().format;
}
- if (desc.arrayLayerCount == 0) {
+ if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
switch (desc.dimension) {
case wgpu::TextureViewDimension::e1D:
case wgpu::TextureViewDimension::e2D:
@@ -409,14 +426,15 @@ namespace dawn_native {
break;
}
}
- if (desc.mipLevelCount == 0) {
+
+ if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
}
return desc;
}
// WebGPU only supports sample counts of 1 and 4. We could expand to more based on
- // platform support, but it would probably be an extension.
+ // platform support, but it would probably be a feature.
bool IsValidSampleCount(uint32_t sampleCount) {
switch (sampleCount) {
case 1:
@@ -433,7 +451,7 @@ namespace dawn_native {
TextureBase::TextureBase(DeviceBase* device,
const TextureDescriptor* descriptor,
TextureState state)
- : ObjectBase(device),
+ : ApiObjectBase(device, descriptor->label),
mDimension(descriptor->dimension),
mFormat(device->GetValidInternalFormat(descriptor->format)),
mSize(descriptor->size),
@@ -451,18 +469,12 @@ namespace dawn_native {
if (internalUsageDesc != nullptr) {
mInternalUsage |= internalUsageDesc->internalUsage;
}
-
- // Add readonly storage usage if the texture has a storage usage. The validation rules in
- // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
- if (mInternalUsage & wgpu::TextureUsage::StorageBinding) {
- mInternalUsage |= kReadOnlyStorageTexture;
- }
}
static Format kUnusedFormat;
TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mFormat(kUnusedFormat) {
+ : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
}
// static
@@ -470,6 +482,10 @@ namespace dawn_native {
return new TextureBase(device, ObjectBase::kError);
}
+ ObjectType TextureBase::GetType() const {
+ return ObjectType::Texture;
+ }
+
wgpu::TextureDimension TextureBase::GetDimension() const {
ASSERT(!IsError());
return mDimension;
@@ -585,9 +601,8 @@ namespace dawn_native {
MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
ASSERT(!IsError());
- if (mState == TextureState::Destroyed) {
- return DAWN_VALIDATION_ERROR("Destroyed texture used in a submit");
- }
+ DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
+ this);
return {};
}
@@ -648,14 +663,15 @@ namespace dawn_native {
DeviceBase* device = GetDevice();
Ref<TextureViewBase> result;
- if (device->ConsumedError(device->CreateTextureView(this, descriptor), &result)) {
+ if (device->ConsumedError(device->CreateTextureView(this, descriptor), &result,
+ "calling %s.CreateView(%s).", this, descriptor)) {
return TextureViewBase::MakeError(device);
}
return result.Detach();
}
void TextureBase::APIDestroy() {
- if (GetDevice()->ConsumedError(ValidateDestroy())) {
+ if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
return;
}
ASSERT(!IsError());
@@ -678,7 +694,7 @@ namespace dawn_native {
// TextureViewBase
TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
- : ObjectBase(texture->GetDevice()),
+ : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
mTexture(texture),
mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
mDimension(descriptor->dimension),
@@ -688,7 +704,7 @@ namespace dawn_native {
}
TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mFormat(kUnusedFormat) {
+ : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
}
// static
@@ -696,6 +712,10 @@ namespace dawn_native {
return new TextureViewBase(device, ObjectBase::kError);
}
+ ObjectType TextureViewBase::GetType() const {
+ return ObjectType::TextureView;
+ }
+
const TextureBase* TextureViewBase::GetTexture() const {
ASSERT(!IsError());
return mTexture.Get();
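
Most of the Texture.cpp changes are a mechanical conversion from "if (...) return DAWN_VALIDATION_ERROR(...)" to the DAWN_INVALID_IF macro, which folds the condition and a formatted message into one early-return statement. A rough illustration of the shape such a macro expands to; CHECK_INVALID_IF below is a simplified stand-in, not Dawn's actual macro, and it omits Dawn's printf-style argument formatting:

#include <cstdint>
#include <optional>
#include <string>

using MaybeError = std::optional<std::string>;  // stand-in for Dawn's MaybeError

// Simplified stand-in: on a true condition, return an error from the enclosing function.
#define CHECK_INVALID_IF(condition, message) \
    do {                                     \
        if (condition) {                     \
            return MaybeError{message};      \
        }                                    \
    } while (0)

MaybeError ValidateSampleCount(uint32_t sampleCount) {
    CHECK_INVALID_IF(sampleCount != 1 && sampleCount != 4,
                     "The sample count of the texture is not supported.");
    return std::nullopt;  // success
}
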
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index c3e161d68c2..52f5f391e56 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -40,9 +40,10 @@ namespace dawn_native {
bool IsValidSampleCount(uint32_t sampleCount);
static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
- wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding | kReadOnlyStorageTexture;
+ wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
+ kReadOnlyRenderAttachment;
- class TextureBase : public ObjectBase {
+ class TextureBase : public ApiObjectBase {
public:
enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
enum class ClearValue { Zero, NonZero };
@@ -50,6 +51,8 @@ namespace dawn_native {
static TextureBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
wgpu::TextureDimension GetDimension() const;
const Format& GetFormat() const;
const Extent3D& GetSize() const;
@@ -113,12 +116,14 @@ namespace dawn_native {
std::vector<bool> mIsSubresourceContentInitializedAtIndex;
};
- class TextureViewBase : public ObjectBase {
+ class TextureViewBase : public ApiObjectBase {
public:
TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
static TextureViewBase* MakeError(DeviceBase* device);
+ ObjectType GetType() const override;
+
const TextureBase* GetTexture() const;
TextureBase* GetTexture();
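
kReadOnlyTextureUsages above is a constexpr bitmask of usage flags, and the validation code checks combinations against such masks with IsSubset(usage, allowed). One plausible way a subset test like that works for flag enums, sketched with plain uint32_t flags rather than Dawn's wgpu::TextureUsage type (the flag values are illustrative):

#include <cstdint>
#include <iostream>

// Illustrative usage flags; real code uses wgpu::TextureUsage bit values.
constexpr uint32_t kCopySrc = 1u << 0;
constexpr uint32_t kCopyDst = 1u << 1;
constexpr uint32_t kTextureBinding = 1u << 2;
constexpr uint32_t kRenderAttachment = 1u << 3;

constexpr uint32_t kValidCompressedUsages = kTextureBinding | kCopySrc | kCopyDst;

// A set of flags is a subset of 'allowed' iff it sets no bit outside 'allowed'.
constexpr bool IsSubsetOf(uint32_t subset, uint32_t allowed) {
    return (subset & ~allowed) == 0;
}

int main() {
    std::cout << std::boolalpha
              << IsSubsetOf(kCopySrc | kTextureBinding, kValidCompressedUsages) << "\n"    // true
              << IsSubsetOf(kRenderAttachment | kCopySrc, kValidCompressedUsages) << "\n"; // false
}
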
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index efd65190e28..e7f844d5917 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -212,6 +212,22 @@ namespace dawn_native {
{"disable_symbol_renaming",
"Disables the WGSL symbol renaming so that names are preserved.",
"https://crbug.com/dawn/1016"}},
+ {Toggle::UseUserDefinedLabelsInBackend,
+ {"use_user_defined_labels_in_backend",
+ "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
+ "objects.",
+ "https://crbug.com/dawn/840"}},
+ {Toggle::DisableR8RG8Mipmaps,
+ {"disable_r8_rg8_mipmaps",
+ "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
+ "to not clear correctly.",
+ "https://crbug.com/dawn/1071"}},
+ {Toggle::UseDummyFragmentInVertexOnlyPipeline,
+ {"use_dummy_fragment_in_vertex_only_pipeline",
+ "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
+ "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
+ "some Metal devices with Intel GPU to ensure the depth result is correct.",
+ "https://crbug.com/dawn/136"}},
// Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
}};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index 9c114002e5d..098859879a6 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -58,6 +58,9 @@ namespace dawn_native {
ForceWGSLStep,
DisableWorkgroupInit,
DisableSymbolRenaming,
+ UseUserDefinedLabelsInBackend,
+ DisableR8RG8Mipmaps,
+ UseDummyFragmentInVertexOnlyPipeline,
EnumCount,
InvalidEnum = EnumCount,
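
The toggle additions follow the existing pattern: a new enumerator in Toggles.h plus a matching name/description/URL entry in the table in Toggles.cpp. A compact sketch of that enum-to-info registry; the types and entries here are illustrative stand-ins, not Dawn's actual ToggleEnumAndInfoList:

#include <array>
#include <cstddef>
#include <iostream>

enum class Toggle { DisableSymbolRenaming, DisableR8RG8Mipmaps, EnumCount };

struct ToggleInfo {
    const char* name;
    const char* description;
    const char* url;
};

// Table indexed by the enum value; adding a toggle means adding one enumerator
// and one table entry, as in the patch above.
constexpr std::array<ToggleInfo, static_cast<size_t>(Toggle::EnumCount)> kToggleInfos{{
    {"disable_symbol_renaming",
     "Disables the WGSL symbol renaming so that names are preserved.",
     "https://crbug.com/dawn/1016"},
    {"disable_r8_rg8_mipmaps",
     "Disables mipmaps for r8unorm and rg8unorm textures on affected drivers.",
     "https://crbug.com/dawn/1071"},
}};

int main() {
    const ToggleInfo& info = kToggleInfos[static_cast<size_t>(Toggle::DisableR8RG8Mipmaps)];
    std::cout << info.name << ": " << info.description << "\n";
}
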
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index d26603a958a..67f50665846 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -104,16 +104,39 @@ namespace dawn_native { namespace d3d12 {
mDriverDescription = o.str();
}
- InitializeSupportedExtensions();
+ InitializeSupportedFeatures();
return {};
}
- void Adapter::InitializeSupportedExtensions() {
- mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
- mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
- mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
- mSupportedExtensions.EnableExtension(Extension::MultiPlanarFormats);
+ bool Adapter::AreTimestampQueriesSupported() const {
+ D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+ queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+ ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
+ HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
+ // and vGPU implementations.
+ uint64_t timeStampFrequency;
+ hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
+ if (FAILED(hr)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ void Adapter::InitializeSupportedFeatures() {
+ if (AreTimestampQueriesSupported()) {
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+ }
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
}
MaybeError Adapter::InitializeDebugLayerFilters() {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
index ea6975efa4c..9c3d8f4d213 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
@@ -45,7 +45,9 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
MaybeError ResetInternalDeviceForTestingImpl() override;
- void InitializeSupportedExtensions();
+ bool AreTimestampQueriesSupported() const;
+
+ void InitializeSupportedFeatures();
MaybeError InitializeDebugLayerFilters();
void CleanUpDebugLayerFilters();
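
The D3D12 adapter now gates Feature::TimestampQuery on a runtime probe: create a throwaway direct command queue and check whether GetTimestampFrequency succeeds, since buggy Windows container and vGPU stacks fail there. A standalone version of that probe, written as a free function over an existing ID3D12Device (Windows-only, the calls mirror the ones shown in the diff above):

#include <d3d12.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

// Returns true if timestamp queries appear usable on this device.
bool AreTimestampQueriesSupported(ID3D12Device* device) {
    D3D12_COMMAND_QUEUE_DESC queueDesc = {};
    queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
    queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;

    ComPtr<ID3D12CommandQueue> queue;
    if (FAILED(device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&queue)))) {
        return false;
    }

    // Broken Windows container / vGPU implementations return an error HRESULT here.
    UINT64 frequency = 0;
    return SUCCEEDED(queue->GetTimestampFrequency(&frequency));
}
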
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index a9f27314e35..f002ece3535 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -40,7 +40,7 @@ namespace dawn_native { namespace d3d12 {
mCPUViewAllocation = viewAllocation;
- const auto& bindingOffsets = bgl->GetBindingOffsets();
+ const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
ID3D12Device* d3d12Device = device->GetD3D12Device();
@@ -69,13 +69,14 @@ namespace dawn_native { namespace d3d12 {
switch (bindingInfo.buffer.type) {
case wgpu::BufferBindingType::Uniform: {
D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
- desc.SizeInBytes = Align(binding.size, 256);
+ desc.SizeInBytes =
+ Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
desc.BufferLocation =
ToBackend(binding.buffer)->GetVA() + binding.offset;
d3d12Device->CreateConstantBufferView(
- &desc, viewAllocation.OffsetFrom(viewSizeIncrement,
- bindingOffsets[bindingIndex]));
+ &desc, viewAllocation.OffsetFrom(
+ viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
break;
}
case wgpu::BufferBindingType::Storage:
@@ -99,7 +100,7 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateUnorderedAccessView(
resource, nullptr, &desc,
viewAllocation.OffsetFrom(viewSizeIncrement,
- bindingOffsets[bindingIndex]));
+ descriptorHeapOffsets[bindingIndex]));
break;
}
case wgpu::BufferBindingType::ReadOnlyStorage: {
@@ -118,7 +119,7 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateShaderResourceView(
resource, &desc,
viewAllocation.OffsetFrom(viewSizeIncrement,
- bindingOffsets[bindingIndex]));
+ descriptorHeapOffsets[bindingIndex]));
break;
}
case wgpu::BufferBindingType::Undefined:
@@ -142,7 +143,8 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateShaderResourceView(
resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
break;
}
@@ -158,23 +160,12 @@ namespace dawn_native { namespace d3d12 {
}
switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly: {
- // Readonly storage is implemented as SRV so it can be used at the same
- // time as a sampled texture.
- auto& srv = view->GetSRVDescriptor();
- d3d12Device->CreateShaderResourceView(
- resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement,
- bindingOffsets[bindingIndex]));
- break;
- }
-
case wgpu::StorageTextureAccess::WriteOnly: {
D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
d3d12Device->CreateUnorderedAccessView(
resource, nullptr, &uav,
viewAllocation.OffsetFrom(viewSizeIncrement,
- bindingOffsets[bindingIndex]));
+ descriptorHeapOffsets[bindingIndex]));
break;
}
@@ -201,7 +192,8 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateShaderResourceView(
resource, &srv,
- viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
+ viewAllocation.OffsetFrom(viewSizeIncrement,
+ descriptorHeapOffsets[bindingIndex]));
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index 28719de8b70..761b8f74a7a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -22,35 +22,33 @@
namespace dawn_native { namespace d3d12 {
namespace {
- BindGroupLayout::DescriptorType WGPUBindingInfoToDescriptorType(
+ D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
const BindingInfo& bindingInfo) {
switch (bindingInfo.bindingType) {
case BindingInfoType::Buffer:
switch (bindingInfo.buffer.type) {
case wgpu::BufferBindingType::Uniform:
- return BindGroupLayout::DescriptorType::CBV;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
case wgpu::BufferBindingType::Storage:
case kInternalStorageBufferBinding:
- return BindGroupLayout::DescriptorType::UAV;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
case wgpu::BufferBindingType::ReadOnlyStorage:
- return BindGroupLayout::DescriptorType::SRV;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
case wgpu::BufferBindingType::Undefined:
UNREACHABLE();
}
case BindingInfoType::Sampler:
- return BindGroupLayout::DescriptorType::Sampler;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
case BindingInfoType::Texture:
case BindingInfoType::ExternalTexture:
- return BindGroupLayout::DescriptorType::SRV;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
case BindingInfoType::StorageTexture:
switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly:
- return BindGroupLayout::DescriptorType::SRV;
case wgpu::StorageTextureAccess::WriteOnly:
- return BindGroupLayout::DescriptorType::UAV;
+ return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
case wgpu::StorageTextureAccess::Undefined:
UNREACHABLE();
}
@@ -59,89 +57,73 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
// static
- Ref<BindGroupLayout> BindGroupLayout::Create(Device* device,
- const BindGroupLayoutDescriptor* descriptor) {
- return AcquireRef(new BindGroupLayout(device, descriptor));
+ Ref<BindGroupLayout> BindGroupLayout::Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
}
- BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor),
- mBindingOffsets(GetBindingCount()),
- mDescriptorCounts{},
+ BindGroupLayout::BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+ mDescriptorHeapOffsets(GetBindingCount()),
+ mShaderRegisters(GetBindingCount()),
+ mCbvUavSrvDescriptorCount(0),
+ mSamplerDescriptorCount(0),
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
- for (BindingIndex bindingIndex = GetDynamicBufferCount(); bindingIndex < GetBindingCount();
- ++bindingIndex) {
+ for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
- // For dynamic resources, Dawn uses root descriptor in D3D12 backend.
- // So there is no need to allocate the descriptor from descriptor heap.
- // This loop starts after the dynamic buffer indices to skip counting
- // dynamic resources in calculating the size of the descriptor heap.
- ASSERT(!bindingInfo.buffer.hasDynamicOffset);
+ D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
+ WGPUBindingInfoToDescriptorRangeType(bindingInfo);
// TODO(dawn:728) In the future, special handling will be needed for external textures
// here because they encompass multiple views.
- DescriptorType descriptorType = WGPUBindingInfoToDescriptorType(bindingInfo);
- mBindingOffsets[bindingIndex] = mDescriptorCounts[descriptorType]++;
- }
+ mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
- auto SetDescriptorRange = [&](uint32_t index, uint32_t count, uint32_t* baseRegister,
- D3D12_DESCRIPTOR_RANGE_TYPE type) -> bool {
- if (count == 0) {
- return false;
+ if (bindingIndex < GetDynamicBufferCount()) {
+ continue;
}
- auto& range = mRanges[index];
- range.RangeType = type;
- range.NumDescriptors = count;
- range.RegisterSpace = 0;
- range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
- range.BaseShaderRegister = *baseRegister;
- *baseRegister += count;
- // These ranges will be copied and range.BaseShaderRegister will be set in
- // d3d12::PipelineLayout to account for bind group register offsets
- return true;
- };
-
- uint32_t rangeIndex = 0;
- uint32_t baseRegister = 0;
-
- std::array<uint32_t, DescriptorType::Count> descriptorOffsets;
- // Ranges 0-2 contain the CBV, UAV, and SRV ranges, if they exist, tightly packed
- // Range 3 contains the Sampler range, if there is one
- if (SetDescriptorRange(rangeIndex, mDescriptorCounts[CBV], &baseRegister,
- D3D12_DESCRIPTOR_RANGE_TYPE_CBV)) {
- descriptorOffsets[CBV] = mRanges[rangeIndex++].BaseShaderRegister;
- }
- if (SetDescriptorRange(rangeIndex, mDescriptorCounts[UAV], &baseRegister,
- D3D12_DESCRIPTOR_RANGE_TYPE_UAV)) {
- descriptorOffsets[UAV] = mRanges[rangeIndex++].BaseShaderRegister;
- }
- if (SetDescriptorRange(rangeIndex, mDescriptorCounts[SRV], &baseRegister,
- D3D12_DESCRIPTOR_RANGE_TYPE_SRV)) {
- descriptorOffsets[SRV] = mRanges[rangeIndex++].BaseShaderRegister;
- }
- uint32_t zero = 0;
- SetDescriptorRange(Sampler, mDescriptorCounts[Sampler], &zero,
- D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER);
- descriptorOffsets[Sampler] = 0;
+ // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
+ // need to allocate the descriptor from descriptor heap or create descriptor ranges.
+ ASSERT(!bindingInfo.buffer.hasDynamicOffset);
- for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
- const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+ // TODO(dawn:728) In the future, special handling will be needed for external textures
+ // here because they encompass multiple views.
+ mDescriptorHeapOffsets[bindingIndex] =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+ ? mSamplerDescriptorCount++
+ : mCbvUavSrvDescriptorCount++;
+
+ D3D12_DESCRIPTOR_RANGE range;
+ range.RangeType = descriptorRangeType;
+ range.NumDescriptors = 1;
+ range.BaseShaderRegister = GetShaderRegister(bindingIndex);
+ range.RegisterSpace = kRegisterSpacePlaceholder;
+ range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
- if (bindingInfo.bindingType == BindingInfoType::Buffer &&
- bindingInfo.buffer.hasDynamicOffset) {
- // Dawn is using values in mBindingOffsets to decide register number in HLSL.
- // Root descriptor needs to set this value to set correct register number in
- // generated HLSL shader.
- mBindingOffsets[bindingIndex] = baseRegister++;
- continue;
+ std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
+ descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+ ? mSamplerDescriptorRanges
+ : mCbvUavSrvDescriptorRanges;
+
+ // Try to join this range with the previous one, if the current range is a continuation
+ // of the previous. This is possible because the binding infos in the base type are
+ // sorted.
+ if (descriptorRanges.size() >= 2) {
+ D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
+ if (previous.RangeType == range.RangeType &&
+ previous.BaseShaderRegister + previous.NumDescriptors ==
+ range.BaseShaderRegister) {
+ previous.NumDescriptors += range.NumDescriptors;
+ continue;
+ }
}
- // TODO(dawn:728) In the future, special handling will be needed here for external
- // textures because they encompass multiple views.
- DescriptorType descriptorType = WGPUBindingInfoToDescriptorType(bindingInfo);
- mBindingOffsets[bindingIndex] += descriptorOffsets[descriptorType];
+ descriptorRanges.push_back(range);
}
mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
@@ -181,34 +163,29 @@ namespace dawn_native { namespace d3d12 {
mBindGroupAllocator.Deallocate(bindGroup);
}
- ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetBindingOffsets() const {
- return {mBindingOffsets.data(), mBindingOffsets.size()};
- }
-
- uint32_t BindGroupLayout::GetCbvUavSrvDescriptorTableSize() const {
- return (static_cast<uint32_t>(mDescriptorCounts[CBV] > 0) +
- static_cast<uint32_t>(mDescriptorCounts[UAV] > 0) +
- static_cast<uint32_t>(mDescriptorCounts[SRV] > 0));
+ ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
+ return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
}
- uint32_t BindGroupLayout::GetSamplerDescriptorTableSize() const {
- return mDescriptorCounts[Sampler] > 0;
+ uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
+ return mShaderRegisters[bindingIndex];
}
uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
- return mDescriptorCounts[CBV] + mDescriptorCounts[UAV] + mDescriptorCounts[SRV];
+ return mCbvUavSrvDescriptorCount;
}
uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
- return mDescriptorCounts[Sampler];
+ return mSamplerDescriptorCount;
}
- const D3D12_DESCRIPTOR_RANGE* BindGroupLayout::GetCbvUavSrvDescriptorRanges() const {
- return mRanges;
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
+ const {
+ return mCbvUavSrvDescriptorRanges;
}
- const D3D12_DESCRIPTOR_RANGE* BindGroupLayout::GetSamplerDescriptorRanges() const {
- return &mRanges[Sampler];
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
+ return mSamplerDescriptorRanges;
}
}} // namespace dawn_native::d3d12
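
The rewritten per-binding loop above creates one D3D12_DESCRIPTOR_RANGE per binding but folds it into the previous range whenever the type matches and the shader registers are contiguous, which works because the base class keeps bindings sorted. The merge logic in isolation, over a plain struct rather than D3D12_DESCRIPTOR_RANGE (a sketch, not the exact code from the diff):

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in for D3D12_DESCRIPTOR_RANGE.
struct Range {
    int type;                 // CBV / UAV / SRV / sampler
    uint32_t numDescriptors;
    uint32_t baseRegister;
};

// Append a one-descriptor range, merging it into the previous range when it is
// a direct continuation (same type, contiguous registers).
void AppendRange(std::vector<Range>& ranges, int type, uint32_t baseRegister) {
    if (!ranges.empty()) {
        Range& prev = ranges.back();
        if (prev.type == type && prev.baseRegister + prev.numDescriptors == baseRegister) {
            prev.numDescriptors += 1;
            return;
        }
    }
    ranges.push_back({type, 1, baseRegister});
}

int main() {
    std::vector<Range> ranges;
    AppendRange(ranges, /*SRV*/ 0, 0);
    AppendRange(ranges, /*SRV*/ 0, 1);              // merged: registers 0-1
    AppendRange(ranges, /*UAV*/ 1, 2);              // new range: type changed
    std::cout << ranges.size() << "\n";             // 2
    std::cout << ranges[0].numDescriptors << "\n";  // 2
}
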
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
index 503566643d5..abf67021a03 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -28,37 +28,60 @@ namespace dawn_native { namespace d3d12 {
class Device;
class StagingDescriptorAllocator;
+ // A purposefully invalid register space.
+ //
+ // We use the bind group index as the register space, but don't know the bind group index until
+ // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
+ static constexpr uint32_t kRegisterSpacePlaceholder =
+ D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+
class BindGroupLayout final : public BindGroupLayoutBase {
public:
static Ref<BindGroupLayout> Create(Device* device,
- const BindGroupLayoutDescriptor* descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
- enum DescriptorType {
- CBV,
- UAV,
- SRV,
- Sampler,
- Count,
- };
-
- ityp::span<BindingIndex, const uint32_t> GetBindingOffsets() const;
- uint32_t GetCbvUavSrvDescriptorTableSize() const;
- uint32_t GetSamplerDescriptorTableSize() const;
+ // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
+ // dynamic binding indexes.
+ ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+
+ // The D3D shader register that the Dawn binding index is mapped to by this bind group
+ // layout.
+ uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+
+ // Counts of descriptors in the descriptor tables.
uint32_t GetCbvUavSrvDescriptorCount() const;
uint32_t GetSamplerDescriptorCount() const;
- const D3D12_DESCRIPTOR_RANGE* GetCbvUavSrvDescriptorRanges() const;
- const D3D12_DESCRIPTOR_RANGE* GetSamplerDescriptorRanges() const;
+
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
+ const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
private:
- BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroupLayout(Device* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
~BindGroupLayout() override = default;
- ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mBindingOffsets;
- std::array<uint32_t, DescriptorType::Count> mDescriptorCounts;
- D3D12_DESCRIPTOR_RANGE mRanges[DescriptorType::Count];
+
+ // Contains the offset into the descriptor heap for the given resource view. Samplers and
+ // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
+ // within each group and tightly packed.
+ //
+ // Dynamic resources are not used here since their descriptors are placed directly in root
+ // parameters.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
+
+ // Contains the shader register this binding is mapped to.
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
+
+ uint32_t mCbvUavSrvDescriptorCount;
+ uint32_t mSamplerDescriptorCount;
+
+ std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
+ std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
SlabAllocator<BindGroup> mBindGroupAllocator;
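The header above replaces the fixed four-range array with per-layout vectors of D3D12_DESCRIPTOR_RANGE whose RegisterSpace is left at kRegisterSpacePlaceholder until pipeline layout creation. A minimal sketch of that pattern, not Dawn code; kPlaceholder, MakeCbvRange and PatchRegisterSpace are illustrative names:

    // Illustrative sketch (not Dawn code) of the placeholder register space scheme.
    #include <d3d12.h>
    #include <vector>

    // Stand-in for kRegisterSpacePlaceholder: a space a user cannot request.
    static constexpr uint32_t kPlaceholder = D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;

    D3D12_DESCRIPTOR_RANGE MakeCbvRange(uint32_t baseShaderRegister, uint32_t count) {
        D3D12_DESCRIPTOR_RANGE range = {};
        range.RangeType = D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
        range.NumDescriptors = count;
        range.BaseShaderRegister = baseShaderRegister;
        range.RegisterSpace = kPlaceholder;  // unknown until pipeline layout creation
        range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
        return range;
    }

    void PatchRegisterSpace(std::vector<D3D12_DESCRIPTOR_RANGE>* ranges, uint32_t groupIndex) {
        for (D3D12_DESCRIPTOR_RANGE& range : *ranges) {
            range.RegisterSpace = groupIndex;  // the bind group index becomes the register space
        }
    }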
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index 399a02c39a4..39fcb839802 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -24,6 +24,7 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -81,12 +82,16 @@ namespace dawn_native { namespace d3d12 {
}
size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
- switch (usage) {
- case wgpu::BufferUsage::Uniform:
- return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
- default:
- return 1;
+ if ((usage & wgpu::BufferUsage::Uniform) != 0) {
+ // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
+ // forbids binding a CBV to an unaligned size. To prevent this, one can always safely
+ // align the buffer size to the CBV data alignment, as other buffer usages
+ // ignore it (no size check). The validation will still enforce bound checks with
+ // the unaligned size returned by GetSize().
+ // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
+ return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
}
+ return 1;
}
} // namespace
@@ -102,12 +107,6 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Buffer::Initialize(bool mappedAtCreation) {
- // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation forbids
- // binding a CBV to an unaligned size. To prevent, one can always safely align the buffer
- // desc size to the CBV data alignment as other buffer usages ignore it (no size check).
- // The validation will still enforce bound checks with the unaligned size returned by
- // GetSize().
- // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
// Allocate at least 4 bytes so clamped accesses are always in bounds.
uint64_t size = std::max(GetSize(), uint64_t(4u));
size_t alignment = D3D12BufferSizeAlignment(GetUsage());
@@ -155,7 +154,7 @@ namespace dawn_native { namespace d3d12 {
mResourceAllocation,
ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
- DAWN_TRY(mResourceAllocation.SetDebugName("Dawn_Buffer"));
+ SetLabelImpl();
// The buffers with mappedAtCreation == true will be initialized in
// BufferBase::MapAtCreation().
@@ -446,6 +445,11 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
+ GetLabel());
+ }
+
MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse));
ASSERT(!IsDataInitialized());
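The relocated comment in D3D12BufferSizeAlignment explains why uniform-capable buffers are rounded up to D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT (256 bytes). A small sketch of that rounding; AlignUp is a stand-in for Dawn's own alignment helpers:

    #include <cstdint>

    // D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT is 256 bytes.
    constexpr uint64_t kCbvAlignment = 256;

    constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment) {
        return (value + alignment - 1) / alignment * alignment;
    }

    // A 4-byte uniform buffer is allocated as 256 bytes; a 300-byte one as 512 bytes.
    static_assert(AlignUp(4, kCbvAlignment) == 256, "");
    static_assert(AlignUp(300, kCbvAlignment) == 512, "");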
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index ed19efac677..d6fcbbdc060 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -48,6 +48,8 @@ namespace dawn_native { namespace d3d12 {
uint64_t size);
MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
const CopyTextureToBufferCmd* copy);
+ // Dawn API
+ void SetLabelImpl() override;
private:
Buffer(Device* device, const BufferDescriptor* descriptor);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 50c5236f3aa..4bdc3b0760e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -981,6 +981,40 @@ namespace dawn_native { namespace d3d12 {
break;
}
+ case Command::SetValidatedBufferLocationsInternal:
+ DoNextSetValidatedBufferLocationsInternal();
+ break;
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ DAWN_TRY(dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset,
+ size));
+ dstBuffer->TrackUsageAndTransitionNow(commandContext,
+ wgpu::BufferUsage::CopyDst);
+ commandList->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), offset,
+ ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+ uploadHandle.startOffset, size);
+ break;
+ }
+
default:
UNREACHABLE();
}
@@ -1304,6 +1338,8 @@ namespace dawn_native { namespace d3d12 {
static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
+
+ commandList->OMSetStencilRef(0);
}
RenderPipeline* lastPipeline = nullptr;
@@ -1351,14 +1387,16 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ ASSERT(!draw->indirectBufferLocation->IsNull());
DAWN_TRY(bindingTracker->Apply(commandContext));
vertexBufferTracker.Apply(commandList, lastPipeline);
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ Buffer* buffer = ToBackend(draw->indirectBufferLocation->GetBuffer());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
- draw->indirectOffset, nullptr, 0);
+ draw->indirectBufferLocation->GetOffset(), nullptr,
+ 0);
break;
}
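The new Command::WriteBuffer path above allocates space from the dynamic uploader, memcpys the caller's bytes into the mapped staging memory, and records a CopyBufferRegion into the destination buffer. A reduced sketch of the same staging pattern against raw D3D12; the staging resource and command list are assumed to already exist, and error handling is omitted:

    #include <cstdint>
    #include <cstring>
    #include <d3d12.h>

    // Sketch only: write CPU data into a GPU buffer through an already-mapped upload buffer.
    void WriteThroughStaging(ID3D12GraphicsCommandList* commandList,
                             ID3D12Resource* dstBuffer,
                             uint64_t dstOffset,
                             ID3D12Resource* stagingBuffer,  // UPLOAD-heap resource
                             void* stagingPtr,               // result of stagingBuffer->Map()
                             uint64_t stagingOffset,
                             const void* data,
                             uint64_t size) {
        std::memcpy(static_cast<uint8_t*>(stagingPtr) + stagingOffset, data, size);
        commandList->CopyBufferRegion(dstBuffer, dstOffset, stagingBuffer, stagingOffset, size);
    }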
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
index 1c903388799..0925b92cda0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
@@ -28,11 +28,11 @@ namespace dawn_native { namespace d3d12 {
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
+ DAWN_TRY(pipeline->Initialize());
return pipeline;
}
- MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
+ MaybeError ComputePipeline::Initialize() {
Device* device = ToBackend(GetDevice());
uint32_t compileFlags = 0;
@@ -43,20 +43,24 @@ namespace dawn_native { namespace d3d12 {
// SPIRV-Cross does matrix multiplication expecting row-major matrices
compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
- ShaderModule* module = ToBackend(descriptor->compute.module);
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule* module = ToBackend(computeStage.module.Get());
D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
CompiledShader compiledShader;
DAWN_TRY_ASSIGN(compiledShader,
- module->Compile(descriptor->compute.entryPoint, SingleShaderStage::Compute,
+ module->Compile(computeStage.entryPoint.c_str(), SingleShaderStage::Compute,
ToBackend(GetLayout()), compileFlags));
d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
auto* d3d12Device = device->GetD3D12Device();
DAWN_TRY(CheckHRESULT(
d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
"D3D12 creating pipeline state"));
+
+ SetLabelImpl();
+
return {};
}
@@ -68,6 +72,11 @@ namespace dawn_native { namespace d3d12 {
return mPipelineState.Get();
}
+ void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
+ GetLabel());
+ }
+
void ComputePipeline::CreateAsync(Device* device,
const ComputePipelineDescriptor* descriptor,
size_t blueprintHash,
@@ -75,8 +84,8 @@ namespace dawn_native { namespace d3d12 {
void* userdata) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(pipeline, descriptor, blueprintHash,
- callback, userdata);
+ std::make_unique<CreateComputePipelineAsyncTask>(pipeline, blueprintHash, callback,
+ userdata);
CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
}
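SetLabelImpl above delegates to a shared SetDebugName helper (declared in UtilsD3D12). A rough, hypothetical sketch of what such a helper can do with ID3D12Object::SetName; the real Dawn helper may format and convert names differently:

    #include <d3d12.h>
    #include <string>

    // Hypothetical sketch of a debug-name helper; the real helper in UtilsD3D12 may differ.
    void SetDebugNameSketch(ID3D12Object* object, const char* prefix, const std::string& label) {
        if (object == nullptr) {
            return;
        }
        std::string name = label.empty() ? prefix : std::string(prefix) + "_" + label;
        std::wstring wide(name.begin(), name.end());  // ASCII-only widening, for brevity
        object->SetName(wide.c_str());
    }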
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
index 37c25d3425c..d945ee2e382 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
@@ -37,10 +37,13 @@ namespace dawn_native { namespace d3d12 {
ID3D12PipelineState* GetPipelineState() const;
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~ComputePipeline() override;
using ComputePipelineBase::ComputePipelineBase;
- MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
+ MaybeError Initialize() override;
ComPtr<ID3D12PipelineState> mPipelineState;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp
new file mode 100644
index 00000000000..b8c28f6c43e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.cpp
@@ -0,0 +1,164 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// D3D11on12Util.cpp: contains utilities for wrapping D3D12 resources with 11on12 so that they
+// can be synchronized through DXGI keyed mutexes.
+
+#include "dawn_native/d3d12/D3D11on12Util.h"
+
+#include "common/HashUtils.h"
+#include "common/Log.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
+ if (d3d11on12Device == nullptr) {
+ return;
+ }
+
+ ComPtr<ID3D11Device> d3d11Device;
+ if (FAILED(d3d11on12Device.As(&d3d11Device))) {
+ return;
+ }
+
+ ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+ d3d11Device->GetImmediateContext(&d3d11DeviceContext);
+
+ ASSERT(d3d11DeviceContext != nullptr);
+
+ // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+ // are not released until work is submitted to the device context and flushed.
+ // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+
+ // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+ // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+ ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+ if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
+ return;
+ }
+
+ d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
+ d3d11DeviceContext2->Flush();
+ }
+
+ D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
+ ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
+ }
+
+ D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
+ ComPtr<ID3D11On12Device> d3d11On12Device)
+ : mD3D11on12Device(std::move(d3d11On12Device)) {
+ }
+
+ D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
+ if (mDXGIKeyedMutex == nullptr) {
+ return;
+ }
+
+ ComPtr<ID3D11Resource> d3d11Resource;
+ if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
+ return;
+ }
+
+ ASSERT(mD3D11on12Device != nullptr);
+
+ ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+ mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+
+ d3d11Resource.Reset();
+ mDXGIKeyedMutex.Reset();
+
+ Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+ }
+
+ ComPtr<IDXGIKeyedMutex> D3D11on12ResourceCacheEntry::GetDXGIKeyedMutex() const {
+ ASSERT(mDXGIKeyedMutex != nullptr);
+ return mDXGIKeyedMutex;
+ }
+
+ size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a) const {
+ size_t hash = 0;
+ HashCombine(&hash, a->mD3D11on12Device.Get());
+ return hash;
+ }
+
+ bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
+ const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const {
+ return a->mD3D11on12Device == b->mD3D11on12Device;
+ }
+
+ D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
+
+ D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
+
+ Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
+ WGPUDevice device,
+ ID3D12Resource* d3d12Resource) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ // The Dawn device and its 11on12 device share the same D3D12 command queue, whereas this
+ // external image could be accessed/produced by multiple Dawn devices. To avoid cross-queue
+ // sharing restrictions, the 11on12-wrapped resource must not be shared between Dawn devices,
+ // which we enforce by using the 11on12 device as the cache key.
+ ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
+ if (d3d11on12Device == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 device for external image";
+ return nullptr;
+ }
+
+ D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
+ auto iter = mCache.find(&blueprint);
+ if (iter != mCache.end()) {
+ return *iter;
+ }
+
+ // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+ // are a viable alternative but are, unfortunately, not available on all versions of Windows
+ // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+ // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+ ComPtr<ID3D11Texture2D> d3d11Texture;
+ D3D11_RESOURCE_FLAGS resourceFlags;
+ resourceFlags.BindFlags = 0;
+ resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+ resourceFlags.CPUAccessFlags = 0;
+ resourceFlags.StructureByteStride = 0;
+ if (FAILED(d3d11on12Device->CreateWrappedResource(
+ d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
+ D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
+ return nullptr;
+ }
+
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+ if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
+ return nullptr;
+ }
+
+ // Keep this cache from growing unbounded.
+ // TODO(dawn:625): Consider using a replacement policy based cache.
+ if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
+ mCache.clear();
+ }
+
+ Ref<D3D11on12ResourceCacheEntry> entry =
+ AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
+ mCache.insert(entry);
+
+ return entry;
+ }
+
+}} // namespace dawn_native::d3d12
\ No newline at end of file
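The cache entry exposes the IDXGIKeyedMutex that guards the wrapped resource; callers bracket their GPU work with AcquireSync/ReleaseSync. An illustrative sketch, assuming acquireKey and releaseKey come from the external image access descriptor:

    #include <windows.h>
    #include <dxgi.h>  // IDXGIKeyedMutex

    // Sketch: bracket use of the shared resource with the keyed mutex.
    bool AccessSharedResource(IDXGIKeyedMutex* keyedMutex, UINT64 acquireKey, UINT64 releaseKey) {
        if (FAILED(keyedMutex->AcquireSync(acquireKey, INFINITE))) {
            return false;
        }
        // ... record and submit the GPU work that touches the wrapped resource ...
        keyedMutex->ReleaseSync(releaseKey);
        return true;
    }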
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h
new file mode 100644
index 00000000000..91db081a8e2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D11on12Util.h
@@ -0,0 +1,89 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D11ON12UTIL_H_
+#define DAWNNATIVE_D3D11ON12UTIL_H_
+
+#include "common/RefCounted.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+#include <dawn_native/DawnNative.h>
+#include <memory>
+#include <unordered_set>
+
+struct ID3D11On12Device;
+struct IDXGIKeyedMutex;
+
+namespace dawn_native { namespace d3d12 {
+
+ // An entry in the cache of 11on12-wrapped resources.
+ class D3D11on12ResourceCacheEntry : public RefCounted {
+ public:
+ D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
+ D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
+ ComPtr<ID3D11On12Device> d3d11on12Device);
+ ~D3D11on12ResourceCacheEntry();
+
+ ComPtr<IDXGIKeyedMutex> GetDXGIKeyedMutex() const;
+
+ // Functors necessary for the
+ // unordered_set<Ref<D3D11on12ResourceCacheEntry>>-based cache.
+ struct HashFunc {
+ size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
+ };
+
+ struct EqualityFunc {
+ bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
+ const Ref<D3D11on12ResourceCacheEntry> b) const;
+ };
+
+ private:
+ ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
+ ComPtr<ID3D11On12Device> mD3D11on12Device;
+ };
+
+ // |D3D11on12ResourceCache| maintains a cache of 11on12-wrapped resources.
+ // Each entry represents an 11on12 resource that is exclusively accessed by one Dawn device.
+ // Since each Dawn device creates and stores an 11on12 device, the 11on12 device is used as
+ // the cache key, which ensures the same wrapped resource is re-used and also fully released.
+ //
+ // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource and
+ // running the special release code on every ProduceTexture(device) call.
+ class D3D11on12ResourceCache {
+ public:
+ D3D11on12ResourceCache();
+ ~D3D11on12ResourceCache();
+
+ Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
+ WGPUDevice device,
+ ID3D12Resource* d3d12Resource);
+
+ private:
+ // TODO(dawn:625): Figure out a large enough cache size.
+ static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
+
+ // 11on12 resource cache entries are refcounted to ensure that, if the ExternalImage outlives
+ // the Dawn texture (or vice-versa), the 11on12-wrapped resource is always fully released
+ // without waiting for the Dawn device to shut down.
+ using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
+ D3D11on12ResourceCacheEntry::HashFunc,
+ D3D11on12ResourceCacheEntry::EqualityFunc>;
+
+ Cache mCache;
+ };
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D11ON12UTIL_H_
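The cache keys entries by the ID3D11On12Device through custom hash and equality functors. A simplified, self-contained sketch of the same shape; std::shared_ptr and void* stand in for Dawn's Ref<> and the 11on12 device:

    #include <cstddef>
    #include <functional>
    #include <memory>
    #include <unordered_set>

    // Entries compare and hash by the (11on12) device they were created from,
    // so at most one wrapped resource exists per device.
    struct Entry {
        void* device;  // stands in for ID3D11On12Device*
    };

    struct EntryHash {
        size_t operator()(const std::shared_ptr<Entry>& e) const {
            return std::hash<void*>()(e->device);
        }
    };

    struct EntryEqual {
        bool operator()(const std::shared_ptr<Entry>& a, const std::shared_ptr<Entry>& b) const {
            return a->device == b->device;
        }
    };

    using Cache = std::unordered_set<std::shared_ptr<Entry>, EntryHash, EntryEqual>;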
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index fc10fe3a434..bc9df608c65 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -20,6 +20,7 @@
#include "common/Log.h"
#include "common/Math.h"
#include "common/SwapChainUtils.h"
+#include "dawn_native/d3d12/D3D11on12Util.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
@@ -62,9 +63,18 @@ namespace dawn_native { namespace d3d12 {
mFormat(descriptor->format),
mMipLevelCount(descriptor->mipLevelCount),
mSampleCount(descriptor->sampleCount) {
- ASSERT(descriptor->nextInChain == nullptr);
+ ASSERT(!descriptor->nextInChain ||
+ descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
+ if (descriptor->nextInChain) {
+ mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
+ descriptor->nextInChain)
+ ->internalUsage;
+ }
+ mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
}
+ ExternalImageDXGI::~ExternalImageDXGI() = default;
+
WGPUTexture ExternalImageDXGI::ProduceTexture(
WGPUDevice device,
const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
@@ -84,10 +94,26 @@ namespace dawn_native { namespace d3d12 {
textureDescriptor.mipLevelCount = mMipLevelCount;
textureDescriptor.sampleCount = mSampleCount;
+ DawnTextureInternalUsageDescriptor internalDesc = {};
+ if (mUsageInternal) {
+ textureDescriptor.nextInChain = &internalDesc;
+ internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
+ internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+ }
+
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
+ mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
+ if (d3d11on12Resource == nullptr) {
+ dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
+ return nullptr;
+ }
+
Ref<TextureBase> texture = backendDevice->CreateExternalTexture(
- &textureDescriptor, mD3D12Resource, ExternalMutexSerial(descriptor->acquireMutexKey),
+ &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
+ ExternalMutexSerial(descriptor->acquireMutexKey),
ExternalMutexSerial(descriptor->releaseMutexKey), descriptor->isSwapChainTexture,
descriptor->isInitialized);
+
return reinterpret_cast<WGPUTexture>(texture.Detach());
}
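ExternalImageDXGI now accepts a chained DawnTextureInternalUsageDescriptor on the descriptor's nextInChain. A generic sketch of walking such an extension chain for a given sType; the header name is an assumption and varies by Dawn version:

    #include <dawn/webgpu.h>  // assumed header; provides WGPUChainedStruct / WGPUSType

    // Sketch: walk an extension chain looking for a particular sType.
    const WGPUChainedStruct* FindInChain(const WGPUChainedStruct* chain, WGPUSType type) {
        for (const WGPUChainedStruct* link = chain; link != nullptr; link = link->next) {
            if (link->sType == type) {
                return link;
            }
        }
        return nullptr;
    }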
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index 628b48e9417..3b96092efd0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -23,6 +23,7 @@
#include "dawn_native/d3d12/CommandAllocatorManager.h"
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "dawn_native/d3d12/ComputePipelineD3D12.h"
+#include "dawn_native/d3d12/D3D11on12Util.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
@@ -72,14 +73,17 @@ namespace dawn_native { namespace d3d12 {
CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
"D3D12 create command queue"));
- // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
- // command queue doesn't support timestamps, D3D12_COMMAND_LIST_TYPE_DIRECT always support
- // timestamps.
- uint64_t frequency;
- DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
- "D3D12 get timestamp frequency"));
- // Calculate the period in nanoseconds by the frequency.
- mTimestampPeriod = static_cast<float>(1e9) / frequency;
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
+ // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
+ // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
+ // always support timestamps except where there are bugs in Windows container and vGPU
+ // implementations.
+ uint64_t frequency;
+ DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
+ "D3D12 get timestamp frequency"));
+ // Calculate the period in nanoseconds by the frequency.
+ mTimestampPeriod = static_cast<float>(1e9) / frequency;
+ }
// If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
// value.
@@ -199,7 +203,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError Device::ApplyUseDxcToggle() {
if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
ForceSetToggle(Toggle::UseDXC, false);
- } else if (IsExtensionEnabled(Extension::ShaderFloat16)) {
+ } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
// Currently we can only use DXC to compile HLSL shaders using float16.
ForceSetToggle(Toggle::UseDXC, true);
}
@@ -318,8 +322,9 @@ namespace dawn_native { namespace d3d12 {
return BindGroup::Create(this, descriptor);
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) {
- return BindGroupLayout::Create(this, descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
@@ -341,9 +346,9 @@ namespace dawn_native { namespace d3d12 {
const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::Create(this, descriptor);
+ return RenderPipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return Sampler::Create(this, descriptor);
@@ -377,6 +382,11 @@ namespace dawn_native { namespace d3d12 {
void* userdata) {
ComputePipeline::CreateAsync(this, descriptor, blueprintHash, callback, userdata);
}
+ void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(renderPipeline, callback, userdata);
+ }
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer =
@@ -458,90 +468,43 @@ namespace dawn_native { namespace d3d12 {
initialUsage);
}
- Ref<TextureBase> Device::CreateExternalTexture(const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized) {
+ Ref<TextureBase> Device::CreateExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized) {
Ref<Texture> dawnTexture;
- if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
- acquireMutexKey, releaseMutexKey,
- isSwapChainTexture, isInitialized),
- &dawnTexture)) {
+ if (ConsumedError(
+ Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+ std::move(d3d11on12Resource), acquireMutexKey,
+ releaseMutexKey, isSwapChainTexture, isInitialized),
+ &dawnTexture)) {
return nullptr;
}
return {dawnTexture};
}
- // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
- // are a viable alternative but are, unfortunately, not available on all versions of Windows
- // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
- // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
- ResultOrError<ComPtr<IDXGIKeyedMutex>> Device::CreateKeyedMutexForTexture(
- ID3D12Resource* d3d12Resource) {
+ ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
if (mD3d11On12Device == nullptr) {
ComPtr<ID3D11Device> d3d11Device;
- ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
D3D_FEATURE_LEVEL d3dFeatureLevel;
IUnknown* const iUnknownQueue = mCommandQueue.Get();
- DAWN_TRY(CheckHRESULT(GetFunctions()->d3d11on12CreateDevice(
- mD3d12Device.Get(), 0, nullptr, 0, &iUnknownQueue, 1, 1,
- &d3d11Device, &d3d11DeviceContext, &d3dFeatureLevel),
- "D3D12 11on12 device create"));
+ if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
+ &iUnknownQueue, 1, 1, &d3d11Device,
+ nullptr, &d3dFeatureLevel))) {
+ return nullptr;
+ }
ComPtr<ID3D11On12Device> d3d11on12Device;
- DAWN_TRY(CheckHRESULT(d3d11Device.As(&d3d11on12Device),
- "D3D12 QueryInterface ID3D11Device to ID3D11On12Device"));
+ HRESULT hr = d3d11Device.As(&d3d11on12Device);
+ ASSERT(SUCCEEDED(hr));
- ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
- DAWN_TRY(
- CheckHRESULT(d3d11DeviceContext.As(&d3d11DeviceContext2),
- "D3D12 QueryInterface ID3D11DeviceContext to ID3D11DeviceContext2"));
-
- mD3d11On12DeviceContext = std::move(d3d11DeviceContext2);
mD3d11On12Device = std::move(d3d11on12Device);
}
-
- ComPtr<ID3D11Texture2D> d3d11Texture;
- D3D11_RESOURCE_FLAGS resourceFlags;
- resourceFlags.BindFlags = 0;
- resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
- resourceFlags.CPUAccessFlags = 0;
- resourceFlags.StructureByteStride = 0;
- DAWN_TRY(CheckHRESULT(mD3d11On12Device->CreateWrappedResource(
- d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
- D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)),
- "D3D12 creating a wrapped resource"));
-
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
- DAWN_TRY(CheckHRESULT(d3d11Texture.As(&dxgiKeyedMutex),
- "D3D12 QueryInterface ID3D11Texture2D to IDXGIKeyedMutex"));
-
- return std::move(dxgiKeyedMutex);
- }
-
- void Device::ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex) {
- ComPtr<ID3D11Resource> d3d11Resource;
- HRESULT hr = dxgiKeyedMutex.As(&d3d11Resource);
- if (FAILED(hr)) {
- return;
- }
-
- ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
- mD3d11On12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
-
- d3d11Resource.Reset();
- dxgiKeyedMutex.Reset();
-
- // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
- // are not released until work is submitted to the device context and flushed.
- // The most minimal work we can get away with is issuing a TiledResourceBarrier.
-
- // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
- // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
- mD3d11On12DeviceContext->TiledResourceBarrier(nullptr, nullptr);
- mD3d11On12DeviceContext->Flush();
+ return mD3d11On12Device;
}
const D3D12DeviceInfo& Device::GetDeviceInfo() const {
@@ -570,9 +533,14 @@ namespace dawn_native { namespace d3d12 {
if (gpu_info::IsIntel(pciInfo.vendorId) &&
(gpu_info::IsSkylake(pciInfo.deviceId) || gpu_info::IsKabylake(pciInfo.deviceId) ||
gpu_info::IsCoffeelake(pciInfo.deviceId))) {
- SetToggle(
- Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
- true);
+ constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
+ if (gpu_info::CompareD3DDriverVersion(pciInfo.vendorId,
+ ToBackend(GetAdapter())->GetDriverVersion(),
+ kFirstDriverVersionWithFix) < 0) {
+ SetToggle(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ true);
+ }
}
}
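The timestamp block above converts the queue frequency returned by GetTimestampFrequency (ticks per second) into a period in nanoseconds per tick. The arithmetic, isolated into a small sketch:

    #include <cstdint>

    // GetTimestampFrequency() reports ticks per second; Dawn stores nanoseconds per tick.
    float TimestampPeriodNs(uint64_t frequencyTicksPerSecond) {
        return static_cast<float>(1e9) / frequencyTicksPerSecond;
    }
    // e.g. a 10 MHz timestamp clock gives a period of 100 ns per tick.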
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 152fc58022f..186e29ee70f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -124,13 +124,13 @@ namespace dawn_native { namespace d3d12 {
Ref<TextureBase> CreateExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
ExternalMutexSerial acquireMutexKey,
ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture,
bool isInitialized);
- ResultOrError<ComPtr<IDXGIKeyedMutex>> CreateKeyedMutexForTexture(
- ID3D12Resource* d3d12Resource);
- void ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex);
+
+ ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
void InitTogglesFromDriver();
@@ -145,7 +145,8 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) override;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
@@ -154,7 +155,7 @@ namespace dawn_native { namespace d3d12 {
const PipelineLayoutDescriptor* descriptor) override;
ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) override;
@@ -176,6 +177,9 @@ namespace dawn_native { namespace d3d12 {
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;
@@ -192,9 +196,8 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12CommandQueue> mCommandQueue;
ComPtr<ID3D12SharingContract> mD3d12SharingContract;
- // 11on12 device and device context corresponding to mCommandQueue
+ // 11on12 device corresponding to mCommandQueue
ComPtr<ID3D11On12Device> mD3d11On12Device;
- ComPtr<ID3D11DeviceContext2> mD3d11On12DeviceContext;
ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 8565eb0df7d..372b61b6d4b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -13,6 +13,7 @@
// limitations under the License.
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
+#include <sstream>
#include "common/Assert.h"
#include "common/BitSetIterator.h"
@@ -69,9 +70,15 @@ namespace dawn_native { namespace d3d12 {
// descriptor.
std::vector<D3D12_ROOT_PARAMETER> rootParameters;
- // Ranges are D3D12_DESCRIPTOR_RANGE_TYPE_(SRV|UAV|CBV|SAMPLER)
- // They are grouped together so each bind group has at most 4 ranges
- D3D12_DESCRIPTOR_RANGE ranges[kMaxBindGroups * 4];
+ size_t rangesCount = 0;
+ for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+ rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
+ bindGroupLayout->GetSamplerDescriptorRanges().size();
+ }
+
+ // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
+ std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
uint32_t rangeIndex = 0;
@@ -82,7 +89,8 @@ namespace dawn_native { namespace d3d12 {
// bind group index. Returns whether or not the parameter was set. A root parameter is
// not set if the number of ranges is 0.
auto SetRootDescriptorTable =
- [&](uint32_t rangeCount, const D3D12_DESCRIPTOR_RANGE* descriptorRanges) -> bool {
+ [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
+ auto rangeCount = descriptorRanges.size();
if (rangeCount == 0) {
return false;
}
@@ -93,8 +101,9 @@ namespace dawn_native { namespace d3d12 {
rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
- for (uint32_t i = 0; i < rangeCount; ++i) {
- ranges[rangeIndex] = descriptorRanges[i];
+ for (auto& range : descriptorRanges) {
+ ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
+ ranges[rangeIndex] = range;
ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
rangeIndex++;
}
@@ -104,19 +113,13 @@ namespace dawn_native { namespace d3d12 {
return true;
};
- if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorTableSize(),
- bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
+ if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
}
-
- if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorTableSize(),
- bindGroupLayout->GetSamplerDescriptorRanges())) {
+ if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
}
- // Get calculated shader register for root descriptors
- const auto& shaderRegisters = bindGroupLayout->GetBindingOffsets();
-
// Init root descriptors in root signatures for dynamic buffer bindings.
// These are packed at the beginning of the layout binding info.
for (BindingIndex dynamicBindingIndex{0};
@@ -135,7 +138,8 @@ namespace dawn_native { namespace d3d12 {
// Setup root descriptor.
D3D12_ROOT_DESCRIPTOR rootDescriptor;
- rootDescriptor.ShaderRegister = shaderRegisters[dynamicBindingIndex];
+ rootDescriptor.ShaderRegister =
+ bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
// Set root descriptors in root signatures.
@@ -152,24 +156,9 @@ namespace dawn_native { namespace d3d12 {
}
}
- // Since Tint's HLSL writer doesn't currently map sets to spaces, we use the default space
- // (0).
- mFirstIndexOffsetRegisterSpace = 0;
- BindGroupIndex firstOffsetGroup{mFirstIndexOffsetRegisterSpace};
- if (GetBindGroupLayoutsMask()[firstOffsetGroup]) {
- // Find the last register used on firstOffsetGroup.
- uint32_t maxRegister = 0;
- for (uint32_t shaderRegister :
- ToBackend(GetBindGroupLayout(firstOffsetGroup))->GetBindingOffsets()) {
- if (shaderRegister > maxRegister) {
- maxRegister = shaderRegister;
- }
- }
- mFirstIndexOffsetShaderRegister = maxRegister + 1;
- } else {
- // firstOffsetGroup is not in use, we can use the first register.
- mFirstIndexOffsetShaderRegister = 0;
- }
+ // Make sure that we added exactly the number of elements we expected. If we added more,
+ // |ranges| would have resized and the pointers in the |rootParameter|s would be invalid.
+ ASSERT(rangeIndex == rangesCount);
D3D12_ROOT_PARAMETER indexOffsetConstants{};
indexOffsetConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_VERTEX;
@@ -178,8 +167,8 @@ namespace dawn_native { namespace d3d12 {
// NOTE: We should consider delaying root signature creation until we know how many values
// we need
indexOffsetConstants.Constants.Num32BitValues = 2;
- indexOffsetConstants.Constants.RegisterSpace = mFirstIndexOffsetRegisterSpace;
- indexOffsetConstants.Constants.ShaderRegister = mFirstIndexOffsetShaderRegister;
+ indexOffsetConstants.Constants.RegisterSpace = kReservedRegisterSpace;
+ indexOffsetConstants.Constants.ShaderRegister = kFirstOffsetInfoBaseRegister;
mFirstIndexOffsetParameterIndex = rootParameters.size();
// NOTE: We should consider moving this entry to earlier in the root signature since offsets
// would need to be updated often
@@ -195,10 +184,20 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3DBlob> signature;
ComPtr<ID3DBlob> error;
- DAWN_TRY(CheckHRESULT(
- device->GetFunctions()->d3d12SerializeRootSignature(
- &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error),
- "D3D12 serialize root signature"));
+ HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
+ &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
+ if (DAWN_UNLIKELY(FAILED(hr))) {
+ std::ostringstream messageStream;
+ if (error) {
+ messageStream << static_cast<const char*>(error->GetBufferPointer());
+
+ // |error| is observed to always end with a \n, but is not
+ // specified to do so, so we add an extra newline just in case.
+ messageStream << std::endl;
+ }
+ messageStream << "D3D12 serialize root signature";
+ DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
+ }
DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
0, signature->GetBufferPointer(), signature->GetBufferSize(),
IID_PPV_ARGS(&mRootSignature)),
@@ -231,11 +230,11 @@ namespace dawn_native { namespace d3d12 {
}
uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
- return mFirstIndexOffsetRegisterSpace;
+ return kReservedRegisterSpace;
}
uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
- return mFirstIndexOffsetShaderRegister;
+ return kFirstOffsetInfoBaseRegister;
}
uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
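The root signature construction above pre-counts the descriptor ranges so the ranges vector never reallocates while root parameters hold pointers into it. A standalone sketch of that invariant, with hypothetical helper and parameter names:

    #include <d3d12.h>
    #include <vector>

    // Sketch of the invariant: size `ranges` up front, then only write into it,
    // so the pointers stored in the root parameters never dangle.
    void BuildDescriptorTables(const std::vector<std::vector<D3D12_DESCRIPTOR_RANGE>>& perGroup,
                               std::vector<D3D12_DESCRIPTOR_RANGE>* ranges,
                               std::vector<D3D12_ROOT_PARAMETER>* rootParameters) {
        size_t total = 0;
        for (const auto& groupRanges : perGroup) {
            total += groupRanges.size();
        }
        ranges->resize(total);  // no reallocation happens below this point

        size_t rangeIndex = 0;
        for (const auto& groupRanges : perGroup) {
            if (groupRanges.empty()) {
                continue;
            }
            D3D12_ROOT_PARAMETER param = {};
            param.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
            param.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
            param.DescriptorTable.NumDescriptorRanges = static_cast<UINT>(groupRanges.size());
            param.DescriptorTable.pDescriptorRanges = &(*ranges)[rangeIndex];  // stable pointer
            for (const D3D12_DESCRIPTOR_RANGE& range : groupRanges) {
                (*ranges)[rangeIndex++] = range;
            }
            rootParameters->push_back(param);
        }
    }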
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index f20923543bf..b1efc0d00fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -15,6 +15,7 @@
#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#include "common/Constants.h"
#include "common/ityp_array.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/PipelineLayout.h"
@@ -22,6 +23,10 @@
namespace dawn_native { namespace d3d12 {
+ // We reserve a register space that a user cannot use.
+ static constexpr uint32_t kReservedRegisterSpace = kMaxBindGroups + 1;
+ static constexpr uint32_t kFirstOffsetInfoBaseRegister = 0;
+
class Device;
class PipelineLayout final : public PipelineLayoutBase {
@@ -53,8 +58,6 @@ namespace dawn_native { namespace d3d12 {
ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
kMaxBindGroups>
mDynamicRootParameterIndices;
- uint32_t mFirstIndexOffsetRegisterSpace;
- uint32_t mFirstIndexOffsetShaderRegister;
uint32_t mFirstIndexOffsetParameterIndex;
ComPtr<ID3D12RootSignature> mRootSignature;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
index 147531190ab..47da954c228 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
@@ -36,7 +36,6 @@ namespace dawn_native { namespace d3d12 {
D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
switch (storeOp) {
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
case wgpu::StoreOp::Store:
return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
@@ -56,7 +55,7 @@ namespace dawn_native { namespace d3d12 {
ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
// Clear or preserve the resolve source.
- if (storeOp == wgpu::StoreOp::Discard || storeOp == wgpu::StoreOp::Clear) {
+ if (storeOp == wgpu::StoreOp::Discard) {
resolveParameters.PreserveResolveSource = false;
} else if (storeOp == wgpu::StoreOp::Store) {
resolveParameters.PreserveResolveSource = true;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index a5efaaeb751..ee2fde6b4e6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -16,6 +16,7 @@
#include "common/Assert.h"
#include "common/Log.h"
+#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
@@ -173,15 +174,6 @@ namespace dawn_native { namespace d3d12 {
return D3D12_BLEND_BLEND_FACTOR;
case wgpu::BlendFactor::OneMinusConstant:
return D3D12_BLEND_INV_BLEND_FACTOR;
-
- // Deprecated blend factors should be normalized prior to this call.
- case wgpu::BlendFactor::SrcColor:
- case wgpu::BlendFactor::OneMinusSrcColor:
- case wgpu::BlendFactor::DstColor:
- case wgpu::BlendFactor::OneMinusDstColor:
- case wgpu::BlendFactor::BlendColor:
- case wgpu::BlendFactor::OneMinusBlendColor:
- UNREACHABLE();
}
}
@@ -274,7 +266,7 @@ namespace dawn_native { namespace d3d12 {
}
}
- D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilStateFaceDescriptor descriptor) {
+ D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
D3D12_DEPTH_STENCILOP_DESC desc;
desc.StencilFailOp = StencilOp(descriptor.failOp);
@@ -324,15 +316,13 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
- ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
+ Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
Device* device,
const RenderPipelineDescriptor* descriptor) {
- Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline;
+ return AcquireRef(new RenderPipeline(device, descriptor));
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize() {
Device* device = ToBackend(GetDevice());
uint32_t compileFlags = 0;
@@ -349,24 +339,19 @@ namespace dawn_native { namespace d3d12 {
D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
- PerStage<const char*> entryPoints;
- entryPoints[SingleShaderStage::Vertex] = descriptor->vertex.entryPoint;
- entryPoints[SingleShaderStage::Fragment] = descriptor->fragment->entryPoint;
-
- PerStage<ShaderModule*> modules;
- modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertex.module);
- modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragment->module);
+ PerStage<ProgrammableStage> pipelineStages = GetAllStages();
PerStage<D3D12_SHADER_BYTECODE*> shaders;
shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
PerStage<CompiledShader> compiledShader;
- wgpu::ShaderStage renderStages = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
- for (auto stage : IterateStages(renderStages)) {
+
+ for (auto stage : IterateStages(GetStageMask())) {
DAWN_TRY_ASSIGN(compiledShader[stage],
- modules[stage]->Compile(entryPoints[stage], stage,
- ToBackend(GetLayout()), compileFlags));
+ ToBackend(pipelineStages[stage].module)
+ ->Compile(pipelineStages[stage].entryPoint.c_str(), stage,
+ ToBackend(GetLayout()), compileFlags));
*shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
}
@@ -426,6 +411,9 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
&descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
"D3D12 create graphics pipeline state"));
+
+ SetLabelImpl();
+
return {};
}
@@ -445,6 +433,10 @@ namespace dawn_native { namespace d3d12 {
return mFirstOffsetInfo;
}
+ void RenderPipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+ }
+
D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
unsigned int count = 0;
@@ -478,4 +470,13 @@ namespace dawn_native { namespace d3d12 {
return inputLayoutDescriptor;
}
+ void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
index c99d0a12ff3..be0efef500e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
@@ -26,20 +26,26 @@ namespace dawn_native { namespace d3d12 {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<Ref<RenderPipeline>> Create(
- Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
RenderPipeline() = delete;
+ MaybeError Initialize() override;
+
D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
ID3D12PipelineState* GetPipelineState() const;
const FirstOffsetInfo& GetFirstOffsetInfo() const;
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~RenderPipeline() override;
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
index 10a43d80761..c7f8c68d1df 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -40,11 +40,4 @@ namespace dawn_native { namespace d3d12 {
D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
return mResource->GetGPUVirtualAddress();
}
-
- MaybeError ResourceHeapAllocation::SetDebugName(const char* name) {
- DAWN_TRY(CheckHRESULT(
- mResource->SetPrivateData(WKPDID_D3DDebugObjectName, std::strlen(name), name),
- "ID3D12Resource::SetName"));
- return {};
- }
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
index 892a72cc4e4..7f1fe0a9d85 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
@@ -35,7 +35,6 @@ namespace dawn_native { namespace d3d12 {
ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
void Invalidate() override;
- MaybeError SetDebugName(const char* name);
ID3D12Resource* GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 3a598901e54..e7395cf77a4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -28,10 +28,226 @@
#include <d3dcompiler.h>
#include <tint/tint.h>
+#include <map>
+#include <sstream>
+#include <unordered_map>
namespace dawn_native { namespace d3d12 {
namespace {
+ ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
+ ComPtr<IDxcVersionInfo> versionInfo;
+ DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
+ "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
+
+ uint32_t compilerMajor, compilerMinor;
+ DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
+ "IDxcVersionInfo::GetVersion"));
+
+ // Pack both into a single version number.
+ return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
+ }
+
+ uint64_t GetD3DCompilerVersion() {
+ return D3D_COMPILER_VERSION;
+ }
+
+ struct CompareBindingPoint {
+ constexpr bool operator()(const tint::transform::BindingPoint& lhs,
+ const tint::transform::BindingPoint& rhs) const {
+ if (lhs.group != rhs.group) {
+ return lhs.group < rhs.group;
+ } else {
+ return lhs.binding < rhs.binding;
+ }
+ }
+ };
+
+ void Serialize(std::stringstream& output, const tint::ast::Access& access) {
+ output << access;
+ }
+
+ void Serialize(std::stringstream& output,
+ const tint::transform::BindingPoint& binding_point) {
+ output << "(BindingPoint";
+ output << " group=" << binding_point.group;
+ output << " binding=" << binding_point.binding;
+ output << ")";
+ }
+
+ template <typename T>
+ void Serialize(std::stringstream& output,
+ const std::unordered_map<tint::transform::BindingPoint, T>& map) {
+ output << "(map";
+
+ std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
+ map.end());
+ for (auto& entry : sorted) {
+ output << " ";
+ Serialize(output, entry.first);
+ output << "=";
+ Serialize(output, entry.second);
+ }
+ output << ")";
+ }
+
+ // The inputs to a shader compilation. These have been intentionally isolated from the
+ // device to help ensure that the pipeline cache key contains all inputs for compilation.
+ struct ShaderCompilationRequest {
+ enum Compiler { FXC, DXC };
+
+ // Common inputs
+ Compiler compiler;
+ const tint::Program* program;
+ const char* entryPointName;
+ SingleShaderStage stage;
+ uint32_t compileFlags;
+ bool disableSymbolRenaming;
+ tint::transform::BindingRemapper::BindingPoints bindingPoints;
+ tint::transform::BindingRemapper::AccessControls accessControls;
+ bool isRobustnessEnabled;
+
+ // FXC/DXC common inputs
+ bool disableWorkgroupInit;
+
+ // FXC inputs
+ uint64_t fxcVersion;
+
+ // DXC inputs
+ uint64_t dxcVersion;
+ const D3D12DeviceInfo* deviceInfo;
+ bool hasShaderFloat16Feature;
+
+ static ResultOrError<ShaderCompilationRequest> Create(
+ const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t compileFlags,
+ const Device* device,
+ const tint::Program* program,
+ const BindingInfoArray& moduleBindingInfo) {
+ Compiler compiler;
+ uint64_t dxcVersion = 0;
+ if (device->IsToggleEnabled(Toggle::UseDXC)) {
+ compiler = Compiler::DXC;
+ DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
+ } else {
+ compiler = Compiler::FXC;
+ }
+
+ using tint::transform::BindingPoint;
+ using tint::transform::BindingRemapper;
+
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify the
+ // Tint AST to make the "bindings" decoration match the offset chosen by
+ // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
+ // assigned to each interface variable.
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+ for (const auto& it : groupBindingInfo) {
+ BindingNumber binding = it.first;
+ auto const& bindingInfo = it.second;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+ bgl->GetShaderRegister(bindingIndex)};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+ }
+
+ // Declaring a read-only storage buffer in HLSL but specifying a storage
+ // buffer in the BGL produces the wrong output. Force read-only storage
+ // buffer bindings to be treated as UAV instead of SRV. An internal storage
+ // buffer is a storage buffer used by Dawn's internal pipelines.
+ const bool forceStorageBufferAsUAV =
+ (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
+ (bgl->GetBindingInfo(bindingIndex).buffer.type ==
+ wgpu::BufferBindingType::Storage ||
+ bgl->GetBindingInfo(bindingIndex).buffer.type ==
+ kInternalStorageBufferBinding));
+ if (forceStorageBufferAsUAV) {
+ accessControls.emplace(srcBindingPoint, tint::ast::Access::kReadWrite);
+ }
+ }
+ }
+
+ ShaderCompilationRequest request;
+ request.compiler = compiler;
+ request.program = program;
+ request.entryPointName = entryPointName;
+ request.stage = stage;
+ request.compileFlags = compileFlags;
+ request.disableSymbolRenaming =
+ device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
+ request.bindingPoints = std::move(bindingPoints);
+ request.accessControls = std::move(accessControls);
+ request.isRobustnessEnabled = device->IsRobustnessEnabled();
+ request.disableWorkgroupInit =
+ device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+ request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
+ request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
+ request.deviceInfo = &device->GetDeviceInfo();
+ request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
+ return std::move(request);
+ }
+
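The remapping loop in Create() above reduces to building a map from the WGSL-visible (group, binding) pair to the (space, register) slot chosen by d3d12::BindGroupLayout, recording only the entries that actually differ. A rough illustration with plain structs; Slot and MaybeRemap are illustrative names under that assumption, not Dawn APIs (the real code uses tint::transform::BindingPoint and bgl->GetShaderRegister()).

    #include <cstdint>
    #include <map>

    struct Slot {
        uint32_t space;  // HLSL register space (the bind group index here)
        uint32_t reg;    // shader-visible register chosen by the backend layout
        bool operator<(const Slot& o) const {
            return space != o.space ? space < o.space : reg < o.reg;
        }
        bool operator!=(const Slot& o) const { return space != o.space || reg != o.reg; }
    };

    // Only record entries where source and destination differ, mirroring the
    // `if (srcBindingPoint != dstBindingPoint)` check in Create() above.
    void MaybeRemap(std::map<Slot, Slot>& srcToDst, Slot wgslBinding, Slot backendSlot) {
        if (wgslBinding != backendSlot) {
            srcToDst.emplace(wgslBinding, backendSlot);
        }
    }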
+ ResultOrError<PersistentCacheKey> CreateCacheKey() const {
+ // Generate the WGSL from the Tint program so it's normalized.
+ // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
+ // compact representation.
+ auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
+ if (!result.success) {
+ std::ostringstream errorStream;
+ errorStream << "Tint WGSL failure:" << std::endl;
+ errorStream << "Generator: " << result.error << std::endl;
+ return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
+ }
+
+ std::stringstream stream;
+
+ // Prefix the key with the type to avoid collisions from another type that could
+ // have the same key.
+ stream << static_cast<uint32_t>(PersistentKeyType::Shader);
+ stream << "\n";
+
+ stream << result.wgsl.length();
+ stream << "\n";
+
+ stream << result.wgsl;
+ stream << "\n";
+
+ stream << "(ShaderCompilationRequest";
+ stream << " compiler=" << compiler;
+ stream << " entryPointName=" << entryPointName;
+ stream << " stage=" << uint32_t(stage);
+ stream << " compileFlags=" << compileFlags;
+ stream << " disableSymbolRenaming=" << disableSymbolRenaming;
+
+ stream << " bindingPoints=";
+ Serialize(stream, bindingPoints);
+
+ stream << " accessControls=";
+ Serialize(stream, accessControls);
+
+ stream << " shaderModel=" << deviceInfo->shaderModel;
+ stream << " disableWorkgroupInit=" << disableWorkgroupInit;
+ stream << " isRobustnessEnabled=" << isRobustnessEnabled;
+ stream << " fxcVersion=" << fxcVersion;
+ stream << " dxcVersion=" << dxcVersion;
+ stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
+ stream << ")";
+ stream << "\n";
+
+ return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
+ std::istreambuf_iterator<char>{});
+ }
+ };
+
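One detail of CreateCacheKey() worth calling out: the normalized WGSL is written with an explicit length prefix, and the whole key starts with PersistentKeyType::Shader, so two different (source, settings) pairs cannot concatenate into the same byte stream. A toy illustration of the ambiguity the prefix avoids; NaiveKey and PrefixedKey are hypothetical helpers, not part of Dawn.

    #include <cassert>
    #include <string>

    std::string NaiveKey(const std::string& source, const std::string& settings) {
        return source + settings;  // "ab" + "c" and "a" + "bc" both yield "abc"
    }

    std::string PrefixedKey(const std::string& source, const std::string& settings) {
        return std::to_string(source.length()) + "\n" + source + "\n" + settings;
    }

    int main() {
        assert(NaiveKey("ab", "c") == NaiveKey("a", "bc"));        // collision
        assert(PrefixedKey("ab", "c") != PrefixedKey("a", "bc"));  // disambiguated
        return 0;
    }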
std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
std::vector<const wchar_t*> arguments;
if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
@@ -83,84 +299,172 @@ namespace dawn_native { namespace d3d12 {
return arguments;
}
- } // anonymous namespace
+ ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
+ IDxcCompiler* dxcCompiler,
+ const ShaderCompilationRequest& request,
+ const std::string& hlslSource) {
+ ComPtr<IDxcBlobEncoding> sourceBlob;
+ DAWN_TRY(
+ CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
+ hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
+ "DXC create blob"));
+
+ std::wstring entryPointW;
+ DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
+
+ std::vector<const wchar_t*> arguments =
+ GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
+
+ ComPtr<IDxcOperationResult> result;
+ DAWN_TRY(CheckHRESULT(
+ dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
+ request.deviceInfo->shaderProfiles[request.stage].c_str(),
+ arguments.data(), arguments.size(), nullptr, 0, nullptr,
+ &result),
+ "DXC compile"));
+
+ HRESULT hr;
+ DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+
+ if (FAILED(hr)) {
+ ComPtr<IDxcBlobEncoding> errors;
+ DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+
+ std::string message = std::string("DXC compile failed with ") +
+ static_cast<char*>(errors->GetBufferPointer());
+ return DAWN_VALIDATION_ERROR(message);
+ }
- ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(Device* device,
- SingleShaderStage stage,
- const std::string& hlslSource,
- const char* entryPoint,
- uint32_t compileFlags) {
- ComPtr<IDxcLibrary> dxcLibrary = device->GetDxcLibrary();
+ ComPtr<IDxcBlob> compiledShader;
+ DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
+ return std::move(compiledShader);
+ }
- ComPtr<IDxcBlobEncoding> sourceBlob;
- DAWN_TRY(CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
- hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
- "DXC create blob"));
+ ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
+ const ShaderCompilationRequest& request,
+ const std::string& hlslSource) {
+ const char* targetProfile = nullptr;
+ switch (request.stage) {
+ case SingleShaderStage::Vertex:
+ targetProfile = "vs_5_1";
+ break;
+ case SingleShaderStage::Fragment:
+ targetProfile = "ps_5_1";
+ break;
+ case SingleShaderStage::Compute:
+ targetProfile = "cs_5_1";
+ break;
+ }
- ComPtr<IDxcCompiler> dxcCompiler = device->GetDxcCompiler();
+ ComPtr<ID3DBlob> compiledShader;
+ ComPtr<ID3DBlob> errors;
- std::wstring entryPointW;
- DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(entryPoint));
+ if (FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr,
+ nullptr, nullptr, request.entryPointName,
+ targetProfile, request.compileFlags, 0,
+ &compiledShader, &errors))) {
+ std::string message = std::string("D3D compile failed with ") +
+ static_cast<char*>(errors->GetBufferPointer());
+ return DAWN_VALIDATION_ERROR(message);
+ }
- std::vector<const wchar_t*> arguments =
- GetDXCArguments(compileFlags, device->IsExtensionEnabled(Extension::ShaderFloat16));
+ return std::move(compiledShader);
+ }
- ComPtr<IDxcOperationResult> result;
- DAWN_TRY(CheckHRESULT(
- dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
- device->GetDeviceInfo().shaderProfiles[stage].c_str(),
- arguments.data(), arguments.size(), nullptr, 0, nullptr, &result),
- "DXC compile"));
+ ResultOrError<std::string> TranslateToHLSL(const ShaderCompilationRequest& request,
+ std::string* remappedEntryPointName) {
+ std::ostringstream errorStream;
+ errorStream << "Tint HLSL failure:" << std::endl;
- HRESULT hr;
- DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+ tint::transform::Manager transformManager;
+ tint::transform::DataMap transformInputs;
- if (FAILED(hr)) {
- ComPtr<IDxcBlobEncoding> errors;
- DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+ if (request.isRobustnessEnabled) {
+ transformManager.Add<tint::transform::Robustness>();
+ }
+ transformManager.Add<tint::transform::BindingRemapper>();
- std::string message = std::string("DXC compile failed with ") +
- static_cast<char*>(errors->GetBufferPointer());
- return DAWN_VALIDATION_ERROR(message);
- }
+ transformManager.Add<tint::transform::Renamer>();
- ComPtr<IDxcBlob> compiledShader;
- DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
- return std::move(compiledShader);
- }
+ if (request.disableSymbolRenaming) {
+ // We still need to rename HLSL reserved keywords
+ transformInputs.Add<tint::transform::Renamer::Config>(
+ tint::transform::Renamer::Target::kHlslKeywords);
+ }
- ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(Device* device,
- SingleShaderStage stage,
- const std::string& hlslSource,
- const char* entryPoint,
- uint32_t compileFlags) {
- const char* targetProfile = nullptr;
- switch (stage) {
- case SingleShaderStage::Vertex:
- targetProfile = "vs_5_1";
- break;
- case SingleShaderStage::Fragment:
- targetProfile = "ps_5_1";
- break;
- case SingleShaderStage::Compute:
- targetProfile = "cs_5_1";
- break;
+ // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
+ // the remapping but should not be considered a collision because they have
+ // different types.
+ const bool mayCollide = true;
+ transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
+ std::move(request.bindingPoints), std::move(request.accessControls), mayCollide);
+
+ tint::Program transformedProgram;
+ tint::transform::DataMap transformOutputs;
+ DAWN_TRY_ASSIGN(transformedProgram,
+ RunTransforms(&transformManager, request.program, transformInputs,
+ &transformOutputs, nullptr));
+
+ if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+ auto it = data->remappings.find(request.entryPointName);
+ if (it != data->remappings.end()) {
+ *remappedEntryPointName = it->second;
+ } else {
+ if (request.disableSymbolRenaming) {
+ *remappedEntryPointName = request.entryPointName;
+ } else {
+ return DAWN_VALIDATION_ERROR(
+ "Could not find remapped name for entry point.");
+ }
+ }
+ } else {
+ return DAWN_VALIDATION_ERROR("Transform output missing renamer data.");
+ }
+
+ tint::writer::hlsl::Options options;
+ options.disable_workgroup_init = request.disableWorkgroupInit;
+ auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
+ if (!result.success) {
+ errorStream << "Generator: " << result.error << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ return std::move(result.hlsl);
}
- ComPtr<ID3DBlob> compiledShader;
- ComPtr<ID3DBlob> errors;
+ template <typename F>
+ MaybeError CompileShader(const PlatformFunctions* functions,
+ IDxcLibrary* dxcLibrary,
+ IDxcCompiler* dxcCompiler,
+ ShaderCompilationRequest&& request,
+ bool dumpShaders,
+ F&& DumpShadersEmitLog,
+ CompiledShader* compiledShader) {
+ // Compile the source shader to HLSL.
+ std::string hlslSource;
+ std::string remappedEntryPoint;
+ DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(request, &remappedEntryPoint));
+ if (dumpShaders) {
+ std::ostringstream dumpedMsg;
+ dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
+ DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+ }
+ request.entryPointName = remappedEntryPoint.c_str();
+ switch (request.compiler) {
+ case ShaderCompilationRequest::Compiler::DXC:
+ DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
+ CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
+ break;
+ case ShaderCompilationRequest::Compiler::FXC:
+ DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
+ CompileShaderFXC(functions, request, hlslSource));
+ break;
+ }
- const PlatformFunctions* functions = device->GetFunctions();
- if (FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, nullptr,
- nullptr, entryPoint, targetProfile, compileFlags, 0,
- &compiledShader, &errors))) {
- std::string message = std::string("D3D compile failed with ") +
- static_cast<char*>(errors->GetBufferPointer());
- return DAWN_VALIDATION_ERROR(message);
+ return {};
}
- return std::move(compiledShader);
- }
+ } // anonymous namespace
// static
ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
@@ -180,188 +484,79 @@ namespace dawn_native { namespace d3d12 {
return InitializeBase(parseResult);
}
- ResultOrError<std::string> ShaderModule::TranslateToHLSLWithTint(
- const char* entryPointName,
- SingleShaderStage stage,
- PipelineLayout* layout,
- std::string* remappedEntryPointName,
- FirstOffsetInfo* firstOffsetInfo) const {
+ ResultOrError<CompiledShader> ShaderModule::Compile(const char* entryPointName,
+ SingleShaderStage stage,
+ PipelineLayout* layout,
+ uint32_t compileFlags) {
ASSERT(!IsError());
-
ScopedTintICEHandler scopedICEHandler(GetDevice());
- using BindingRemapper = tint::transform::BindingRemapper;
- using BindingPoint = tint::transform::BindingPoint;
- BindingRemapper::BindingPoints bindingPoints;
- BindingRemapper::AccessControls accessControls;
-
- const EntryPointMetadata::BindingInfoArray& moduleBindingInfo =
- GetEntryPoint(entryPointName).bindings;
-
- // d3d12::BindGroupLayout packs the bindings per HLSL register-space.
- // We modify the Tint AST to make the "bindings" decoration match the
- // offset chosen by d3d12::BindGroupLayout so that Tint produces HLSL
- // with the correct registers assigned to each interface variable.
- for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
- const auto& bindingOffsets = bgl->GetBindingOffsets();
- const auto& groupBindingInfo = moduleBindingInfo[group];
- for (const auto& it : groupBindingInfo) {
- BindingNumber binding = it.first;
- auto const& bindingInfo = it.second;
- BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
- uint32_t bindingOffset = bindingOffsets[bindingIndex];
- BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
- static_cast<uint32_t>(binding)};
- BindingPoint dstBindingPoint{static_cast<uint32_t>(group), bindingOffset};
- if (srcBindingPoint != dstBindingPoint) {
- bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
- }
-
- // Declaring a read-only storage buffer in HLSL but specifying a
- // storage buffer in the BGL produces the wrong output.
- // Force read-only storage buffer bindings to be treated as UAV
- // instead of SRV.
- // Internal storage buffer is a storage buffer used in the internal pipeline.
- const bool forceStorageBufferAsUAV =
- (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
- (bgl->GetBindingInfo(bindingIndex).buffer.type ==
- wgpu::BufferBindingType::Storage ||
- bgl->GetBindingInfo(bindingIndex).buffer.type ==
- kInternalStorageBufferBinding));
- if (forceStorageBufferAsUAV) {
- accessControls.emplace(srcBindingPoint, tint::ast::Access::kReadWrite);
- }
- }
- }
+ Device* device = ToBackend(GetDevice());
- std::ostringstream errorStream;
- errorStream << "Tint HLSL failure:" << std::endl;
+ CompiledShader compiledShader = {};
tint::transform::Manager transformManager;
tint::transform::DataMap transformInputs;
- if (GetDevice()->IsRobustnessEnabled()) {
- transformManager.Add<tint::transform::BoundArrayAccessors>();
- }
- transformManager.Add<tint::transform::BindingRemapper>();
-
- // The FirstIndexOffset transform must be done after the BindingRemapper because it assumes
- // that the register space has already flattened (and uses the next register). Otherwise
- // intermediate ASTs can be produced where the extra registers conflict with one of the
- // user-declared bind points.
+ const tint::Program* program;
+ tint::Program programAsValue;
if (stage == SingleShaderStage::Vertex) {
transformManager.Add<tint::transform::FirstIndexOffset>();
transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
layout->GetFirstIndexOffsetShaderRegister(),
layout->GetFirstIndexOffsetRegisterSpace());
- }
-
- transformManager.Add<tint::transform::Renamer>();
-
- if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
- // We still need to rename HLSL reserved keywords
- transformInputs.Add<tint::transform::Renamer::Config>(
- tint::transform::Renamer::Target::kHlslKeywords);
- }
- // D3D12 registers like `t3` and `c3` have the same bindingOffset number in the
- // remapping but should not be considered a collision because they have different types.
- const bool mayCollide = true;
- transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
- std::move(accessControls), mayCollide);
-
- tint::Program program;
- tint::transform::DataMap transformOutputs;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
- &transformOutputs, nullptr));
-
- if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
- firstOffsetInfo->usesVertexIndex = data->has_vertex_index;
- if (firstOffsetInfo->usesVertexIndex) {
- firstOffsetInfo->vertexIndexOffset = data->first_vertex_offset;
- }
- firstOffsetInfo->usesInstanceIndex = data->has_instance_index;
- if (firstOffsetInfo->usesInstanceIndex) {
- firstOffsetInfo->instanceIndexOffset = data->first_instance_offset;
- }
- }
-
- if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
- auto it = data->remappings.find(entryPointName);
- if (it != data->remappings.end()) {
- *remappedEntryPointName = it->second;
- } else {
- if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
- *remappedEntryPointName = entryPointName;
- } else {
- return DAWN_VALIDATION_ERROR("Could not find remapped name for entry point.");
+ tint::transform::DataMap transformOutputs;
+ DAWN_TRY_ASSIGN(programAsValue,
+ RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+ &transformOutputs, nullptr));
+
+ if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
+ // TODO(dawn:549): Consider adding this information to the pipeline cache once we
+ // can store more than the shader blob in it.
+ compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
+ if (compiledShader.firstOffsetInfo.usesVertexIndex) {
+ compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
+ }
+ compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
+ if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
+ compiledShader.firstOffsetInfo.instanceIndexOffset =
+ data->first_instance_offset;
}
}
- } else {
- return DAWN_VALIDATION_ERROR("Transform output missing renamer data.");
- }
- tint::writer::hlsl::Options options;
- options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
- auto result = tint::writer::hlsl::Generate(&program, options);
- if (!result.success) {
- errorStream << "Generator: " << result.error << std::endl;
- return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ program = &programAsValue;
+ } else {
+ program = GetTintProgram();
}
- return std::move(result.hlsl);
- }
-
- ResultOrError<CompiledShader> ShaderModule::Compile(const char* entryPointName,
- SingleShaderStage stage,
- PipelineLayout* layout,
- uint32_t compileFlags) {
- Device* device = ToBackend(GetDevice());
-
- // Compile the source shader to HLSL.
- std::string hlslSource;
- std::string remappedEntryPoint;
- CompiledShader compiledShader = {};
- DAWN_TRY_ASSIGN(hlslSource,
- TranslateToHLSLWithTint(entryPointName, stage, layout, &remappedEntryPoint,
- &compiledShader.firstOffsetInfo));
- entryPointName = remappedEntryPoint.c_str();
-
- if (device->IsToggleEnabled(Toggle::DumpShaders)) {
- std::ostringstream dumpedMsg;
- dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
- GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
- }
+ ShaderCompilationRequest request;
+ DAWN_TRY_ASSIGN(request, ShaderCompilationRequest::Create(
+ entryPointName, stage, layout, compileFlags, device, program,
+ GetEntryPoint(entryPointName).bindings));
- // Use HLSL source as the input for the key since it does need to know about the pipeline
- // layout. The pipeline layout is only required if we key from WGSL: two different pipeline
- // layouts could be used to produce different shader blobs and the wrong shader blob could
- // be loaded since the pipeline layout was missing from the key.
- // The compiler flags or version used could also produce different HLSL source. HLSL key
- // needs both to ensure the shader cache key is unique to the HLSL source.
- // TODO(dawn:549): Consider keying from WGSL and serialize the pipeline layout it used.
PersistentCacheKey shaderCacheKey;
- DAWN_TRY_ASSIGN(shaderCacheKey,
- CreateHLSLKey(entryPointName, stage, hlslSource, compileFlags));
-
- DAWN_TRY_ASSIGN(compiledShader.cachedShader,
- device->GetPersistentCache()->GetOrCreate(
- shaderCacheKey, [&](auto doCache) -> MaybeError {
- if (device->IsToggleEnabled(Toggle::UseDXC)) {
- DAWN_TRY_ASSIGN(compiledShader.compiledDXCShader,
- CompileShaderDXC(device, stage, hlslSource,
- entryPointName, compileFlags));
- } else {
- DAWN_TRY_ASSIGN(compiledShader.compiledFXCShader,
- CompileShaderFXC(device, stage, hlslSource,
- entryPointName, compileFlags));
- }
- const D3D12_SHADER_BYTECODE shader =
- compiledShader.GetD3D12ShaderBytecode();
- doCache(shader.pShaderBytecode, shader.BytecodeLength);
- return {};
- }));
+ DAWN_TRY_ASSIGN(shaderCacheKey, request.CreateCacheKey());
+
+ DAWN_TRY_ASSIGN(
+ compiledShader.cachedShader,
+ device->GetPersistentCache()->GetOrCreate(
+ shaderCacheKey, [&](auto doCache) -> MaybeError {
+ DAWN_TRY(CompileShader(
+ device->GetFunctions(),
+ device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get()
+ : nullptr,
+ device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get()
+ : nullptr,
+ std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
+ [&](WGPULoggingType loggingType, const char* message) {
+ GetDevice()->EmitLog(loggingType, message);
+ },
+ &compiledShader));
+ const D3D12_SHADER_BYTECODE shader = compiledShader.GetD3D12ShaderBytecode();
+ doCache(shader.pShaderBytecode, shader.BytecodeLength);
+ return {};
+ }));
return std::move(compiledShader);
}
@@ -377,69 +572,4 @@ namespace dawn_native { namespace d3d12 {
UNREACHABLE();
return {};
}
-
- ResultOrError<PersistentCacheKey> ShaderModule::CreateHLSLKey(const char* entryPointName,
- SingleShaderStage stage,
- const std::string& hlslSource,
- uint32_t compileFlags) const {
- std::stringstream stream;
-
- // Prefix the key with the type to avoid collisions from another type that could have the
- // same key.
- stream << static_cast<uint32_t>(PersistentKeyType::Shader);
-
- // Provide "guard" strings that the user cannot provide to help ensure the generated HLSL
- // used to create this key is not being manufactured by the user to load the wrong shader
- // blob.
- // These strings can be HLSL comments because Tint does not emit HLSL comments.
- // TODO(dawn:549): Replace guards strings with something more secure.
- constexpr char kStartGuard[] = "// Start shader autogenerated by Dawn.";
- constexpr char kEndGuard[] = "// End shader autogenerated by Dawn.";
- ASSERT(hlslSource.find(kStartGuard) == std::string::npos);
- ASSERT(hlslSource.find(kEndGuard) == std::string::npos);
-
- stream << kStartGuard << "\n";
- stream << hlslSource;
- stream << "\n" << kEndGuard;
-
- stream << compileFlags;
-
- // Add the HLSL compiler version for good measure.
- // Prepend the compiler name to ensure the version is always unique.
- if (GetDevice()->IsToggleEnabled(Toggle::UseDXC)) {
- uint64_t dxCompilerVersion;
- DAWN_TRY_ASSIGN(dxCompilerVersion, GetDXCompilerVersion());
- stream << "DXC" << dxCompilerVersion;
- } else {
- stream << "FXC" << GetD3DCompilerVersion();
- }
-
- // If the source contains multiple entry points, ensure they are cached seperately
- // per stage since DX shader code can only be compiled per stage using the same
- // entry point.
- stream << static_cast<uint32_t>(stage);
- stream << entryPointName;
-
- return PersistentCacheKey(std::istreambuf_iterator<char>{stream},
- std::istreambuf_iterator<char>{});
- }
-
- ResultOrError<uint64_t> ShaderModule::GetDXCompilerVersion() const {
- ComPtr<IDxcValidator> dxcValidator = ToBackend(GetDevice())->GetDxcValidator();
-
- ComPtr<IDxcVersionInfo> versionInfo;
- DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
- "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
-
- uint32_t compilerMajor, compilerMinor;
- DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
- "IDxcVersionInfo::GetVersion"));
-
- // Pack both into a single version number.
- return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
- }
-
- uint64_t ShaderModule::GetD3DCompilerVersion() const {
- return D3D_COMPILER_VERSION;
- }
}} // namespace dawn_native::d3d12
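The removed GetDXCompilerVersion() above (its logic now lives with the free functions used by ShaderCompilationRequest::Create) packs the DXC validator's major and minor version into one uint64_t: major in the high 32 bits, minor in the low 32 bits. A minimal sketch of that packing and the corresponding unpacking, independent of the DXC COM interfaces; PackVersion and UnpackVersion are illustrative names.

    #include <cassert>
    #include <cstdint>

    uint64_t PackVersion(uint32_t major, uint32_t minor) {
        return (uint64_t(major) << 32) | uint64_t(minor);  // equivalent to the `+` in the original
    }

    void UnpackVersion(uint64_t packed, uint32_t* major, uint32_t* minor) {
        *major = uint32_t(packed >> 32);
        *minor = uint32_t(packed & 0xFFFFFFFFu);
    }

    int main() {
        uint32_t major = 0, minor = 0;
        UnpackVersion(PackVersion(1, 6), &major, &minor);
        assert(major == 1 && minor == 6);
        return 0;
    }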
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
index d1b2a0ac1c7..880a35cf45f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
@@ -58,20 +58,6 @@ namespace dawn_native { namespace d3d12 {
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
-
- ResultOrError<std::string> TranslateToHLSLWithTint(const char* entryPointName,
- SingleShaderStage stage,
- PipelineLayout* layout,
- std::string* remappedEntryPointName,
- FirstOffsetInfo* firstOffsetInfo) const;
-
- ResultOrError<PersistentCacheKey> CreateHLSLKey(const char* entryPointName,
- SingleShaderStage stage,
- const std::string& hlslSource,
- uint32_t compileFlags) const;
-
- ResultOrError<uint64_t> GetDXCompilerVersion() const;
- uint64_t GetD3DCompilerVersion() const;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index 4c1e90b96ed..d35622e9e06 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -47,7 +48,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
ToBackend(mUploadHeap.GetResourceHeap())));
- DAWN_TRY(mUploadHeap.SetDebugName("Dawn_StagingBuffer"));
+ SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index d9671c6b0e2..81c4ba05ad0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -21,6 +21,7 @@
#include "dawn_native/Error.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
+#include "dawn_native/d3d12/D3D11on12Util.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
@@ -49,7 +50,7 @@ namespace dawn_native { namespace d3d12 {
if (usage & wgpu::TextureUsage::CopyDst) {
resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
}
- if (usage & (wgpu::TextureUsage::TextureBinding | kReadOnlyStorageTexture)) {
+ if (usage & (wgpu::TextureUsage::TextureBinding)) {
resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
}
@@ -202,8 +203,51 @@ namespace dawn_native { namespace d3d12 {
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return DXGI_FORMAT_BC7_TYPELESS;
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
UNREACHABLE();
}
@@ -329,7 +373,50 @@ namespace dawn_native { namespace d3d12 {
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return DXGI_FORMAT_NV12;
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
UNREACHABLE();
}
@@ -418,18 +505,20 @@ namespace dawn_native { namespace d3d12 {
}
// static
- ResultOrError<Ref<Texture>> Texture::CreateExternalImage(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized) {
+ ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
+ Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized) {
Ref<Texture> dawnTexture =
AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsExternalTexture(descriptor, std::move(d3d12Texture),
- acquireMutexKey, releaseMutexKey,
- isSwapChainTexture));
+ DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
+ descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), acquireMutexKey,
+ releaseMutexKey, isSwapChainTexture));
// Importing a multi-planar format must be initialized. This is required because
// a shared multi-planar format cannot be initialized by Dawn.
@@ -453,22 +542,20 @@ namespace dawn_native { namespace d3d12 {
return std::move(dawnTexture);
}
- MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture) {
- Device* dawnDevice = ToBackend(GetDevice());
-
- ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
- DAWN_TRY_ASSIGN(dxgiKeyedMutex, dawnDevice->CreateKeyedMutexForTexture(d3d12Texture.Get()));
-
- DAWN_TRY(CheckHRESULT(dxgiKeyedMutex->AcquireSync(uint64_t(acquireMutexKey), INFINITE),
+ MaybeError Texture::InitializeAsExternalTexture(
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
+ bool isSwapChainTexture) {
+ DAWN_TRY(CheckHRESULT(d3d11on12Resource->GetDXGIKeyedMutex()->AcquireSync(
+ uint64_t(acquireMutexKey), INFINITE),
"D3D12 acquiring shared mutex"));
mAcquireMutexKey = acquireMutexKey;
mReleaseMutexKey = releaseMutexKey;
- mDxgiKeyedMutex = std::move(dxgiKeyedMutex);
+ mD3D11on12Resource = std::move(d3d11on12Resource);
mSwapChainTexture = isSwapChainTexture;
D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
@@ -481,7 +568,7 @@ namespace dawn_native { namespace d3d12 {
// memory management.
mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
- DAWN_TRY(mResourceAllocation.SetDebugName("Dawn_ExternalTexture"));
+ SetLabelHelper("Dawn_ExternalTexture");
return {};
}
@@ -520,7 +607,7 @@ namespace dawn_native { namespace d3d12 {
->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
D3D12_RESOURCE_STATE_COMMON));
- DAWN_TRY(mResourceAllocation.SetDebugName("Dawn_InternalTexture"));
+ SetLabelImpl();
Device* device = ToBackend(GetDevice());
@@ -543,7 +630,8 @@ namespace dawn_native { namespace d3d12 {
// memory management.
mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
- DAWN_TRY(mResourceAllocation.SetDebugName("Dawn_SwapChainTexture"));
+ SetLabelHelper("Dawn_SwapChainTexture");
+
return {};
}
@@ -583,9 +671,8 @@ namespace dawn_native { namespace d3d12 {
// ID3D12SharingContract::Present.
mSwapChainTexture = false;
- if (mDxgiKeyedMutex != nullptr) {
- mDxgiKeyedMutex->ReleaseSync(uint64_t(mReleaseMutexKey));
- device->ReleaseKeyedMutexForTexture(std::move(mDxgiKeyedMutex));
+ if (mD3D11on12Resource != nullptr) {
+ mD3D11on12Resource->GetDXGIKeyedMutex()->ReleaseSync(uint64_t(mReleaseMutexKey));
}
}
@@ -754,7 +841,7 @@ namespace dawn_native { namespace d3d12 {
// Textures with keyed mutexes can be written from other graphics queues. Hence, they
// must be acquired before command list submission to ensure work from the other queues
// has finished. See Device::ExecuteCommandContext.
- if (mDxgiKeyedMutex != nullptr) {
+ if (mD3D11on12Resource != nullptr) {
commandContext->AddToSharedTextureList(this);
}
}
@@ -1026,6 +1113,15 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ void Texture::SetLabelHelper(const char* prefix) {
+ SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
+ GetLabel());
+ }
+
+ void Texture::SetLabelImpl() {
+ SetLabelHelper("Dawn_InternalTexture");
+ }
+
void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
const SubresourceRange& range) {
if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 9082ffcafb0..c414a8ae617 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -28,6 +28,7 @@ namespace dawn_native { namespace d3d12 {
class CommandRecordingContext;
class Device;
+ class D3D11on12ResourceCacheEntry;
DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
@@ -39,13 +40,15 @@ namespace dawn_native { namespace d3d12 {
public:
static ResultOrError<Ref<Texture>> Create(Device* device,
const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> CreateExternalImage(Device* device,
- const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> d3d12Texture,
- ExternalMutexSerial acquireMutexKey,
- ExternalMutexSerial releaseMutexKey,
- bool isSwapChainTexture,
- bool isInitialized);
+ static ResultOrError<Ref<Texture>> CreateExternalImage(
+ Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+ ExternalMutexSerial acquireMutexKey,
+ ExternalMutexSerial releaseMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized);
static ResultOrError<Ref<Texture>> Create(Device* device,
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture);
@@ -89,13 +92,18 @@ namespace dawn_native { namespace d3d12 {
MaybeError InitializeAsInternalTexture();
MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture,
+ Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
ExternalMutexSerial acquireMutexKey,
ExternalMutexSerial releaseMutexKey,
bool isSwapChainTexture);
MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
+ void SetLabelHelper(const char* prefix);
+
// Dawn API
+ void SetLabelImpl() override;
void DestroyImpl() override;
+
MaybeError ClearTexture(CommandRecordingContext* commandContext,
const SubresourceRange& range,
TextureBase::ClearValue clearValue);
@@ -127,7 +135,7 @@ namespace dawn_native { namespace d3d12 {
ExternalMutexSerial mAcquireMutexKey = ExternalMutexSerial(0);
ExternalMutexSerial mReleaseMutexKey = ExternalMutexSerial(0);
- ComPtr<IDXGIKeyedMutex> mDxgiKeyedMutex;
+ Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
};
class TextureView final : public TextureViewBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index b52dc0ba22c..38479eba103 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -18,6 +18,8 @@
#include "dawn_native/Format.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
#include <stringapiset.h>
@@ -370,4 +372,20 @@ namespace dawn_native { namespace d3d12 {
}
}
+ void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
+ if (!object) {
+ return;
+ }
+
+ if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+ object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
+ return;
+ }
+
+ std::string objectName = prefix;
+ objectName += "_";
+ objectName += label;
+ object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index fea27d57a7d..2a3f3d5b95c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -81,6 +81,11 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer,
const Extent3D& copySize);
+ void SetDebugName(Device* device,
+ ID3D12Object* object,
+ const char* prefix,
+ std::string label = "");
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_UTILSD3D12_H_
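The SetDebugName() helper declared here attaches a WKPDID_D3DDebugObjectName string to a D3D12 object: just the prefix when no user label is set (or when the UseUserDefinedLabelsInBackend toggle is off), and "prefix_label" otherwise. A sketch of the name-composition part alone; ComposeDebugName is a hypothetical helper, with the ID3D12Object::SetPrivateData call and the toggle query stubbed out as assumptions.

    #include <string>

    std::string ComposeDebugName(const char* prefix,
                                 const std::string& label,
                                 bool useUserLabels) {
        if (label.empty() || !useUserLabels) {
            return prefix;  // e.g. "Dawn_StagingBuffer"
        }
        return std::string(prefix) + "_" + label;  // e.g. "Dawn_InternalTexture_myLabel"
    }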
diff --git a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h b/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
index 537a5029e74..d36a48af0cf 100644
--- a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
+++ b/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
@@ -23,12 +23,21 @@
#include <dawn_native/wgpu_structs_autogen.h>
namespace dawn_native {
- // Add an extra buffer usage (readonly storage buffer usage) and an extra texture usage
- // (readonly storage texture usage) for render pass resource tracking
+ // Extra buffer usages
+    // Add an extra buffer usage and an extra binding type for binding buffers that have
+    // QueryResolve usage as storage buffers in the internal pipeline.
+ static constexpr wgpu::BufferUsage kInternalStorageBuffer =
+ static_cast<wgpu::BufferUsage>(0x40000000);
+
+ // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
static_cast<wgpu::BufferUsage>(0x80000000);
- static constexpr wgpu::TextureUsage kReadOnlyStorageTexture =
- static_cast<wgpu::TextureUsage>(0x80000000);
+
+ // Extra texture usages
+ // Add an extra texture usage (readonly render attachment usage) for render pass resource
+ // tracking
+ static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
+ static_cast<wgpu::TextureUsage>(0x40000000);
// Internal usage to help tracking when a subresource is used as render attachment usage
// more than once in a render pass.
@@ -41,10 +50,6 @@ namespace dawn_native {
// some bit when wgpu::TextureUsage::Present is removed.
static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
- // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
- // usage as storage buffer in the internal pipeline.
- static constexpr wgpu::BufferUsage kInternalStorageBuffer =
- static_cast<wgpu::BufferUsage>(0x40000000);
static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
} // namespace dawn_native
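The reshuffled constants above reserve the top two bits of the 32-bit usage space for Dawn-internal usages (0x40000000 for kInternalStorageBuffer / kReadOnlyRenderAttachment, 0x80000000 for kReadOnlyStorageBuffer), keeping them clear of the bits used by the public WebGPU usage flags. A small sketch of the compile-time check this relies on; kAssumedPublicUsageMask is an assumption for illustration, the real bit assignments come from the generated WebGPU headers.

    #include <cstdint>

    constexpr uint32_t kInternalStorageBuffer = 0x40000000;
    constexpr uint32_t kReadOnlyStorageBuffer = 0x80000000;

    // Assumed upper bound for the bits occupied by public wgpu::BufferUsage flags.
    constexpr uint32_t kAssumedPublicUsageMask = 0x0000FFFF;

    static_assert((kInternalStorageBuffer & kReadOnlyStorageBuffer) == 0,
                  "internal usages must use distinct bits");
    static_assert(((kInternalStorageBuffer | kReadOnlyStorageBuffer) &
                   kAssumedPublicUsageMask) == 0,
                  "internal usages must not collide with public usage bits");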
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
index a3a1212b943..d25069885dd 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
@@ -28,6 +28,8 @@
# include "common/IOKitRef.h"
#endif
+#include <vector>
+
namespace dawn_native { namespace metal {
namespace {
@@ -168,6 +170,66 @@ namespace dawn_native { namespace metal {
#else
# error "Unsupported Apple platform."
#endif
+
+ bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
+ API_AVAILABLE(macos(11.0), ios(14.0)) {
+ bool isBlitBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
+ bool isDispatchBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
+ bool isDrawBoundarySupported =
+ [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
+
+ return isBlitBoundarySupported && isDispatchBoundarySupported &&
+ isDrawBoundarySupported;
+ }
+
+ bool IsGPUCounterSupported(id<MTLDevice> device,
+ MTLCommonCounterSet counterSetName,
+ std::vector<MTLCommonCounter> counters)
+ API_AVAILABLE(macos(10.15), ios(14.0)) {
+            // MTLDevice’s counterSets property declares which counter sets it supports. Check
+            // that the counter set we want is in that list before requesting it.
+ id<MTLCounterSet> counterSet = nil;
+ for (id<MTLCounterSet> set in device.counterSets) {
+ if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
+ counterSet = set;
+ break;
+ }
+ }
+
+ // The counter set is not supported.
+ if (counterSet == nil) {
+ return false;
+ }
+
+            // A GPU might support a counter set but only a subset of the counters in that set.
+            // Check that the counter set supports every specific counter we need and return
+            // false if any of them is unsupported.
+ std::vector<NSString*> supportedCounters;
+ for (id<MTLCounter> counter in counterSet.counters) {
+ supportedCounters.push_back(counter.name);
+ }
+ for (const auto& counterName : counters) {
+ if (std::find(supportedCounters.begin(), supportedCounters.end(), counterName) ==
+ supportedCounters.end()) {
+ return false;
+ }
+ }
+
+ if (@available(macOS 11.0, iOS 14.0, *)) {
+ // Check whether it can read GPU counters at the specified command boundary. Apple
+ // family GPUs do not support sampling between different Metal commands, because
+ // they defer fragment processing until after the GPU processes all the primitives
+ // in the render pass.
+ if (!IsCounterSamplingBoundarySupport(device)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
} // anonymous namespace
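IsGPUCounterSupported() above accepts a counter set only if every requested counter appears in the set's supported list (and, on macOS 11 / iOS 14 and later, if sampling at blit, dispatch and draw boundaries is available). The core of that check is a plain subset test; a generic C++ version of the same logic, using std::string in place of the Metal counter-name types, with SupportsAllCounters as an illustrative name.

    #include <algorithm>
    #include <string>
    #include <vector>

    bool SupportsAllCounters(const std::vector<std::string>& supported,
                             const std::vector<std::string>& requested) {
        for (const auto& name : requested) {
            if (std::find(supported.begin(), supported.end(), name) == supported.end()) {
                return false;  // one missing counter disqualifies the whole set
            }
        }
        return true;
    }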
// The Metal backend's Adapter.
@@ -202,7 +264,7 @@ namespace dawn_native { namespace metal {
mDriverDescription =
"Metal driver on " + std::string(systemName) + [osVersion UTF8String];
- InitializeSupportedExtensions();
+ InitializeSupportedFeatures();
}
// AdapterBase Implementation
@@ -216,18 +278,24 @@ namespace dawn_native { namespace metal {
return Device::Create(this, mDevice, descriptor);
}
- void InitializeSupportedExtensions() {
+ void InitializeSupportedFeatures() {
#if defined(DAWN_PLATFORM_MACOS)
if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
- mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
}
#endif
if (@available(macOS 10.15, iOS 14.0, *)) {
- if ([*mDevice supportsFamily:MTLGPUFamilyMac2] ||
- [*mDevice supportsFamily:MTLGPUFamilyApple5]) {
- mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
+ if (IsGPUCounterSupported(
+ *mDevice, MTLCommonCounterSetStatistic,
+ {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
+ MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
+ MTLCommonCounterComputeKernelInvocations})) {
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ }
+ if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
+ {MTLCommonCounterTimestamp})) {
// Disable timestamp query on macOS 10.15 on AMD GPU because WriteTimestamp
// fails to call without any copy commands on MTLBlitCommandEncoder. This issue
// has been fixed on macOS 11.0. See crbug.com/dawn/545
@@ -238,12 +306,13 @@ namespace dawn_native { namespace metal {
enableTimestampQuery &= !IsMacOSVersionAtLeast(11);
#endif
if (enableTimestampQuery) {
- mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
}
}
}
+
if (@available(macOS 10.11, iOS 11.0, *)) {
- mSupportedExtensions.EnableExtension(Extension::DepthClamping);
+ mSupportedFeatures.EnableFeature(Feature::DepthClamping);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
index edd9c35b1c9..1d2c2a93342 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
@@ -26,13 +26,16 @@ namespace dawn_native { namespace metal {
class BindGroupLayout final : public BindGroupLayoutBase {
public:
static Ref<BindGroupLayout> Create(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup);
private:
- BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
~BindGroupLayout() override = default;
SlabAllocator<BindGroup> mBindGroupAllocator;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
index 535979bb835..5d748c1f787 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
@@ -19,14 +19,17 @@
namespace dawn_native { namespace metal {
// static
- Ref<BindGroupLayout> BindGroupLayout::Create(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor) {
- return AcquireRef(new BindGroupLayout(device, descriptor));
+ Ref<BindGroupLayout> BindGroupLayout::Create(
+ DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
}
BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor),
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index f41cff71ee3..eb9892b6afb 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -17,6 +17,7 @@
#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ExternalTexture.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/metal/BindGroupMTL.h"
@@ -27,6 +28,7 @@
#include "dawn_native/metal/QuerySetMTL.h"
#include "dawn_native/metal/RenderPipelineMTL.h"
#include "dawn_native/metal/SamplerMTL.h"
+#include "dawn_native/metal/StagingBufferMTL.h"
#include "dawn_native/metal/TextureMTL.h"
#include "dawn_native/metal/UtilsMetal.h"
@@ -101,7 +103,6 @@ namespace dawn_native { namespace metal {
kMTLStoreActionStoreAndMultisampleResolve;
break;
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
descriptor.colorAttachments[i].storeAction =
MTLStoreActionMultisampleResolve;
break;
@@ -112,7 +113,6 @@ namespace dawn_native { namespace metal {
descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
break;
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
break;
}
@@ -137,7 +137,6 @@ namespace dawn_native { namespace metal {
break;
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
break;
}
@@ -165,7 +164,6 @@ namespace dawn_native { namespace metal {
break;
case wgpu::StoreOp::Discard:
- case wgpu::StoreOp::Clear:
descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
break;
}
@@ -989,6 +987,40 @@ namespace dawn_native { namespace metal {
break;
}
+ case Command::SetValidatedBufferLocationsInternal:
+ DoNextSetValidatedBufferLocationsInternal();
+ break;
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
+ sourceOffset:uploadHandle.startOffset
+ toBuffer:dstBuffer->GetMTLBuffer()
+ destinationOffset:offset
+ size:size];
+ break;
+ }
+
default:
UNREACHABLE();
}
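The WriteBuffer case above stages the data through the DynamicUploader, asking it to honor kCopyBufferToBufferOffsetAlignment so the subsequent blit copy starts at a legal source offset. The usual way such an allocator rounds a running offset up to an alignment is shown below; Align is an illustrative helper and the value 4 in the comment is a placeholder, the actual constant lives in the Metal backend headers.

    #include <cassert>
    #include <cstdint>

    // Round `offset` up to the next multiple of `alignment` (alignment must be a power of two).
    uint64_t Align(uint64_t offset, uint64_t alignment) {
        assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
        return (offset + alignment - 1) & ~(alignment - 1);
    }

    // e.g. with a placeholder alignment of 4: Align(13, 4) == 16, Align(16, 4) == 16.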
@@ -1304,20 +1336,21 @@ namespace dawn_native { namespace metal {
}
case Command::DrawIndexedIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
- Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+ ASSERT(!draw->indirectBufferLocation->IsNull());
+ Buffer* buffer = ToBackend(draw->indirectBufferLocation->GetBuffer());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
[encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
indexType:indexBufferType
indexBuffer:indexBuffer
indexBufferOffset:indexBufferBaseOffset
indirectBuffer:indirectBuffer
- indirectBufferOffset:draw->indirectOffset];
+ indirectBufferOffset:draw->indirectBufferLocation->GetOffset()];
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
index 09ab23a8d9e..4ecb4500309 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
@@ -42,11 +42,12 @@ namespace dawn_native { namespace metal {
private:
using ComputePipelineBase::ComputePipelineBase;
- MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
+ MaybeError Initialize() override;
NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
MTLSize mLocalWorkgroupSize;
bool mRequiresStorageBufferLength;
+ std::vector<uint32_t> mWorkgroupAllocations;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
index b98a3694d57..8879fb21277 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
@@ -25,15 +25,16 @@ namespace dawn_native { namespace metal {
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
+ DAWN_TRY(pipeline->Initialize());
return pipeline;
}
- MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
+ MaybeError ComputePipeline::Initialize() {
auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- ShaderModule* computeModule = ToBackend(descriptor->compute.module);
- const char* computeEntryPoint = descriptor->compute.entryPoint;
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+ ShaderModule* computeModule = ToBackend(computeStage.module.Get());
+ const char* computeEntryPoint = computeStage.entryPoint.c_str();
ShaderModule::MetalFunctionData computeData;
DAWN_TRY(computeModule->CreateFunction(computeEntryPoint, SingleShaderStage::Compute,
ToBackend(GetLayout()), &computeData));
@@ -53,11 +54,18 @@ namespace dawn_native { namespace metal {
mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+ mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
return {};
}
void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
[encoder setComputePipelineState:mMtlComputePipelineState.Get()];
+ for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
+ if (mWorkgroupAllocations[i] == 0) {
+ continue;
+ }
+ [encoder setThreadgroupMemoryLength:mWorkgroupAllocations[i] atIndex:i];
+ }
}
MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
@@ -75,8 +83,8 @@ namespace dawn_native { namespace metal {
void* userdata) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(pipeline, descriptor, blueprintHash,
- callback, userdata);
+ std::make_unique<CreateComputePipelineAsyncTask>(pipeline, blueprintHash, callback,
+ userdata);
CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index d1881e51e69..e5232cbbfa1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -82,7 +82,8 @@ namespace dawn_native { namespace metal {
ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) override;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
@@ -94,7 +95,7 @@ namespace dawn_native { namespace metal {
const PipelineLayoutDescriptor* descriptor) override;
ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) override;
@@ -116,6 +117,9 @@ namespace dawn_native { namespace metal {
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
void InitTogglesFromDriver();
void ShutDownImpl() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index 17c6ba98f11..67ce44bf856 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -134,7 +134,7 @@ namespace dawn_native { namespace metal {
DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
- if (IsExtensionEnabled(Extension::TimestampQuery)) {
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
// Make a best guess of timestamp period based on device vendor info, and converge it to
// an accurate value by the following calculations.
mTimestampPeriod =
@@ -199,13 +199,31 @@ namespace dawn_native { namespace metal {
// TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
+ const PCIInfo& pciInfo = GetAdapter()->GetPCIInfo();
+
// TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
// creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
// create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
if (@available(macOS 10.15, iOS 14.0, *)) {
- bool useSharedMode = gpu_info::IsIntel(this->GetAdapter()->GetPCIInfo().vendorId);
+ bool useSharedMode = gpu_info::IsIntel(pciInfo.vendorId);
SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
}
+
+ // TODO(crbug.com/dawn/1071): r8unorm and rg8unorm textures with multiple mip levels don't
+ // clear properly on Intel Macs.
+ if (gpu_info::IsIntel(pciInfo.vendorId)) {
+ SetToggle(Toggle::DisableR8RG8Mipmaps, true);
+ }
+
+        // On some Intel GPUs, vertex-only render pipelines produce wrong depth results when no
+        // fragment shader is provided. Create a dummy fragment shader module to work around this issue.
+        if (gpu_info::IsIntel(pciInfo.vendorId)) {
+            bool useDummyFragmentShader = true;
+            if (gpu_info::IsSkylake(pciInfo.deviceId)) {
+ useDummyFragmentShader = false;
+ }
+ SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
+ }
}
ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
@@ -213,8 +231,9 @@ namespace dawn_native { namespace metal {
return BindGroup::Create(this, descriptor);
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) {
- return BindGroupLayout::Create(this, descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
@@ -236,9 +255,9 @@ namespace dawn_native { namespace metal {
const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::Create(this, descriptor);
+ return RenderPipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return Sampler::Create(this, descriptor);
@@ -272,6 +291,11 @@ namespace dawn_native { namespace metal {
void* userdata) {
ComputePipeline::CreateAsync(this, descriptor, blueprintHash, callback, userdata);
}
+ void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(renderPipeline, callback, userdata);
+ }
ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
@@ -288,8 +312,8 @@ namespace dawn_native { namespace metal {
MaybeError Device::TickImpl() {
DAWN_TRY(SubmitPendingCommandBuffer());
- // Just run timestamp period calculation when timestamp extension is enabled.
- if (IsExtensionEnabled(Extension::TimestampQuery)) {
+        // Only run the timestamp period calculation when the timestamp feature is enabled.
+ if (IsFeatureEnabled(Feature::TimestampQuery)) {
if (@available(macos 10.15, iOS 14.0, *)) {
UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
&mGpuTimestamp, &mTimestampPeriod);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
index 43eebec365f..d6bedfaedb1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
@@ -27,9 +27,12 @@ namespace dawn_native { namespace metal {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<Ref<RenderPipeline>> Create(
+ static Ref<RenderPipelineBase> CreateUninitialized(
Device* device,
const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
MTLPrimitiveType GetMTLPrimitiveTopology() const;
MTLWinding GetMTLFrontFace() const;
@@ -45,9 +48,10 @@ namespace dawn_native { namespace metal {
wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
+ MaybeError Initialize() override;
+
private:
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
MTLVertexDescriptor* MakeVertexDesc();
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 0910db16610..1537abfa441 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -14,6 +14,7 @@
#include "dawn_native/metal/RenderPipelineMTL.h"
+#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/VertexFormat.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/PipelineLayoutMTL.h"
@@ -158,15 +159,6 @@ namespace dawn_native { namespace metal {
case wgpu::BlendFactor::OneMinusConstant:
return alpha ? MTLBlendFactorOneMinusBlendAlpha
: MTLBlendFactorOneMinusBlendColor;
-
- // Deprecated blend factors should be normalized prior to this call.
- case wgpu::BlendFactor::SrcColor:
- case wgpu::BlendFactor::OneMinusSrcColor:
- case wgpu::BlendFactor::DstColor:
- case wgpu::BlendFactor::OneMinusDstColor:
- case wgpu::BlendFactor::BlendColor:
- case wgpu::BlendFactor::OneMinusBlendColor:
- UNREACHABLE();
}
}
@@ -319,15 +311,13 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
// static
- ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
+ Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
Device* device,
const RenderPipelineDescriptor* descriptor) {
- Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline;
+ return AcquireRef(new RenderPipeline(device, descriptor));
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize() {
mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
mMtlFrontFace = MTLFrontFace(GetFrontFace());
mMtlCullMode = ToMTLCullMode(GetCullMode());
@@ -347,36 +337,42 @@ namespace dawn_native { namespace metal {
}
descriptorMTL.vertexDescriptor = vertexDesc.Get();
- ShaderModule* vertexModule = ToBackend(descriptor->vertex.module);
- const char* vertexEntryPoint = descriptor->vertex.entryPoint;
+ const PerStage<ProgrammableStage>& allStages = GetAllStages();
+ const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
+ ShaderModule* vertexModule = ToBackend(vertexStage.module).Get();
+ const char* vertexEntryPoint = vertexStage.entryPoint.c_str();
ShaderModule::MetalFunctionData vertexData;
-
- const VertexState* vertexStatePtr = &descriptor->vertex;
- VertexState vertexState;
- if (vertexStatePtr == nullptr) {
- vertexState = {};
- vertexStatePtr = &vertexState;
- }
-
DAWN_TRY(vertexModule->CreateFunction(vertexEntryPoint, SingleShaderStage::Vertex,
- ToBackend(GetLayout()), &vertexData, 0xFFFFFFFF, this,
- vertexStatePtr));
+ ToBackend(GetLayout()), &vertexData, 0xFFFFFFFF,
+ this));
descriptorMTL.vertexFunction = vertexData.function.Get();
if (vertexData.needsStorageBufferLength) {
mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
}
- ShaderModule* fragmentModule = ToBackend(descriptor->fragment->module);
- const char* fragmentEntryPoint = descriptor->fragment->entryPoint;
- ShaderModule::MetalFunctionData fragmentData;
- DAWN_TRY(fragmentModule->CreateFunction(fragmentEntryPoint, SingleShaderStage::Fragment,
- ToBackend(GetLayout()), &fragmentData,
- GetSampleMask()));
+ if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+ const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
+ ShaderModule* fragmentModule = ToBackend(fragmentStage.module).Get();
+ const char* fragmentEntryPoint = fragmentStage.entryPoint.c_str();
+ ShaderModule::MetalFunctionData fragmentData;
+ DAWN_TRY(fragmentModule->CreateFunction(fragmentEntryPoint, SingleShaderStage::Fragment,
+ ToBackend(GetLayout()), &fragmentData,
+ GetSampleMask()));
+
+ descriptorMTL.fragmentFunction = fragmentData.function.Get();
+ if (fragmentData.needsStorageBufferLength) {
+ mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
+ }
- descriptorMTL.fragmentFunction = fragmentData.function.Get();
- if (fragmentData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
+ const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
+ MetalPixelFormat(GetColorAttachmentFormat(i));
+ const ColorTargetState* descriptor = GetColorTargetState(i);
+ ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
+ descriptor, fragmentOutputsWritten[i]);
+ }
}
if (HasDepthStencilAttachment()) {
@@ -392,16 +388,6 @@ namespace dawn_native { namespace metal {
}
}
- const auto& fragmentOutputsWritten =
- GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
- MetalPixelFormat(GetColorAttachmentFormat(i));
- const ColorTargetState* descriptor = GetColorTargetState(i);
- ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)], descriptor,
- fragmentOutputsWritten[i]);
- }
-
descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
descriptorMTL.sampleCount = GetSampleCount();
descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
@@ -514,4 +500,13 @@ namespace dawn_native { namespace metal {
return mtlVertexDescriptor;
}
+ void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
}} // namespace dawn_native::metal
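
Render pipeline creation is now split into a cheap CreateUninitialized step and a separate Initialize that can run asynchronously through a task object. Below is a self-contained sketch of that two-phase pattern using std::async and a completion callback; all names are hypothetical, not Dawn's classes.

    #include <functional>
    #include <future>
    #include <memory>
    #include <optional>
    #include <string>

    class PipelineSketch {
      public:
        // Phase 1: cheap, synchronous allocation that only records the descriptor.
        static std::shared_ptr<PipelineSketch> CreateUninitialized(std::string label) {
            return std::shared_ptr<PipelineSketch>(new PipelineSketch(std::move(label)));
        }

        // Phase 2: the expensive backend compilation; may fail.
        std::optional<std::string> Initialize() {
            if (mLabel.empty()) {
                return "pipeline label must not be empty";  // stand-in for a real error
            }
            mInitialized = true;
            return std::nullopt;
        }

        bool IsInitialized() const { return mInitialized; }

      private:
        explicit PipelineSketch(std::string label) : mLabel(std::move(label)) {}
        std::string mLabel;
        bool mInitialized = false;
    };

    using Callback = std::function<void(std::shared_ptr<PipelineSketch>, std::optional<std::string>)>;

    // Runs Initialize() off the calling thread and reports the result, mirroring
    // the CreateRenderPipelineAsyncTask::RunAsync flow in spirit.
    std::future<void> InitializeAsync(std::shared_ptr<PipelineSketch> pipeline, Callback callback) {
        return std::async(std::launch::async, [pipeline = std::move(pipeline),
                                               callback = std::move(callback)]() mutable {
            std::optional<std::string> error = pipeline->Initialize();
            callback(std::move(pipeline), std::move(error));
        });
    }

    int main() {
        auto pipeline = PipelineSketch::CreateUninitialized("triangle");
        auto done = InitializeAsync(pipeline, [](auto p, auto error) {
            (void)p;
            (void)error;  // a real backend would surface this through the WebGPU callback
        });
        done.wait();
        return pipeline->IsInitialized() ? 0 : 1;
    }
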
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
index a6608bda6ed..4cb91a4db68 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
@@ -37,25 +37,25 @@ namespace dawn_native { namespace metal {
struct MetalFunctionData {
NSPRef<id<MTLFunction>> function;
bool needsStorageBufferLength;
+ std::vector<uint32_t> workgroupAllocations;
};
MaybeError CreateFunction(const char* entryPointName,
SingleShaderStage stage,
const PipelineLayout* layout,
MetalFunctionData* out,
uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr,
- const VertexState* vertexState = nullptr);
+ const RenderPipeline* renderPipeline = nullptr);
private:
- ResultOrError<std::string> TranslateToMSLWithTint(const char* entryPointName,
- SingleShaderStage stage,
- const PipelineLayout* layout,
- uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- const VertexState* vertexState,
- std::string* remappedEntryPointName,
- bool* needsStorageBufferLength,
- bool* hasInvariantAttribute);
+ ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
+ SingleShaderStage stage,
+ const PipelineLayout* layout,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline,
+ std::string* remappedEntryPointName,
+ bool* needsStorageBufferLength,
+ bool* hasInvariantAttribute,
+ std::vector<uint32_t>* workgroupAllocations);
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index 800a3beaf65..c994ceef905 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -44,16 +44,16 @@ namespace dawn_native { namespace metal {
return InitializeBase(parseResult);
}
- ResultOrError<std::string> ShaderModule::TranslateToMSLWithTint(
+ ResultOrError<std::string> ShaderModule::TranslateToMSL(
const char* entryPointName,
SingleShaderStage stage,
const PipelineLayout* layout,
uint32_t sampleMask,
const RenderPipeline* renderPipeline,
- const VertexState* vertexState,
std::string* remappedEntryPointName,
bool* needsStorageBufferLength,
- bool* hasInvariantAttribute) {
+ bool* hasInvariantAttribute,
+ std::vector<uint32_t>* workgroupAllocations) {
ScopedTintICEHandler scopedICEHandler(GetDevice());
std::ostringstream errorStream;
@@ -93,11 +93,16 @@ namespace dawn_native { namespace metal {
tint::transform::Manager transformManager;
tint::transform::DataMap transformInputs;
+ // We only remap bindings for the target entry point, so we need to strip all other entry
+ // points to avoid generating invalid bindings for them.
+ transformManager.Add<tint::transform::SingleEntryPoint>();
+ transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
if (stage == SingleShaderStage::Vertex &&
GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
transformManager.Add<tint::transform::VertexPulling>();
- AddVertexPullingTransformConfig(*vertexState, entryPointName, kPullingBufferBindingSet,
- &transformInputs);
+ AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
+ kPullingBufferBindingSet, &transformInputs);
for (VertexBufferSlot slot :
IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
@@ -113,7 +118,7 @@ namespace dawn_native { namespace metal {
}
}
if (GetDevice()->IsRobustnessEnabled()) {
- transformManager.Add<tint::transform::BoundArrayAccessors>();
+ transformManager.Add<tint::transform::Robustness>();
}
transformManager.Add<tint::transform::BindingRemapper>();
transformManager.Add<tint::transform::Renamer>();
@@ -124,7 +129,6 @@ namespace dawn_native { namespace metal {
tint::transform::Renamer::Target::kMslKeywords);
}
-
transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
std::move(accessControls),
/* mayCollide */ true);
@@ -164,6 +168,7 @@ namespace dawn_native { namespace metal {
*needsStorageBufferLength = result.needs_storage_buffer_sizes;
*hasInvariantAttribute = result.has_invariant_attribute;
+ *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
return std::move(result.msl);
}
@@ -173,24 +178,22 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
ShaderModule::MetalFunctionData* out,
uint32_t sampleMask,
- const RenderPipeline* renderPipeline,
- const VertexState* vertexState) {
+ const RenderPipeline* renderPipeline) {
ASSERT(!IsError());
ASSERT(out);
- // Vertex stages must specify a renderPipeline and vertexState
+ // Vertex stages must specify a renderPipeline
if (stage == SingleShaderStage::Vertex) {
ASSERT(renderPipeline != nullptr);
- ASSERT(vertexState != nullptr);
}
std::string remappedEntryPointName;
std::string msl;
bool hasInvariantAttribute = false;
- DAWN_TRY_ASSIGN(
- msl, TranslateToMSLWithTint(entryPointName, stage, layout, sampleMask, renderPipeline,
- vertexState, &remappedEntryPointName,
- &out->needsStorageBufferLength, &hasInvariantAttribute));
+ DAWN_TRY_ASSIGN(msl,
+ TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
+ &remappedEntryPointName, &out->needsStorageBufferLength,
+ &hasInvariantAttribute, &out->workgroupAllocations));
// Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
// category. -Wunused-variable in particular comes up a lot in generated code, and some
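
TranslateToMSL now also hands back the workgroup allocations for the (possibly renamed) entry point through an out-parameter. The sketch below shows that plumbing against a hypothetical translation-result struct; it is not Tint's actual API.

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Hypothetical stand-in for what a shader translator might return.
    struct TranslationResult {
        std::string msl;
        bool needsStorageBufferSizes = false;
        // Keyed by the (possibly renamed) entry point actually emitted.
        std::unordered_map<std::string, std::vector<uint32_t>> workgroupAllocations;
    };

    std::string TranslateSketch(const TranslationResult& result,
                                const std::string& remappedEntryPoint,
                                bool* needsStorageBufferLength,
                                std::vector<uint32_t>* workgroupAllocations) {
        *needsStorageBufferLength = result.needsStorageBufferSizes;
        // Pull out only the data for the entry point we compiled; other entry
        // points were stripped before translation (SingleEntryPoint in the diff).
        auto it = result.workgroupAllocations.find(remappedEntryPoint);
        *workgroupAllocations =
            (it != result.workgroupAllocations.end()) ? it->second : std::vector<uint32_t>{};
        return result.msl;
    }

    int main() {
        TranslationResult result;
        result.msl = "/* generated MSL */";
        result.workgroupAllocations["main_1"] = {128, 0, 32};

        bool needsSizes = false;
        std::vector<uint32_t> allocations;
        std::string msl = TranslateSketch(result, "main_1", &needsSizes, &allocations);
        assert(allocations.size() == 3);
        return msl.empty() ? 1 : 0;
    }
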
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 120b21491fe..0a8a607cfe4 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -252,9 +252,70 @@ namespace dawn_native { namespace metal {
return MTLPixelFormatBC7_RGBAUnorm;
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+#else
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
#endif
- default:
+ case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+ // TODO(dawn:666): implement stencil8
+ case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
+ case wgpu::TextureFormat::Undefined:
UNREACHABLE();
}
}
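
The format switch above drops its default: case and instead lists every unsupported wgpu::TextureFormat explicitly before UNREACHABLE(), so a newly added enum value produces a compiler warning instead of silently falling into a default. A minimal sketch of the pattern, with a made-up enum:

    #include <cstdlib>

    enum class FormatSketch { RGBA8, BGRA8, ASTC4x4, Undefined };

    [[noreturn]] void Unreachable() { std::abort(); }

    int ToBackendFormat(FormatSketch format) {
        // No `default:`; with -Wswitch the compiler flags any enum value that is
        // not listed here, which is the point of the Dawn change.
        switch (format) {
            case FormatSketch::RGBA8:
                return 1;
            case FormatSketch::BGRA8:
                return 2;
            // Formats the backend knowingly does not support fall through to the
            // unreachable marker below rather than into a silent default.
            case FormatSketch::ASTC4x4:
            case FormatSketch::Undefined:
                break;
        }
        Unreachable();
    }

    int main() { return ToBackendFormat(FormatSketch::RGBA8) == 1 ? 0 : 1; }
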
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 0c0b0d3d2e2..ab6ec331f30 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -28,8 +28,8 @@ namespace dawn_native { namespace null {
mPCIInfo.name = "Null backend";
mAdapterType = wgpu::AdapterType::CPU;
- // Enable all extensions by default for the convenience of tests.
- mSupportedExtensions.extensionsBitSet.set();
+ // Enable all features by default for the convenience of tests.
+ mSupportedFeatures.featuresBitSet.set();
}
Adapter::~Adapter() = default;
@@ -38,9 +38,9 @@ namespace dawn_native { namespace null {
return false;
}
- // Used for the tests that intend to use an adapter without all extensions enabled.
- void Adapter::SetSupportedExtensions(const std::vector<const char*>& requiredExtensions) {
- mSupportedExtensions = GetInstance()->ExtensionNamesToExtensionsSet(requiredExtensions);
+ // Used for the tests that intend to use an adapter without all features enabled.
+ void Adapter::SetSupportedFeatures(const std::vector<const char*>& requiredFeatures) {
+ mSupportedFeatures = GetInstance()->FeatureNamesToFeaturesSet(requiredFeatures);
}
ResultOrError<DeviceBase*> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
@@ -99,8 +99,9 @@ namespace dawn_native { namespace null {
return AcquireRef(new BindGroup(this, descriptor));
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) {
- return AcquireRef(new BindGroupLayout(this, descriptor));
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
DAWN_TRY(IncrementMemoryUsage(descriptor->size));
@@ -123,7 +124,7 @@ namespace dawn_native { namespace null {
const QuerySetDescriptor* descriptor) {
return AcquireRef(new QuerySet(this, descriptor));
}
- ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
return AcquireRef(new RenderPipeline(this, descriptor));
}
@@ -358,6 +359,11 @@ namespace dawn_native { namespace null {
return {};
}
+ // RenderPipeline
+ MaybeError RenderPipeline::Initialize() {
+ return {};
+ }
+
// SwapChain
// static
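
The null adapter now tracks a feature bitset, enabling everything for test convenience and letting SetSupportedFeatures narrow it. A rough sketch of such a feature set using std::bitset; the feature enum and name strings are assumptions for illustration.

    #include <bitset>
    #include <cstddef>
    #include <string>
    #include <vector>

    enum class FeatureSketch : size_t { TextureCompressionBC, TimestampQuery, DepthClamping, Count };

    class FeatureSet {
      public:
        void EnableFeature(FeatureSketch f) { mBits.set(static_cast<size_t>(f)); }
        void EnableAll() { mBits.set(); }  // what the null adapter does for test convenience
        bool IsEnabled(FeatureSketch f) const { return mBits.test(static_cast<size_t>(f)); }

      private:
        std::bitset<static_cast<size_t>(FeatureSketch::Count)> mBits;
    };

    // Rough analogue of FeatureNamesToFeaturesSet: build a set from requested names.
    FeatureSet FeaturesFromNames(const std::vector<std::string>& names) {
        FeatureSet set;
        for (const std::string& name : names) {
            if (name == "texture-compression-bc") set.EnableFeature(FeatureSketch::TextureCompressionBC);
            if (name == "timestamp-query") set.EnableFeature(FeatureSketch::TimestampQuery);
            if (name == "depth-clamping") set.EnableFeature(FeatureSketch::DepthClamping);
        }
        return set;
    }

    int main() {
        FeatureSet everything;
        everything.EnableAll();
        FeatureSet narrowed = FeaturesFromNames({"timestamp-query"});
        return (everything.IsEnabled(FeatureSketch::DepthClamping) &&
                !narrowed.IsEnabled(FeatureSketch::DepthClamping))
                   ? 0
                   : 1;
    }
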
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index 0021056ccb3..c51152dd696 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -48,7 +48,7 @@ namespace dawn_native { namespace null {
using PipelineLayout = PipelineLayoutBase;
class QuerySet;
class Queue;
- using RenderPipeline = RenderPipelineBase;
+ class RenderPipeline;
using Sampler = SamplerBase;
class ShaderModule;
class SwapChain;
@@ -125,7 +125,8 @@ namespace dawn_native { namespace null {
ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) override;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
@@ -134,7 +135,7 @@ namespace dawn_native { namespace null {
const PipelineLayoutDescriptor* descriptor) override;
ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) override;
@@ -172,8 +173,8 @@ namespace dawn_native { namespace null {
// AdapterBase Implementation
bool SupportsExternalImages() const override;
- // Used for the tests that intend to use an adapter without all extensions enabled.
- void SetSupportedExtensions(const std::vector<const char*>& requiredExtensions);
+ // Used for the tests that intend to use an adapter without all features enabled.
+ void SetSupportedFeatures(const std::vector<const char*>& requiredFeatures);
private:
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
@@ -250,6 +251,13 @@ namespace dawn_native { namespace null {
size_t size) override;
};
+ class RenderPipeline final : public RenderPipelineBase {
+ public:
+ using RenderPipelineBase::RenderPipelineBase;
+
+ MaybeError Initialize() override;
+ };
+
class ShaderModule final : public ShaderModuleBase {
public:
using ShaderModuleBase::ShaderModuleBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
index c2b24be1ffe..b92599ede86 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
@@ -187,7 +187,7 @@ namespace dawn_native { namespace opengl {
mAdapterType = wgpu::AdapterType::CPU;
}
- InitializeSupportedExtensions();
+ InitializeSupportedFeatures();
return {};
}
@@ -209,7 +209,7 @@ namespace dawn_native { namespace opengl {
return Device::Create(this, descriptor, mFunctions);
}
- void InitializeSupportedExtensions() {
+ void InitializeSupportedFeatures() {
// TextureCompressionBC
{
// BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
@@ -249,8 +249,7 @@ namespace dawn_native { namespace opengl {
if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
supportsBPTC) {
- mSupportedExtensions.EnableExtension(
- dawn_native::Extension::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(dawn_native::Feature::TextureCompressionBC);
}
}
}
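
InitializeSupportedFeatures only exposes TextureCompressionBC when all of the required GL compression extensions are present. A sketch of that gating logic is below; the exact extension strings the real backend queries may differ, so treat them as assumptions.

    #include <set>
    #include <string>

    // Hypothetical extension query; the real backend asks the GL driver.
    bool Has(const std::set<std::string>& exts, const std::string& name) {
        return exts.count(name) != 0;
    }

    bool SupportsTextureCompressionBC(const std::set<std::string>& exts) {
        // BC1-3 (S3TC), BC4-5 (RGTC) and BC6H/BC7 (BPTC) must all be present,
        // plus an sRGB-capable path, before the WebGPU feature can be exposed.
        bool s3tc = Has(exts, "GL_EXT_texture_compression_s3tc");
        bool srgb = Has(exts, "GL_EXT_texture_sRGB") ||
                    Has(exts, "GL_EXT_texture_compression_s3tc_srgb");
        bool rgtc = Has(exts, "GL_ARB_texture_compression_rgtc") ||
                    Has(exts, "GL_EXT_texture_compression_rgtc");
        bool bptc = Has(exts, "GL_ARB_texture_compression_bptc") ||
                    Has(exts, "GL_EXT_texture_compression_bptc");
        return s3tc && srgb && rgtc && bptc;
    }

    int main() {
        std::set<std::string> exts = {"GL_EXT_texture_compression_s3tc", "GL_EXT_texture_sRGB",
                                      "GL_ARB_texture_compression_rgtc",
                                      "GL_ARB_texture_compression_bptc"};
        return SupportsTextureCompressionBC(exts) ? 0 : 1;
    }
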
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
index 619e4e62ee9..d008b1d48a4 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
@@ -19,8 +19,9 @@
namespace dawn_native { namespace opengl {
BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor),
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
index edd1dd050b4..136bd0a7e5a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
@@ -25,7 +25,9 @@ namespace dawn_native { namespace opengl {
class BindGroupLayout final : public BindGroupLayoutBase {
public:
- BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup);
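
CreateBindGroupLayoutImpl now threads a PipelineCompatibilityToken through to the layout, tying default layouts to the pipeline they were generated for. A simplified sketch of how such a token can make layouts non-interchangeable; the types are hypothetical, not Dawn's actual definitions.

    #include <cstdint>

    // An opaque, monotonically increasing token. Zero means "not pipeline-specific".
    enum class PipelineCompatibilityToken : uint64_t { None = 0 };

    PipelineCompatibilityToken NextToken() {
        static uint64_t counter = 0;
        return static_cast<PipelineCompatibilityToken>(++counter);
    }

    class BindGroupLayoutSketch {
      public:
        explicit BindGroupLayoutSketch(PipelineCompatibilityToken token) : mToken(token) {}

        // Two layouts can only be used interchangeably if they were created for
        // the same pipeline (or neither was created for a specific pipeline).
        bool IsCompatibleWith(const BindGroupLayoutSketch& other) const {
            return mToken == other.mToken;
        }

      private:
        PipelineCompatibilityToken mToken;
    };

    int main() {
        BindGroupLayoutSketch explicitLayout(PipelineCompatibilityToken::None);
        BindGroupLayoutSketch defaultLayoutA(NextToken());
        BindGroupLayoutSketch defaultLayoutB(NextToken());
        return (explicitLayout.IsCompatibleWith(explicitLayout) &&
                !defaultLayoutA.IsCompatibleWith(defaultLayoutB))
                   ? 0
                   : 1;
    }
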
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 1534e090d6b..788eb9e2756 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -45,8 +45,9 @@ namespace dawn_native { namespace opengl {
case wgpu::IndexFormat::Uint32:
return GL_UNSIGNED_INT;
case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
GLenum VertexFormatType(wgpu::VertexFormat format) {
@@ -339,9 +340,6 @@ namespace dawn_native { namespace opengl {
GLenum access;
switch (bindingInfo.storageTexture.access) {
- case wgpu::StorageTextureAccess::ReadOnly:
- access = GL_READ_ONLY;
- break;
case wgpu::StorageTextureAccess::WriteOnly:
access = GL_WRITE_ONLY;
break;
@@ -846,6 +844,27 @@ namespace dawn_native { namespace opengl {
break;
}
+ case Command::SetValidatedBufferLocationsInternal:
+ DoNextSetValidatedBufferLocationsInternal();
+ break;
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ uint64_t offset = write->offset;
+ uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ dstBuffer->EnsureDataInitializedAsDestination(offset, size);
+
+ gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+ gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
+ break;
+ }
+
default:
UNREACHABLE();
}
@@ -1055,8 +1074,7 @@ namespace dawn_native { namespace opengl {
}
}
- if (attachmentInfo->storeOp == wgpu::StoreOp::Discard ||
- attachmentInfo->storeOp == wgpu::StoreOp::Clear) {
+ if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
// TODO(natlee@microsoft.com): call glDiscard to do optimization
}
}
@@ -1169,16 +1187,17 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ ASSERT(!draw->indirectBufferLocation->IsNull());
+
vertexStateBufferBindingTracker.Apply(gl);
bindGroupTracker.Apply(gl);
- uint64_t indirectBufferOffset = draw->indirectOffset;
- Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
-
+ Buffer* indirectBuffer = ToBackend(draw->indirectBufferLocation->GetBuffer());
gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
gl.DrawElementsIndirect(
lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
- reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
+ reinterpret_cast<void*>(
+ static_cast<intptr_t>(draw->indirectBufferLocation->GetOffset())));
break;
}
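
The new WriteBuffer command stores its payload inline in the command stream: zero-sized writes are skipped, the destination is marked initialized, then the bytes are uploaded. A sketch of consuming such a command from a flat byte stream with a hypothetical layout, not Dawn's CommandIterator:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Hypothetical destination buffer with the two operations the diff relies on.
    struct BufferSketch {
        std::vector<uint8_t> storage;
        void EnsureDataInitializedAsDestination(size_t offset, size_t size) {
            if (offset + size > storage.size()) storage.resize(offset + size, 0);
        }
        void Write(size_t offset, const uint8_t* data, size_t size) {
            std::memcpy(storage.data() + offset, data, size);
        }
    };

    struct WriteBufferCmd {
        uint64_t offset;
        uint64_t size;
        // The payload bytes follow the command header inline in the stream.
    };

    // Consumes one WriteBuffer command starting at `cursor` and returns the new cursor.
    const uint8_t* HandleWriteBuffer(const uint8_t* cursor, BufferSketch* dst) {
        WriteBufferCmd cmd;
        std::memcpy(&cmd, cursor, sizeof(cmd));
        cursor += sizeof(cmd);

        const uint8_t* payload = cursor;
        cursor += cmd.size;

        if (cmd.size == 0) {
            return cursor;  // nothing to upload, mirrors the `continue` in the diff
        }
        dst->EnsureDataInitializedAsDestination(cmd.offset, cmd.size);
        dst->Write(cmd.offset, payload, cmd.size);
        return cursor;
    }

    int main() {
        // Build a tiny stream: header followed by 4 payload bytes.
        WriteBufferCmd cmd{4, 4};
        std::vector<uint8_t> stream(sizeof(cmd) + 4);
        std::memcpy(stream.data(), &cmd, sizeof(cmd));
        uint8_t payload[4] = {1, 2, 3, 4};
        std::memcpy(stream.data() + sizeof(cmd), payload, sizeof(payload));

        BufferSketch buffer;
        HandleWriteBuffer(stream.data(), &buffer);
        return (buffer.storage.size() == 8 && buffer.storage[4] == 1) ? 0 : 1;
    }
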
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp
index 7680ceebb0b..e51e217a8f6 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.cpp
@@ -23,11 +23,15 @@ namespace dawn_native { namespace opengl {
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
+ DAWN_TRY(pipeline->Initialize());
return pipeline;
}
- MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor*) {
+ ComputePipeline::~ComputePipeline() {
+ DeleteProgram(ToBackend(GetDevice())->gl);
+ }
+
+ MaybeError ComputePipeline::Initialize() {
DAWN_TRY(
InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h
index e84e3666761..dd5c0b3f36d 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ComputePipelineGL.h
@@ -35,8 +35,8 @@ namespace dawn_native { namespace opengl {
private:
using ComputePipelineBase::ComputePipelineBase;
- ~ComputePipeline() override = default;
- MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
+ ~ComputePipeline() override;
+ MaybeError Initialize() override;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index 5d9ef310be5..ac8be45ba23 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -99,6 +99,8 @@ namespace dawn_native { namespace opengl {
SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
+        // For OpenGL ES, we must use a dummy fragment shader for vertex-only render pipelines.
+ SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
}
const GLFormat& Device::GetGLFormat(const Format& format) {
@@ -116,8 +118,9 @@ namespace dawn_native { namespace opengl {
return BindGroup::Create(this, descriptor);
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) {
- return AcquireRef(new BindGroupLayout(this, descriptor));
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return AcquireRef(new Buffer(this, descriptor));
@@ -139,9 +142,9 @@ namespace dawn_native { namespace opengl {
const QuerySetDescriptor* descriptor) {
return AcquireRef(new QuerySet(this, descriptor));
}
- ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::Create(this, descriptor);
+ return RenderPipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return AcquireRef(new Sampler(this, descriptor));
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index 1bb16bc7713..b2596474994 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -87,7 +87,8 @@ namespace dawn_native { namespace opengl {
ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) override;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
@@ -96,7 +97,7 @@ namespace dawn_native { namespace opengl {
const PipelineLayoutDescriptor* descriptor) override;
ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index 4541e651f74..c2fcbe6eac9 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -40,11 +40,14 @@ namespace dawn_native { namespace opengl {
case SingleShaderStage::Compute:
return GL_COMPUTE_SHADER;
}
+ UNREACHABLE();
}
} // namespace
- PipelineGL::PipelineGL() = default;
+ PipelineGL::PipelineGL() : mProgram(0) {
+ }
+
PipelineGL::~PipelineGL() = default;
MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
@@ -86,6 +89,7 @@ namespace dawn_native { namespace opengl {
// Create an OpenGL shader for each stage and gather the list of combined samplers.
PerStage<CombinedSamplerInfo> combinedSamplers;
bool needsDummySampler = false;
+ std::vector<GLuint> glShaders;
for (SingleShaderStage stage : IterateStages(activeStages)) {
const ShaderModule* module = ToBackend(stages[stage].module.Get());
std::string glsl;
@@ -95,6 +99,7 @@ namespace dawn_native { namespace opengl {
GLuint shader;
DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
gl.AttachShader(mProgram, shader);
+ glShaders.push_back(shader);
}
if (needsDummySampler) {
@@ -177,9 +182,19 @@ namespace dawn_native { namespace opengl {
textureUnit++;
}
+
+ for (GLuint glShader : glShaders) {
+ gl.DetachShader(mProgram, glShader);
+ gl.DeleteShader(glShader);
+ }
+
return {};
}
+ void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
+ gl.DeleteProgram(mProgram);
+ }
+
const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
GLuint index) const {
ASSERT(index < mUnitsForSamplers.size());
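
PipelineGL now keeps the intermediate shader handles so it can detach and delete them once the program is linked, and gains an explicit DeleteProgram called from the concrete pipelines' destructors. The sketch below mirrors that lifecycle against a fake GL function table so it stays self-contained; the real code calls the loaded OpenGL entry points.

    #include <cstdio>
    #include <vector>

    using GLuint = unsigned int;

    // Fake GL function table; each call just logs what the real driver would do.
    struct FakeGL {
        GLuint nextHandle = 1;
        GLuint CreateProgram() { return nextHandle++; }
        GLuint CreateShader() { return nextHandle++; }
        void AttachShader(GLuint p, GLuint s) { std::printf("attach %u -> %u\n", s, p); }
        void LinkProgram(GLuint p) { std::printf("link %u\n", p); }
        void DetachShader(GLuint p, GLuint s) { std::printf("detach %u from %u\n", s, p); }
        void DeleteShader(GLuint s) { std::printf("delete shader %u\n", s); }
        void DeleteProgram(GLuint p) { std::printf("delete program %u\n", p); }
    };

    class PipelineSketch {
      public:
        void InitializeBase(FakeGL& gl, int stageCount) {
            mProgram = gl.CreateProgram();
            std::vector<GLuint> shaders;
            for (int i = 0; i < stageCount; ++i) {
                GLuint shader = gl.CreateShader();
                gl.AttachShader(mProgram, shader);
                shaders.push_back(shader);
            }
            gl.LinkProgram(mProgram);
            // Once the program is linked the shader objects are no longer needed;
            // detach and delete them so the driver can reclaim them immediately.
            for (GLuint shader : shaders) {
                gl.DetachShader(mProgram, shader);
                gl.DeleteShader(shader);
            }
        }

        // Called from the concrete pipeline's destructor in the diff.
        void DeleteProgram(FakeGL& gl) { gl.DeleteProgram(mProgram); }

      private:
        GLuint mProgram = 0;
    };

    int main() {
        FakeGL gl;
        PipelineSketch pipeline;
        pipeline.InitializeBase(gl, 2);
        pipeline.DeleteProgram(gl);
    }
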
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
index e21060696ab..be6c1dd5538 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
@@ -47,12 +47,12 @@ namespace dawn_native { namespace opengl {
const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
GLuint GetProgramHandle() const;
- void ApplyNow(const OpenGLFunctions& gl);
-
protected:
+ void ApplyNow(const OpenGLFunctions& gl);
MaybeError InitializeBase(const OpenGLFunctions& gl,
const PipelineLayout* layout,
const PerStage<ProgrammableStage>& stages);
+ void DeleteProgram(const OpenGLFunctions& gl);
private:
GLuint mProgram;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
index 5bc596f0539..1b4b5cee74e 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
@@ -36,6 +36,7 @@ namespace dawn_native { namespace opengl {
case wgpu::PrimitiveTopology::TriangleStrip:
return GL_TRIANGLE_STRIP;
}
+ UNREACHABLE();
}
void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
@@ -84,16 +85,8 @@ namespace dawn_native { namespace opengl {
return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
case wgpu::BlendFactor::OneMinusConstant:
return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
-
- // Deprecated blend factors should be normalized prior to this call.
- case wgpu::BlendFactor::SrcColor:
- case wgpu::BlendFactor::OneMinusSrcColor:
- case wgpu::BlendFactor::DstColor:
- case wgpu::BlendFactor::OneMinusDstColor:
- case wgpu::BlendFactor::BlendColor:
- case wgpu::BlendFactor::OneMinusBlendColor:
- UNREACHABLE();
}
+ UNREACHABLE();
}
GLenum GLBlendMode(wgpu::BlendOperation operation) {
@@ -109,6 +102,7 @@ namespace dawn_native { namespace opengl {
case wgpu::BlendOperation::Max:
return GL_MAX;
}
+ UNREACHABLE();
}
void ApplyColorState(const OpenGLFunctions& gl,
@@ -151,7 +145,7 @@ namespace dawn_native { namespace opengl {
state->writeMask & wgpu::ColorWriteMask::Alpha);
}
- bool Equal(const BlendDescriptor& lhs, const BlendDescriptor& rhs) {
+ bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
lhs.dstFactor == rhs.dstFactor;
}
@@ -175,6 +169,7 @@ namespace dawn_native { namespace opengl {
case wgpu::StencilOperation::DecrementWrap:
return GL_DECR_WRAP;
}
+ UNREACHABLE();
}
void ApplyDepthStencilState(const OpenGLFunctions& gl,
@@ -220,12 +215,10 @@ namespace dawn_native { namespace opengl {
} // anonymous namespace
// static
- ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
+ Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
Device* device,
const RenderPipelineDescriptor* descriptor) {
- Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize());
- return pipeline;
+ return AcquireRef(new RenderPipeline(device, descriptor));
}
RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
@@ -245,6 +238,7 @@ namespace dawn_native { namespace opengl {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.DeleteVertexArrays(1, &mVertexArrayObject);
gl.BindVertexArray(0);
+ DeleteProgram(gl);
}
GLenum RenderPipeline::GetGLPrimitiveTopology() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
index 3c7a4d321af..cd3290fa1ca 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
@@ -29,9 +29,8 @@ namespace dawn_native { namespace opengl {
class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
public:
- static ResultOrError<Ref<RenderPipeline>> Create(
- Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
GLenum GetGLPrimitiveTopology() const;
ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
@@ -39,10 +38,11 @@ namespace dawn_native { namespace opengl {
void ApplyNow(PersistentPipelineState& persistentPipelineState);
+ MaybeError Initialize() override;
+
private:
RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
~RenderPipeline() override;
- MaybeError Initialize();
void CreateVAOForVertexState();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
index 32aa56f620f..81fdd46da26 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
@@ -28,6 +28,7 @@ namespace dawn_native { namespace opengl {
case wgpu::FilterMode::Linear:
return GL_LINEAR;
}
+ UNREACHABLE();
}
GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
@@ -47,6 +48,7 @@ namespace dawn_native { namespace opengl {
return GL_LINEAR_MIPMAP_LINEAR;
}
}
+ UNREACHABLE();
}
GLenum WrapMode(wgpu::AddressMode mode) {
@@ -58,6 +60,7 @@ namespace dawn_native { namespace opengl {
case wgpu::AddressMode::ClampToEdge:
return GL_CLAMP_TO_EDGE;
}
+ UNREACHABLE();
}
} // namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index 1ea18a33bd1..5019593ba6a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -66,31 +66,19 @@ namespace dawn_native { namespace opengl {
return o.str();
}
- ResultOrError<std::unique_ptr<EntryPointMetadata>> ExtractSpirvInfo(
+ ResultOrError<std::unique_ptr<BindingInfoArray>> ExtractSpirvInfo(
const DeviceBase* device,
const spirv_cross::Compiler& compiler,
const std::string& entryPointName,
SingleShaderStage stage) {
- std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
- metadata->stage = stage;
-
const auto& resources = compiler.get_shader_resources();
- if (resources.push_constant_buffers.size() > 0) {
- return DAWN_VALIDATION_ERROR("Push constants aren't supported.");
- }
-
- if (resources.sampled_images.size() > 0) {
- return DAWN_VALIDATION_ERROR("Combined images and samplers aren't supported.");
- }
-
// Fill in bindingInfo with the SPIRV bindings
auto ExtractResourcesBinding =
[](const DeviceBase* device,
const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
const spirv_cross::Compiler& compiler, BindingInfoType bindingType,
- EntryPointMetadata::BindingInfoArray* metadataBindings,
- bool isStorageBuffer = false) -> MaybeError {
+ BindingInfoArray* bindings, bool isStorageBuffer = false) -> MaybeError {
for (const auto& resource : resources) {
if (!compiler.get_decoration_bitset(resource.id).get(spv::DecorationBinding)) {
return DAWN_VALIDATION_ERROR("No Binding decoration set for resource");
@@ -110,13 +98,13 @@ namespace dawn_native { namespace opengl {
return DAWN_VALIDATION_ERROR("Bind group index over limits in the SPIRV");
}
- const auto& it = (*metadataBindings)[bindGroupIndex].emplace(
- bindingNumber, EntryPointMetadata::ShaderBindingInfo{});
+ const auto& it =
+ (*bindings)[bindGroupIndex].emplace(bindingNumber, ShaderBindingInfo{});
if (!it.second) {
return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
}
- EntryPointMetadata::ShaderBindingInfo* info = &it.first->second;
+ ShaderBindingInfo* info = &it.first->second;
info->id = resource.id;
info->base_type_id = resource.base_type_id;
info->bindingType = bindingType;
@@ -176,8 +164,6 @@ namespace dawn_native { namespace opengl {
spirv_cross::Bitset flags = compiler.get_decoration_bitset(resource.id);
if (flags.get(spv::DecorationNonReadable)) {
info->storageTexture.access = wgpu::StorageTextureAccess::WriteOnly;
- } else if (flags.get(spv::DecorationNonWritable)) {
- info->storageTexture.access = wgpu::StorageTextureAccess::ReadOnly;
} else {
return DAWN_VALIDATION_ERROR(
"Read-write storage textures are not supported");
@@ -220,92 +206,21 @@ namespace dawn_native { namespace opengl {
return {};
};
+ std::unique_ptr<BindingInfoArray> resultBindings = std::make_unique<BindingInfoArray>();
+ BindingInfoArray* bindings = resultBindings.get();
DAWN_TRY(ExtractResourcesBinding(device, resources.uniform_buffers, compiler,
- BindingInfoType::Buffer, &metadata->bindings));
+ BindingInfoType::Buffer, bindings));
DAWN_TRY(ExtractResourcesBinding(device, resources.separate_images, compiler,
- BindingInfoType::Texture, &metadata->bindings));
+ BindingInfoType::Texture, bindings));
DAWN_TRY(ExtractResourcesBinding(device, resources.separate_samplers, compiler,
- BindingInfoType::Sampler, &metadata->bindings));
+ BindingInfoType::Sampler, bindings));
DAWN_TRY(ExtractResourcesBinding(device, resources.storage_buffers, compiler,
- BindingInfoType::Buffer, &metadata->bindings, true));
+ BindingInfoType::Buffer, bindings, true));
// ReadonlyStorageTexture is used as a tag to do general storage texture handling.
DAWN_TRY(ExtractResourcesBinding(device, resources.storage_images, compiler,
- BindingInfoType::StorageTexture, &metadata->bindings));
-
- // Extract the vertex attributes
- if (stage == SingleShaderStage::Vertex) {
- for (const auto& attrib : resources.stage_inputs) {
- if (!(compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation))) {
- return DAWN_VALIDATION_ERROR(
- "Unable to find Location decoration for Vertex input");
- }
- uint32_t unsanitizedLocation =
- compiler.get_decoration(attrib.id, spv::DecorationLocation);
-
- if (unsanitizedLocation >= kMaxVertexAttributes) {
- return DAWN_VALIDATION_ERROR("Attribute location over limits in the SPIRV");
- }
- VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
-
- spirv_cross::SPIRType::BaseType inputBaseType =
- compiler.get_type(attrib.base_type_id).basetype;
- metadata->vertexInputBaseTypes[location] =
- SpirvBaseTypeToVertexFormatBaseType(inputBaseType);
- metadata->usedVertexInputs.set(location);
- }
-
- // Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL gives
- // them all the location 0, causing a compile error.
- for (const auto& attrib : resources.stage_outputs) {
- if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- return DAWN_VALIDATION_ERROR("Need location qualifier on vertex output");
- }
- }
- }
-
- if (stage == SingleShaderStage::Fragment) {
- // Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives
- // them all the location 0, causing a compile error.
- for (const auto& attrib : resources.stage_inputs) {
- if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- return DAWN_VALIDATION_ERROR("Need location qualifier on fragment input");
- }
- }
-
- for (const auto& fragmentOutput : resources.stage_outputs) {
- if (!compiler.get_decoration_bitset(fragmentOutput.id)
- .get(spv::DecorationLocation)) {
- return DAWN_VALIDATION_ERROR(
- "Unable to find Location decoration for Fragment output");
- }
- uint32_t unsanitizedAttachment =
- compiler.get_decoration(fragmentOutput.id, spv::DecorationLocation);
-
- if (unsanitizedAttachment >= kMaxColorAttachments) {
- return DAWN_VALIDATION_ERROR(
- "Fragment output index must be less than max number of color "
- "attachments");
- }
- ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
-
- spirv_cross::SPIRType::BaseType shaderFragmentOutputBaseType =
- compiler.get_type(fragmentOutput.base_type_id).basetype;
- // spriv path so temporarily always set to 4u to always pass validation
- metadata->fragmentOutputVariables[attachment] = {
- SpirvBaseTypeToTextureComponentType(shaderFragmentOutputBaseType), 4u};
- metadata->fragmentOutputsWritten.set(attachment);
- }
- }
-
- if (stage == SingleShaderStage::Compute) {
- const spirv_cross::SPIREntryPoint& spirEntryPoint =
- compiler.get_entry_point(entryPointName, spv::ExecutionModelGLCompute);
- metadata->localWorkgroupSize.x = spirEntryPoint.workgroup_size.x;
- metadata->localWorkgroupSize.y = spirEntryPoint.workgroup_size.y;
- metadata->localWorkgroupSize.z = spirEntryPoint.workgroup_size.z;
- }
+ BindingInfoType::StorageTexture, resultBindings.get()));
- return {std::move(metadata)};
+ return {std::move(resultBindings)};
}
// static
@@ -322,10 +237,10 @@ namespace dawn_native { namespace opengl {
}
// static
- ResultOrError<EntryPointMetadataTable> ShaderModule::ReflectShaderUsingSPIRVCross(
+ ResultOrError<BindingInfoArrayTable> ShaderModule::ReflectShaderUsingSPIRVCross(
DeviceBase* device,
const std::vector<uint32_t>& spirv) {
- EntryPointMetadataTable result;
+ BindingInfoArrayTable result;
spirv_cross::Compiler compiler(spirv);
for (const spirv_cross::EntryPoint& entryPoint : compiler.get_entry_points_and_stages()) {
ASSERT(result.count(entryPoint.name) == 0);
@@ -333,9 +248,9 @@ namespace dawn_native { namespace opengl {
SingleShaderStage stage = ExecutionModelToShaderStage(entryPoint.execution_model);
compiler.set_entry_point(entryPoint.name, entryPoint.execution_model);
- std::unique_ptr<EntryPointMetadata> metadata;
- DAWN_TRY_ASSIGN(metadata, ExtractSpirvInfo(device, compiler, entryPoint.name, stage));
- result[entryPoint.name] = std::move(metadata);
+ std::unique_ptr<BindingInfoArray> bindings;
+ DAWN_TRY_ASSIGN(bindings, ExtractSpirvInfo(device, compiler, entryPoint.name, stage));
+ result[entryPoint.name] = std::move(bindings);
}
return std::move(result);
}
@@ -355,7 +270,7 @@ namespace dawn_native { namespace opengl {
return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
- DAWN_TRY_ASSIGN(mGLEntryPoints, ReflectShaderUsingSPIRVCross(GetDevice(), result.spirv));
+ DAWN_TRY_ASSIGN(mGLBindings, ReflectShaderUsingSPIRVCross(GetDevice(), result.spirv));
return {};
}
@@ -445,8 +360,7 @@ namespace dawn_native { namespace opengl {
compiler.set_name(combined.combined_id, info->GetName());
}
- const EntryPointMetadata::BindingInfoArray& bindingInfo =
- (*mGLEntryPoints.at(entryPointName)).bindings;
+ const BindingInfoArray& bindingInfo = *(mGLBindings.at(entryPointName));
// Change binding names to be "dawn_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
index 78a2f2a2726..d9552253574 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
@@ -44,6 +44,9 @@ namespace dawn_native { namespace opengl {
using CombinedSamplerInfo = std::vector<CombinedSampler>;
+ using BindingInfoArrayTable =
+ std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
+
class ShaderModule final : public ShaderModuleBase {
public:
static ResultOrError<Ref<ShaderModule>> Create(Device* device,
@@ -60,11 +63,11 @@ namespace dawn_native { namespace opengl {
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
- static ResultOrError<EntryPointMetadataTable> ReflectShaderUsingSPIRVCross(
+ static ResultOrError<BindingInfoArrayTable> ReflectShaderUsingSPIRVCross(
DeviceBase* device,
const std::vector<uint32_t>& spirv);
- EntryPointMetadataTable mGLEntryPoints;
+ BindingInfoArrayTable mGLBindings;
};
}} // namespace dawn_native::opengl
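
SPIRV-Cross reflection now fills a BindingInfoArrayTable, a map from entry point name to its binding layout, instead of full per-entry-point metadata. A sketch of building and querying such a table with simplified, hypothetical types:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Simplified stand-ins for Dawn's typed structures.
    struct ShaderBindingInfoSketch {
        uint32_t id = 0;
    };
    constexpr size_t kMaxBindGroups = 4;
    using BindingNumber = uint32_t;
    using BindingInfoArraySketch =
        std::array<std::map<BindingNumber, ShaderBindingInfoSketch>, kMaxBindGroups>;
    using BindingTable = std::unordered_map<std::string, std::unique_ptr<BindingInfoArraySketch>>;

    // One entry per entry point, filled by reflection over the SPIR-V module.
    BindingTable ReflectSketch() {
        BindingTable table;
        auto bindings = std::make_unique<BindingInfoArraySketch>();
        (*bindings)[0].emplace(BindingNumber{2}, ShaderBindingInfoSketch{7});  // group 0, binding 2
        table.emplace("vs_main", std::move(bindings));
        return table;
    }

    int main() {
        BindingTable table = ReflectSketch();
        const BindingInfoArraySketch& bindings = *table.at("vs_main");
        return bindings[0].count(2) == 1 ? 0 : 1;
    }
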
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp
index a274300b312..6ce41ca7be4 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SpirvUtils.cpp
@@ -25,6 +25,7 @@ namespace dawn_native {
case SingleShaderStage::Compute:
return spv::ExecutionModelGLCompute;
}
+ UNREACHABLE();
}
SingleShaderStage ExecutionModelToShaderStage(spv::ExecutionModel model) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 1f40f96c196..f8f76bf65de 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -44,8 +44,9 @@ namespace dawn_native { namespace opengl {
return GL_TEXTURE_3D;
case wgpu::TextureDimension::e1D:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
@@ -69,8 +70,9 @@ namespace dawn_native { namespace opengl {
case wgpu::TextureViewDimension::e1D:
case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
GLuint GenTexture(const OpenGLFunctions& gl) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
index 804df662f10..413336b5e61 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
@@ -38,8 +38,9 @@ namespace dawn_native { namespace opengl {
return GL_ALWAYS;
case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
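These OpenGL hunks (and several of the Vulkan ones below) all move UNREACHABLE() from a case label to just after the switch. A minimal sketch of the resulting pattern, using a made-up enum and a stand-in for Dawn's UNREACHABLE() macro: with every enumerator listed and no default label, -Wswitch-style warnings fire at compile time when a new value is added, while the trailing UNREACHABLE() still traps invalid values at runtime.

#include <cstdlib>

#define UNREACHABLE() std::abort()  // Stand-in for Dawn's assertion macro.

enum class Filter { Nearest, Linear, Undefined };

int ToApiFilter(Filter f) {
    switch (f) {
        case Filter::Nearest:
            return 0;
        case Filter::Linear:
            return 1;
        case Filter::Undefined:
            break;  // Not valid at this point; falls through to the check below.
    }
    UNREACHABLE();  // Reached only for unhandled or corrupted enum values.
}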
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
index 6d52f31615a..55a2473b039 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
@@ -53,7 +53,7 @@ namespace dawn_native { namespace vulkan {
"Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
}
- InitializeSupportedExtensions();
+ InitializeSupportedFeatures();
mPCIInfo.deviceId = mDeviceInfo.properties.deviceID;
mPCIInfo.vendorId = mDeviceInfo.properties.vendorID;
@@ -88,9 +88,12 @@ namespace dawn_native { namespace vulkan {
return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
}
- // TODO(crbug.com/dawn/955): Require BC || (ETC && ASTC) instead.
- if (!mDeviceInfo.features.textureCompressionBC) {
- return DAWN_INTERNAL_ERROR("Vulkan textureCompressionBC feature required.");
+ if (!mDeviceInfo.features.textureCompressionBC &&
+ !(mDeviceInfo.features.textureCompressionETC2 &&
+ mDeviceInfo.features.textureCompressionASTC_LDR)) {
+ return DAWN_INTERNAL_ERROR(
+                "Vulkan textureCompressionBC feature required, or both textureCompressionETC2 "
+                "and textureCompressionASTC features required.");
}
// Needed for the respective WebGPU features.
@@ -148,10 +151,6 @@ namespace dawn_native { namespace vulkan {
}
if (limits.maxPerStageDescriptorSampledImages < kMaxSampledTexturesPerShaderStage) {
return DAWN_INTERNAL_ERROR(
- "Insufficient Vulkan limits for maxDynamicStorageBuffersPerPipelineLayout");
- }
- if (limits.maxPerStageDescriptorSampledImages < kMaxSampledTexturesPerShaderStage) {
- return DAWN_INTERNAL_ERROR(
"Insufficient Vulkan limits for maxSampledTexturesPerShaderStage");
}
if (limits.maxPerStageDescriptorSamplers < kMaxSamplersPerShaderStage) {
@@ -258,17 +257,29 @@ namespace dawn_native { namespace vulkan {
mBackend->GetFunctions());
}
- void Adapter::InitializeSupportedExtensions() {
+ void Adapter::InitializeSupportedFeatures() {
if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
- mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+ }
+
+ if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+ }
+
+ if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
}
if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
- mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
+ mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+ }
+
+ if (mDeviceInfo.features.depthClamp == VK_TRUE) {
+ mSupportedFeatures.EnableFeature(Feature::DepthClamping);
}
if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
- mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
}
}
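A minimal sketch of the compressed-texture requirement the new check expresses, written as a free function over VkPhysicalDeviceFeatures (the helper name is illustrative, not Dawn code): BC support alone is sufficient, otherwise both ETC2 and ASTC LDR must be present.

#include <vulkan/vulkan.h>

bool HasRequiredTextureCompression(const VkPhysicalDeviceFeatures& features) {
    const bool bc = features.textureCompressionBC == VK_TRUE;
    const bool etc2AndAstc = features.textureCompressionETC2 == VK_TRUE &&
                             features.textureCompressionASTC_LDR == VK_TRUE;
    return bc || etc2AndAstc;
}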
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h
index a6141c42cc9..47679e8dd33 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.h
@@ -41,7 +41,7 @@ namespace dawn_native { namespace vulkan {
private:
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
MaybeError CheckCoreWebGPUSupport();
- void InitializeSupportedExtensions();
+ void InitializeSupportedFeatures();
VkPhysicalDevice mPhysicalDevice;
Backend* mBackend;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index 7fc85428629..c9bcdcc494f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -52,12 +52,6 @@ constexpr char kVulkanLibName[] = "libvulkan.so";
# error "Unimplemented Vulkan backend platform"
#endif
-// List of Vulkan MessageIdNames to suppress validation messages for. These should be used sparingly
-// but may be useful to temporarily quiet issues while a fix is in the works.
-constexpr const char* kSuppressedValidationMessageNames[] = {
- "UNASSIGNED-CoreValidation-DrawState-InvalidImageLayout", // (ISSUE: dawn:785)
-};
-
namespace dawn_native { namespace vulkan {
namespace {
@@ -67,13 +61,6 @@ namespace dawn_native { namespace vulkan {
VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* /* pUserData */) {
- // If the message is of a suppressed type, ignore it.
- for (const char* msgName : kSuppressedValidationMessageNames) {
- if (strstr(pCallbackData->pMessageIdName, msgName) != nullptr) {
- return VK_FALSE;
- }
- }
-
dawn::WarningLog() << pCallbackData->pMessage;
ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 1e1b673c0d1..b4647582e97 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -73,13 +73,16 @@ namespace dawn_native { namespace vulkan {
case BindingInfoType::StorageTexture:
return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
}
+ UNREACHABLE();
}
// static
ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
Device* device,
- const BindGroupLayoutDescriptor* descriptor) {
- Ref<BindGroupLayout> bgl = AcquireRef(new BindGroupLayout(device, descriptor));
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ Ref<BindGroupLayout> bgl =
+ AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
DAWN_TRY(bgl->Initialize());
return bgl;
}
@@ -139,8 +142,9 @@ namespace dawn_native { namespace vulkan {
}
BindGroupLayout::BindGroupLayout(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor),
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken)
+ : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 72f8b698d7a..0956b8ce33c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -47,9 +47,12 @@ namespace dawn_native { namespace vulkan {
public:
static ResultOrError<Ref<BindGroupLayout>> Create(
Device* device,
- const BindGroupLayoutDescriptor* descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
- BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken);
VkDescriptorSetLayout GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index 8139b29062e..75a32fd6ad9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/ResourceHeapVk.h"
#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <cstring>
@@ -229,6 +230,9 @@ namespace dawn_native { namespace vulkan {
ClearBuffer(recordingContext, 0, clearOffset, clearSize);
}
}
+
+ SetLabelImpl();
+
return {};
}
@@ -374,6 +378,11 @@ namespace dawn_native { namespace vulkan {
}
}
+ void Buffer::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_BUFFER,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_Buffer", GetLabel());
+ }
+
void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse));
ASSERT(!IsDataInitialized());
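SetLabelImpl above forwards to SetDebugName from UtilsVulkan.h. As a rough sketch of what such a helper does, assuming VK_EXT_debug_utils and a loaded vkSetDebugUtilsObjectNameEXT entry point (this is not Dawn's implementation), the object handle and a label are handed to the extension so captures in tools like RenderDoc show the "Dawn_Buffer" prefix plus the user-provided label:

#include <vulkan/vulkan.h>
#include <cstdint>
#include <string>

void NameVulkanObject(VkDevice device,
                      PFN_vkSetDebugUtilsObjectNameEXT setObjectName,  // loaded via vkGetDeviceProcAddr
                      VkObjectType type,
                      uint64_t handle,
                      const std::string& label) {
    if (setObjectName == nullptr || label.empty()) {
        return;  // Extension unavailable or nothing to name.
    }
    VkDebugUtilsObjectNameInfoEXT info{};
    info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
    info.objectType = type;
    info.objectHandle = handle;
    info.pObjectName = label.c_str();
    setObjectName(device, &info);
}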
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
index 1c40140f12d..3fcab6075a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
@@ -49,6 +49,9 @@ namespace dawn_native { namespace vulkan {
void EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
const CopyTextureToBufferCmd* copy);
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~Buffer() override;
using BufferBase::BufferBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index e5ca8c61cbf..30fd55e799c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -18,6 +18,7 @@
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/vulkan/BindGroupVk.h"
@@ -30,6 +31,7 @@
#include "dawn_native/vulkan/QuerySetVk.h"
#include "dawn_native/vulkan/RenderPassCache.h"
#include "dawn_native/vulkan/RenderPipelineVk.h"
+#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
@@ -47,8 +49,9 @@ namespace dawn_native { namespace vulkan {
case wgpu::IndexFormat::Uint32:
return VK_INDEX_TYPE_UINT32;
case wgpu::IndexFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
@@ -206,17 +209,18 @@ namespace dawn_native { namespace vulkan {
const auto& attachmentInfo = renderPass->colorAttachments[i];
bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
- wgpu::LoadOp loadOp = attachmentInfo.loadOp;
- query.SetColor(i, attachmentInfo.view->GetFormat().format, loadOp,
- hasResolveTarget);
+ query.SetColor(i, attachmentInfo.view->GetFormat().format,
+ attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
}
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
const auto& attachmentInfo = renderPass->depthStencilAttachment;
query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
- attachmentInfo.depthLoadOp, attachmentInfo.stencilLoadOp);
+ attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+ attachmentInfo.stencilLoadOp,
+ attachmentInfo.stencilStoreOp);
}
query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
@@ -822,6 +826,44 @@ namespace dawn_native { namespace vulkan {
break;
}
+ case Command::SetValidatedBufferLocationsInternal:
+ DoNextSetValidatedBufferLocationsInternal();
+ break;
+
+ case Command::WriteBuffer: {
+ WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+ const uint64_t offset = write->offset;
+ const uint64_t size = write->size;
+ if (size == 0) {
+ continue;
+ }
+
+ Buffer* dstBuffer = ToBackend(write->buffer.Get());
+ uint8_t* data = mCommands.NextData<uint8_t>(size);
+ Device* device = ToBackend(GetDevice());
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+ memcpy(uploadHandle.mappedBuffer, data, size);
+
+ dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
+
+ dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+ VkBufferCopy copy;
+ copy.srcOffset = uploadHandle.startOffset;
+ copy.dstOffset = offset;
+ copy.size = size;
+
+ device->fn.CmdCopyBuffer(
+ commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+ dstBuffer->GetHandle(), 1, &copy);
+ break;
+ }
+
default:
break;
}
@@ -1043,13 +1085,15 @@ namespace dawn_native { namespace vulkan {
}
case Command::DrawIndexedIndirect: {
- DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- VkBuffer indirectBuffer = ToBackend(draw->indirectBuffer)->GetHandle();
+ DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+ ASSERT(!draw->indirectBufferLocation->IsNull());
+ VkBuffer indirectBuffer =
+ ToBackend(draw->indirectBufferLocation->GetBuffer())->GetHandle();
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndexedIndirect(
- commands, indirectBuffer, static_cast<VkDeviceSize>(draw->indirectOffset),
- 1, 0);
+ commands, indirectBuffer,
+ static_cast<VkDeviceSize>(draw->indirectBufferLocation->GetOffset()), 1, 0);
break;
}
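The WriteBuffer case added above stages the CPU data through Dawn's DynamicUploader and records a copy into the destination buffer. A minimal sketch of the same idea in raw Vulkan, with illustrative helper and parameter names and assuming the staging buffer is host-visible and already mapped:

#include <vulkan/vulkan.h>
#include <cstring>

void RecordWriteBuffer(VkCommandBuffer commands,
                       VkBuffer staging, void* stagingMapped, VkDeviceSize stagingOffset,
                       VkBuffer dst, VkDeviceSize dstOffset,
                       const void* data, VkDeviceSize size) {
    if (size == 0) {
        return;  // Mirrors the early-out in the command loop above.
    }
    std::memcpy(static_cast<char*>(stagingMapped) + stagingOffset, data, size);

    VkBufferCopy copy{};
    copy.srcOffset = stagingOffset;
    copy.dstOffset = dstOffset;
    copy.size = size;
    vkCmdCopyBuffer(commands, staging, dst, 1, &copy);
}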
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index d6a471f6b3a..649ab5478ff 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -29,16 +29,16 @@ namespace dawn_native { namespace vulkan {
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
+ DAWN_TRY(pipeline->Initialize());
return pipeline;
}
- MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
+ MaybeError ComputePipeline::Initialize() {
VkComputePipelineCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.layout = ToBackend(descriptor->layout)->GetHandle();
+ createInfo.layout = ToBackend(GetLayout())->GetHandle();
createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
createInfo.basePipelineIndex = -1;
@@ -47,12 +47,20 @@ namespace dawn_native { namespace vulkan {
createInfo.stage.flags = 0;
createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
// Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+ const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
DAWN_TRY_ASSIGN(createInfo.stage.module,
- ToBackend(descriptor->compute.module)
- ->GetTransformedModuleHandle(descriptor->compute.entryPoint,
+ ToBackend(computeStage.module.Get())
+ ->GetTransformedModuleHandle(computeStage.entryPoint.c_str(),
ToBackend(GetLayout())));
- createInfo.stage.pName = descriptor->compute.entryPoint;
- createInfo.stage.pSpecializationInfo = nullptr;
+
+ createInfo.stage.pName = computeStage.entryPoint.c_str();
+
+ std::vector<SpecializationDataEntry> specializationDataEntries;
+ std::vector<VkSpecializationMapEntry> specializationMapEntries;
+ VkSpecializationInfo specializationInfo{};
+ createInfo.stage.pSpecializationInfo =
+ GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
+ &specializationMapEntries);
Device* device = ToBackend(GetDevice());
@@ -68,10 +76,19 @@ namespace dawn_native { namespace vulkan {
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
}
- return CheckVkSuccess(
+ DAWN_TRY(CheckVkSuccess(
device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
&createInfo, nullptr, &*mHandle),
- "CreateComputePipeline");
+ "CreateComputePipeline"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ void ComputePipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_ComputePipeline", GetLabel());
}
ComputePipeline::~ComputePipeline() {
@@ -92,8 +109,8 @@ namespace dawn_native { namespace vulkan {
void* userdata) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
- std::make_unique<CreateComputePipelineAsyncTask>(pipeline, descriptor, blueprintHash,
- callback, userdata);
+ std::make_unique<CreateComputePipelineAsyncTask>(pipeline, blueprintHash, callback,
+ userdata);
CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
}
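createInfo.stage.pSpecializationInfo is now filled by GetVkSpecializationInfo, which presumably packs the stage's overridable constants. A minimal sketch of the raw Vulkan plumbing for a single 32-bit constant with ID 0 (illustrative only, not Dawn's helper); the caller must keep both the map entry and the data alive until the pipeline is created:

#include <vulkan/vulkan.h>
#include <cstdint>

VkSpecializationInfo MakeSingleUint32Specialization(const uint32_t* value,
                                                    VkSpecializationMapEntry* entryStorage) {
    entryStorage->constantID = 0;           // ID of the specialization constant in the SPIR-V.
    entryStorage->offset = 0;               // Offset into the data blob referenced below.
    entryStorage->size = sizeof(uint32_t);

    VkSpecializationInfo info{};
    info.mapEntryCount = 1;
    info.pMapEntries = entryStorage;
    info.dataSize = sizeof(uint32_t);
    info.pData = value;
    return info;
}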
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
index f5ac7879936..72e2716beb1 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
@@ -37,10 +37,13 @@ namespace dawn_native { namespace vulkan {
VkPipeline GetHandle() const;
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~ComputePipeline() override;
using ComputePipelineBase::ComputePipelineBase;
- MaybeError Initialize(const ComputePipelineDescriptor* descriptor) override;
+ MaybeError Initialize() override;
VkPipeline mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 5e72eac2ead..1834ae637b3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -108,8 +108,9 @@ namespace dawn_native { namespace vulkan {
return BindGroup::Create(this, descriptor);
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) {
- return BindGroupLayout::Create(this, descriptor);
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) {
+ return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
@@ -131,9 +132,9 @@ namespace dawn_native { namespace vulkan {
const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return RenderPipeline::Create(this, descriptor);
+ return RenderPipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return Sampler::Create(this, descriptor);
@@ -167,6 +168,11 @@ namespace dawn_native { namespace vulkan {
void* userdata) {
ComputePipeline::CreateAsync(this, descriptor, blueprintHash, callback, userdata);
}
+ void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ RenderPipeline::InitializeAsync(renderPipeline, callback, userdata);
+ }
MaybeError Device::TickImpl() {
RecycleCompletedCommands();
@@ -342,19 +348,31 @@ namespace dawn_native { namespace vulkan {
usedKnobs.features.samplerAnisotropy = VK_TRUE;
}
- if (IsExtensionEnabled(Extension::TextureCompressionBC)) {
+ if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
VK_TRUE);
usedKnobs.features.textureCompressionBC = VK_TRUE;
}
- if (IsExtensionEnabled(Extension::PipelineStatisticsQuery)) {
+ if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
+ VK_TRUE);
+ usedKnobs.features.textureCompressionETC2 = VK_TRUE;
+ }
+
+ if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
+ VK_TRUE);
+ usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
+ }
+
+ if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
VK_TRUE);
usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
}
- if (IsExtensionEnabled(Extension::ShaderFloat16)) {
+ if (IsFeatureEnabled(Feature::ShaderFloat16)) {
const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
@@ -372,6 +390,11 @@ namespace dawn_native { namespace vulkan {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
}
+ if (IsFeatureEnabled(Feature::DepthClamping)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
+ usedKnobs.features.depthClamp = VK_TRUE;
+ }
+
// Find a universal queue family
{
// Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
@@ -864,6 +887,16 @@ namespace dawn_native { namespace vulkan {
VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
do {
+            // If WaitForIdleForDestruction is called while we are Disconnected, it means that
+            // the device loss came from the ErrorInjector and we need to wait without allowing
+            // any more errors to be injected. This is because the device loss was "fake" and
+            // commands might still be running.
+ if (GetState() == State::Disconnected) {
+ result = VkResult::WrapUnsafe(
+ fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
+ continue;
+ }
+
result = VkResult::WrapUnsafe(
INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
VK_ERROR_DEVICE_LOST));
@@ -923,7 +956,11 @@ namespace dawn_native { namespace vulkan {
}
mRecordingContext.signalSemaphores.clear();
+ // Some commands might still be marked as in-flight if we shut down because of a device
+ // loss. Recycle them as unused so that we free them below.
+ RecycleCompletedCommands();
ASSERT(mCommandsInFlight.Empty());
+
for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
// The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
// destroyed, but that's not the case in some drivers and they leak memory.
@@ -934,6 +971,13 @@ namespace dawn_native { namespace vulkan {
}
mUnusedCommands.clear();
+ // Some fences might still be marked as in-flight if we shut down because of a device loss.
+ // Delete them since at this point all commands are complete.
+ while (!mFencesInFlight.empty()) {
+ fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
+ mFencesInFlight.pop();
+ }
+
for (VkFence fence : mUnusedFences) {
fn.DestroyFence(mVkDevice, fence, nullptr);
}
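A minimal sketch of the fence cleanup added above, with assumed container and member types rather than Dawn's actual ones: once shutdown knows all GPU work has completed, fences still tracked as in flight can be destroyed directly instead of waiting for a completed serial to retire them.

#include <vulkan/vulkan.h>
#include <cstdint>
#include <queue>
#include <utility>

void DestroyInFlightFences(VkDevice device,
                           std::queue<std::pair<VkFence, uint64_t>>* fencesInFlight) {
    while (!fencesInFlight->empty()) {
        vkDestroyFence(device, fencesInFlight->front().first, nullptr);
        fencesInFlight->pop();
    }
}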
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index d28cec3f137..447023f1801 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -109,7 +109,8 @@ namespace dawn_native { namespace vulkan {
ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
- const BindGroupLayoutDescriptor* descriptor) override;
+ const BindGroupLayoutDescriptor* descriptor,
+ PipelineCompatibilityToken pipelineCompatibilityToken) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
@@ -118,7 +119,7 @@ namespace dawn_native { namespace vulkan {
const PipelineLayoutDescriptor* descriptor) override;
ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) override;
ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) override;
@@ -140,6 +141,9 @@ namespace dawn_native { namespace vulkan {
size_t blueprintHash,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) override;
+ void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) override;
ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
void GatherQueueFromDevice();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
index fe24980d70e..ba60c3dc7cb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
@@ -31,6 +31,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::QueryType::Timestamp:
return VK_QUERY_TYPE_TIMESTAMP;
}
+ UNREACHABLE();
}
VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
index 1bafac2088f..b91e46fb970 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
@@ -30,6 +30,17 @@ namespace dawn_native { namespace vulkan {
case wgpu::LoadOp::Clear:
return VK_ATTACHMENT_LOAD_OP_CLEAR;
}
+ UNREACHABLE();
+ }
+
+ VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
+ switch (op) {
+ case wgpu::StoreOp::Store:
+ return VK_ATTACHMENT_STORE_OP_STORE;
+ case wgpu::StoreOp::Discard:
+ return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ }
+ UNREACHABLE();
}
} // anonymous namespace
@@ -38,20 +49,26 @@ namespace dawn_native { namespace vulkan {
void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
wgpu::TextureFormat format,
wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
bool hasResolveTarget) {
colorMask.set(index);
colorFormats[index] = format;
colorLoadOp[index] = loadOp;
+ colorStoreOp[index] = storeOp;
resolveTargetMask[index] = hasResolveTarget;
}
void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
- wgpu::LoadOp depthLoadOp,
- wgpu::LoadOp stencilLoadOp) {
+ wgpu::LoadOp depthLoadOpIn,
+ wgpu::StoreOp depthStoreOpIn,
+ wgpu::LoadOp stencilLoadOpIn,
+ wgpu::StoreOp stencilStoreOpIn) {
hasDepthStencil = true;
depthStencilFormat = format;
- this->depthLoadOp = depthLoadOp;
- this->stencilLoadOp = stencilLoadOp;
+ depthLoadOp = depthLoadOpIn;
+ depthStoreOp = depthStoreOpIn;
+ stencilLoadOp = stencilLoadOpIn;
+ stencilStoreOp = stencilStoreOpIn;
}
void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
@@ -64,13 +81,16 @@ namespace dawn_native { namespace vulkan {
}
RenderPassCache::~RenderPassCache() {
+ std::lock_guard<std::mutex> lock(mMutex);
for (auto it : mCache) {
mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), it.second, nullptr);
}
+
mCache.clear();
}
ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+ std::lock_guard<std::mutex> lock(mMutex);
auto it = mCache.find(query);
if (it != mCache.end()) {
return VkRenderPass(it->second);
@@ -109,7 +129,7 @@ namespace dawn_native { namespace vulkan {
attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
attachmentDesc.samples = vkSampleCount;
attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
- attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
@@ -130,9 +150,9 @@ namespace dawn_native { namespace vulkan {
attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
attachmentDesc.samples = vkSampleCount;
attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
- attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
- attachmentDesc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
index 4ee944105d9..4503e1fe5e0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
@@ -25,6 +25,7 @@
#include <array>
#include <bitset>
+#include <mutex>
#include <unordered_map>
namespace dawn_native { namespace vulkan {
@@ -40,21 +41,27 @@ namespace dawn_native { namespace vulkan {
void SetColor(ColorAttachmentIndex index,
wgpu::TextureFormat format,
wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
bool hasResolveTarget);
void SetDepthStencil(wgpu::TextureFormat format,
wgpu::LoadOp depthLoadOp,
- wgpu::LoadOp stencilLoadOp);
+ wgpu::StoreOp depthStoreOp,
+ wgpu::LoadOp stencilLoadOp,
+ wgpu::StoreOp stencilStoreOp);
void SetSampleCount(uint32_t sampleCount);
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
+ ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
bool hasDepthStencil = false;
wgpu::TextureFormat depthStencilFormat;
wgpu::LoadOp depthLoadOp;
+ wgpu::StoreOp depthStoreOp;
wgpu::LoadOp stencilLoadOp;
+ wgpu::StoreOp stencilStoreOp;
uint32_t sampleCount;
};
@@ -63,6 +70,7 @@ namespace dawn_native { namespace vulkan {
// render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
// when creating render pass and framebuffer so that we can always make sure the order of
// attachments in the rendering pipeline matches the one of the framebuffer.
+ // All the operations on RenderPassCache are guaranteed to be thread-safe.
// TODO(cwallez@chromium.org): Make it an LRU cache somehow?
class RenderPassCache {
public:
@@ -86,6 +94,8 @@ namespace dawn_native { namespace vulkan {
std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
Device* mDevice = nullptr;
+
+ std::mutex mMutex;
Cache mCache;
};
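RenderPassCache now takes mMutex in both GetRenderPass and the destructor, so concurrent pipeline-creation threads can share a single VkRenderPass cache. A generic sketch of that locking pattern (class and method names here are illustrative, not Dawn's):

#include <mutex>
#include <unordered_map>

template <typename Key, typename Value, typename Hash, typename Eq>
class LockedCache {
  public:
    // Returns the cached value, creating it with `create()` under the lock if missing.
    template <typename CreateFn>
    Value GetOrCreate(const Key& key, CreateFn&& create) {
        std::lock_guard<std::mutex> lock(mMutex);
        auto it = mCache.find(key);
        if (it != mCache.end()) {
            return it->second;
        }
        Value value = create();
        mCache.emplace(key, value);
        return value;
    }

  private:
    std::mutex mMutex;
    std::unordered_map<Key, Value, Hash, Eq> mCache;
};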
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index 79dbb406273..93e6eb3ae47 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/RenderPipelineVk.h"
+#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
@@ -34,6 +35,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::VertexStepMode::Instance:
return VK_VERTEX_INPUT_RATE_INSTANCE;
}
+ UNREACHABLE();
}
VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
@@ -116,6 +118,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::PrimitiveTopology::TriangleStrip:
return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
}
+ UNREACHABLE();
}
bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
@@ -130,6 +133,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::PrimitiveTopology::TriangleStrip:
return true;
}
+ UNREACHABLE();
}
VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
@@ -139,6 +143,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::FrontFace::CW:
return VK_FRONT_FACE_CLOCKWISE;
}
+ UNREACHABLE();
}
VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
@@ -150,6 +155,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::CullMode::Back:
return VK_CULL_MODE_BACK_BIT;
}
+ UNREACHABLE();
}
VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
@@ -180,16 +186,8 @@ namespace dawn_native { namespace vulkan {
return VK_BLEND_FACTOR_CONSTANT_COLOR;
case wgpu::BlendFactor::OneMinusConstant:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
-
- // Deprecated blend factors should be normalized prior to this call.
- case wgpu::BlendFactor::SrcColor:
- case wgpu::BlendFactor::OneMinusSrcColor:
- case wgpu::BlendFactor::DstColor:
- case wgpu::BlendFactor::OneMinusDstColor:
- case wgpu::BlendFactor::BlendColor:
- case wgpu::BlendFactor::OneMinusBlendColor:
- UNREACHABLE();
}
+ UNREACHABLE();
}
VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
@@ -205,6 +203,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::BlendOperation::Max:
return VK_BLEND_OP_MAX;
}
+ UNREACHABLE();
}
VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
@@ -276,6 +275,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::StencilOperation::DecrementWrap:
return VK_STENCIL_OP_DECREMENT_AND_WRAP;
}
+ UNREACHABLE();
}
VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
@@ -328,42 +328,61 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
+ Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
Device* device,
const RenderPipelineDescriptor* descriptor) {
- Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
- DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline;
+ return AcquireRef(new RenderPipeline(device, descriptor));
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize() {
Device* device = ToBackend(GetDevice());
- VkPipelineShaderStageCreateInfo shaderStages[2];
- {
- // Generate a new VkShaderModule with BindingRemapper tint transform for each
- // pipeline
- DAWN_TRY_ASSIGN(shaderStages[0].module,
- ToBackend(descriptor->vertex.module)
- ->GetTransformedModuleHandle(descriptor->vertex.entryPoint,
- ToBackend(GetLayout())));
- DAWN_TRY_ASSIGN(shaderStages[1].module,
- ToBackend(descriptor->fragment->module)
- ->GetTransformedModuleHandle(descriptor->fragment->entryPoint,
+        // There are at most 2 shader stages in a render pipeline, i.e. vertex and fragment
+ std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
+ std::array<std::vector<SpecializationDataEntry>, 2> specializationDataEntriesPerStages;
+ std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
+ std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
+ uint32_t stageCount = 0;
+
+ for (auto stage : IterateStages(this->GetStageMask())) {
+ VkPipelineShaderStageCreateInfo shaderStage;
+
+ const ProgrammableStage& programmableStage = GetStage(stage);
+ DAWN_TRY_ASSIGN(shaderStage.module,
+ ToBackend(programmableStage.module)
+ ->GetTransformedModuleHandle(programmableStage.entryPoint.c_str(),
ToBackend(GetLayout())));
- shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- shaderStages[0].pNext = nullptr;
- shaderStages[0].flags = 0;
- shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
- shaderStages[0].pSpecializationInfo = nullptr;
- shaderStages[0].pName = descriptor->vertex.entryPoint;
-
- shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
- shaderStages[1].pNext = nullptr;
- shaderStages[1].flags = 0;
- shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
- shaderStages[1].pSpecializationInfo = nullptr;
- shaderStages[1].pName = descriptor->fragment->entryPoint;
+
+ shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ shaderStage.pNext = nullptr;
+ shaderStage.flags = 0;
+ shaderStage.pSpecializationInfo = nullptr;
+ shaderStage.pName = programmableStage.entryPoint.c_str();
+
+ switch (stage) {
+ case dawn_native::SingleShaderStage::Vertex: {
+ shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
+ break;
+ }
+ case dawn_native::SingleShaderStage::Fragment: {
+ shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ break;
+ }
+ default: {
+                    // For render pipelines, only the Vertex and Fragment stages are possible
+ DAWN_UNREACHABLE();
+ break;
+ }
+ }
+
+ shaderStage.pSpecializationInfo =
+ GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
+ &specializationDataEntriesPerStages[stageCount],
+ &specializationMapEntriesPerStages[stageCount]);
+
+ DAWN_ASSERT(stageCount < 2);
+ shaderStages[stageCount] = shaderStage;
+ stageCount++;
}
PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
@@ -404,7 +423,7 @@ namespace dawn_native { namespace vulkan {
rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization.pNext = nullptr;
rasterization.flags = 0;
- rasterization.depthClampEnable = VK_FALSE;
+ rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
rasterization.rasterizerDiscardEnable = VK_FALSE;
rasterization.polygonMode = VK_POLYGON_MODE_FILL;
rasterization.cullMode = VulkanCullMode(GetCullMode());
@@ -434,30 +453,35 @@ namespace dawn_native { namespace vulkan {
VkPipelineDepthStencilStateCreateInfo depthStencilState =
ComputeDepthStencilDesc(GetDepthStencilState());
- // Initialize the "blend state info" that will be chained in the "create info" from the data
- // pre-computed in the ColorState
+ VkPipelineColorBlendStateCreateInfo colorBlend;
+        // colorBlend may hold pointers to elements in colorBlendAttachments, so
+        // colorBlendAttachments must stay in scope at least as long as colorBlend
ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
colorBlendAttachments;
- const auto& fragmentOutputsWritten =
- GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
- for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorTargetState* target = GetColorTargetState(i);
- colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
+ if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+ // Initialize the "blend state info" that will be chained in the "create info" from the
+ // data pre-computed in the ColorState
+ const auto& fragmentOutputsWritten =
+ GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
+ for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+ const ColorTargetState* target = GetColorTargetState(i);
+ colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
+ }
+
+ colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlend.pNext = nullptr;
+ colorBlend.flags = 0;
+ // LogicOp isn't supported so we disable it.
+ colorBlend.logicOpEnable = VK_FALSE;
+ colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
+ colorBlend.attachmentCount = static_cast<uint32_t>(GetColorAttachmentsMask().count());
+ colorBlend.pAttachments = colorBlendAttachments.data();
+ // The blend constant is always dynamic so we fill in a dummy value
+ colorBlend.blendConstants[0] = 0.0f;
+ colorBlend.blendConstants[1] = 0.0f;
+ colorBlend.blendConstants[2] = 0.0f;
+ colorBlend.blendConstants[3] = 0.0f;
}
- VkPipelineColorBlendStateCreateInfo colorBlend;
- colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
- colorBlend.pNext = nullptr;
- colorBlend.flags = 0;
- // LogicOp isn't supported so we disable it.
- colorBlend.logicOpEnable = VK_FALSE;
- colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
- colorBlend.attachmentCount = static_cast<uint32_t>(GetColorAttachmentsMask().count());
- colorBlend.pAttachments = colorBlendAttachments.data();
- // The blend constant is always dynamic so we fill in a dummy value
- colorBlend.blendConstants[0] = 0.0f;
- colorBlend.blendConstants[1] = 0.0f;
- colorBlend.blendConstants[2] = 0.0f;
- colorBlend.blendConstants[3] = 0.0f;
// Tag all state as dynamic but stencil masks and depth bias.
VkDynamicState dynamicStates[] = {
@@ -472,19 +496,21 @@ namespace dawn_native { namespace vulkan {
dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
dynamic.pDynamicStates = dynamicStates;
- // Get a VkRenderPass that matches the attachment formats for this pipeline, load ops don't
- // matter so set them all to LoadOp::Load
+ // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
+ // don't matter so set them all to LoadOp::Load / StoreOp::Store
VkRenderPass renderPass = VK_NULL_HANDLE;
{
RenderPassCacheQuery query;
for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load, false);
+ query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
+ wgpu::StoreOp::Store, false);
}
if (HasDepthStencilAttachment()) {
query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
- wgpu::LoadOp::Load);
+ wgpu::StoreOp::Store, wgpu::LoadOp::Load,
+ wgpu::StoreOp::Store);
}
query.SetSampleCount(GetSampleCount());
@@ -498,8 +524,8 @@ namespace dawn_native { namespace vulkan {
createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.stageCount = 2;
- createInfo.pStages = shaderStages;
+ createInfo.stageCount = stageCount;
+ createInfo.pStages = shaderStages.data();
createInfo.pVertexInputState = &vertexInputCreateInfo;
createInfo.pInputAssemblyState = &inputAssembly;
createInfo.pTessellationState = nullptr;
@@ -507,7 +533,8 @@ namespace dawn_native { namespace vulkan {
createInfo.pRasterizationState = &rasterization;
createInfo.pMultisampleState = &multisample;
createInfo.pDepthStencilState = &depthStencilState;
- createInfo.pColorBlendState = &colorBlend;
+ createInfo.pColorBlendState =
+ (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
createInfo.pDynamicState = &dynamic;
createInfo.layout = ToBackend(GetLayout())->GetHandle();
createInfo.renderPass = renderPass;
@@ -515,10 +542,19 @@ namespace dawn_native { namespace vulkan {
createInfo.basePipelineHandle = VkPipeline{};
createInfo.basePipelineIndex = -1;
- return CheckVkSuccess(
+ DAWN_TRY(CheckVkSuccess(
device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
&createInfo, nullptr, &*mHandle),
- "CreateGraphicsPipeline");
+ "CreateGraphicsPipeline"));
+
+ SetLabelImpl();
+
+ return {};
+ }
+
+ void RenderPipeline::SetLabelImpl() {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_PIPELINE,
+ reinterpret_cast<uint64_t&>(mHandle), "Dawn_RenderPipeline", GetLabel());
}
VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
@@ -552,15 +588,15 @@ namespace dawn_native { namespace vulkan {
}
// Build the create info
- VkPipelineVertexInputStateCreateInfo mCreateInfo;
- mCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
- mCreateInfo.pNext = nullptr;
- mCreateInfo.flags = 0;
- mCreateInfo.vertexBindingDescriptionCount = bindingCount;
- mCreateInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
- mCreateInfo.vertexAttributeDescriptionCount = attributeCount;
- mCreateInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
- return mCreateInfo;
+ VkPipelineVertexInputStateCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.vertexBindingDescriptionCount = bindingCount;
+ createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
+ createInfo.vertexAttributeDescriptionCount = attributeCount;
+ createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
+ return createInfo;
}
RenderPipeline::~RenderPipeline() {
@@ -574,4 +610,13 @@ namespace dawn_native { namespace vulkan {
return mHandle;
}
+ void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+ userdata);
+ CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+ }
+
}} // namespace dawn_native::vulkan
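CreateUninitialized plus InitializeAsync split render-pipeline creation into a cheap frontend object and a deferred Initialize() that performs the expensive vkCreateGraphicsPipelines call through CreateRenderPipelineAsyncTask. A much-simplified sketch of that shape with stub types; Dawn queues the work onto its async task infrastructure and delivers the callback on a controlled thread rather than detaching a thread as done here:

#include <functional>
#include <memory>
#include <thread>
#include <utility>

struct PipelineStub {
    bool Initialize() { return true; }  // Stand-in for RenderPipeline::Initialize().
};

void InitializeAsyncSketch(std::shared_ptr<PipelineStub> pipeline,
                           std::function<void(bool)> callback) {
    std::thread([pipeline = std::move(pipeline), callback = std::move(callback)]() {
        callback(pipeline->Initialize());  // Report success or failure back to the caller.
    }).detach();
}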
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
index 7dfc4681871..0efd8680b71 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
@@ -26,16 +26,22 @@ namespace dawn_native { namespace vulkan {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<Ref<RenderPipeline>> Create(
- Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static Ref<RenderPipeline> CreateUninitialized(Device* device,
+ const RenderPipelineDescriptor* descriptor);
+ static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
VkPipeline GetHandle() const;
+ MaybeError Initialize() override;
+
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~RenderPipeline() override;
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
index c5e852b5e97..c056515a5c4 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
@@ -31,6 +31,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::AddressMode::ClampToEdge:
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
}
+ UNREACHABLE();
}
VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
@@ -40,6 +41,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::FilterMode::Nearest:
return VK_FILTER_NEAREST;
}
+ UNREACHABLE();
}
VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
@@ -49,6 +51,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::FilterMode::Nearest:
return VK_SAMPLER_MIPMAP_MODE_NEAREST;
}
+ UNREACHABLE();
}
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index 0282b77126e..9cad7086897 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -20,6 +20,7 @@
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <tint/tint.h>
@@ -83,13 +84,12 @@ namespace dawn_native { namespace vulkan {
if (GetDevice()->IsRobustnessEnabled()) {
ScopedTintICEHandler scopedICEHandler(GetDevice());
- tint::transform::BoundArrayAccessors boundArrayAccessors;
+ tint::transform::Robustness robustness;
tint::transform::DataMap transformInputs;
tint::Program program;
- DAWN_TRY_ASSIGN(program,
- RunTransforms(&boundArrayAccessors, parseResult->tintProgram.get(),
- transformInputs, nullptr, nullptr));
+ DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
+ transformInputs, nullptr, nullptr));
// Rather than use a new ParseResult object, we just reuse the original parseResult
parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
}
@@ -121,8 +121,7 @@ namespace dawn_native { namespace vulkan {
BindingRemapper::BindingPoints bindingPoints;
BindingRemapper::AccessControls accessControls;
- const EntryPointMetadata::BindingInfoArray& moduleBindingInfo =
- GetEntryPoint(entryPointName).bindings;
+ const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
@@ -188,6 +187,9 @@ namespace dawn_native { namespace vulkan {
mTransformedShaderModuleCache.AddOrGetCachedShaderModule(cacheKey, newHandle);
}
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_SHADER_MODULE,
+ reinterpret_cast<uint64_t&>(newHandle), "Dawn_ShaderModule", GetLabel());
+
return newHandle;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
index cf3129caa16..c78f0fc4368 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/ResourceHeapVk.h"
#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
+#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -57,6 +58,9 @@ namespace dawn_native { namespace vulkan {
return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
}
+ SetDebugName(mDevice, VK_OBJECT_TYPE_BUFFER, reinterpret_cast<uint64_t&>(mBuffer),
+ "Dawn_StagingBuffer");
+
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index 933f7b4d99c..c0f856fca3b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -194,6 +194,7 @@ namespace dawn_native { namespace vulkan {
case wgpu::PresentMode::Mailbox:
return VK_PRESENT_MODE_MAILBOX_KHR;
}
+ UNREACHABLE();
}
uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
@@ -204,8 +205,9 @@ namespace dawn_native { namespace vulkan {
case VK_PRESENT_MODE_MAILBOX_KHR:
return 3;
default:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index cbb2f2cef7b..70fbeee776a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -51,8 +51,9 @@ namespace dawn_native { namespace vulkan {
case wgpu::TextureViewDimension::e1D:
case wgpu::TextureViewDimension::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
// Computes which vulkan access type could be required for the given Dawn usage.
@@ -73,9 +74,6 @@ namespace dawn_native { namespace vulkan {
if (usage & wgpu::TextureUsage::StorageBinding) {
flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (usage & kReadOnlyStorageTexture) {
- flags |= VK_ACCESS_SHADER_READ_BIT;
- }
if (usage & wgpu::TextureUsage::RenderAttachment) {
if (format.HasDepthOrStencil()) {
flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
@@ -119,7 +117,7 @@ namespace dawn_native { namespace vulkan {
if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
}
- if (usage & (wgpu::TextureUsage::TextureBinding | kReadOnlyStorageTexture)) {
+ if (usage & wgpu::TextureUsage::TextureBinding) {
// TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
// introducing FS -> VS dependencies that would prevent parallelization on tiler
// GPUs
@@ -337,11 +335,94 @@ namespace dawn_native { namespace vulkan {
return VK_FORMAT_BC7_UNORM_BLOCK;
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return VK_FORMAT_BC7_SRGB_BLOCK;
+
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
+ case wgpu::TextureFormat::EACR11Unorm:
+ return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+ case wgpu::TextureFormat::EACR11Snorm:
+ return VK_FORMAT_EAC_R11_SNORM_BLOCK;
+ case wgpu::TextureFormat::EACRG11Unorm:
+ return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
+
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
// Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
@@ -358,7 +439,7 @@ namespace dawn_native { namespace vulkan {
if (usage & wgpu::TextureUsage::TextureBinding) {
flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
}
- if (usage & (wgpu::TextureUsage::StorageBinding | kReadOnlyStorageTexture)) {
+ if (usage & wgpu::TextureUsage::StorageBinding) {
flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
if (usage & wgpu::TextureUsage::RenderAttachment) {
@@ -383,7 +464,7 @@ namespace dawn_native { namespace vulkan {
if (!wgpu::HasZeroOrOneBits(usage)) {
// Sampled | ReadOnlyStorage is the only possible multi-bit usage, if more appear we
// might need additional special-casing.
- ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyStorageTexture));
+ ASSERT(usage == wgpu::TextureUsage::TextureBinding);
return VK_IMAGE_LAYOUT_GENERAL;
}
@@ -416,7 +497,6 @@ namespace dawn_native { namespace vulkan {
// and store operations on storage images can only be done on the images in
// VK_IMAGE_LAYOUT_GENERAL layout.
case wgpu::TextureUsage::StorageBinding:
- case kReadOnlyStorageTexture:
return VK_IMAGE_LAYOUT_GENERAL;
case wgpu::TextureUsage::RenderAttachment:
@@ -430,8 +510,9 @@ namespace dawn_native { namespace vulkan {
return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
case wgpu::TextureUsage::None:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
@@ -440,9 +521,8 @@ namespace dawn_native { namespace vulkan {
return VK_SAMPLE_COUNT_1_BIT;
case 4:
return VK_SAMPLE_COUNT_4_BIT;
- default:
- UNREACHABLE();
}
+ UNREACHABLE();
}
MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
@@ -577,6 +657,8 @@ namespace dawn_native { namespace vulkan {
GetAllSubresources(), TextureBase::ClearValue::NonZero));
}
+ SetLabelImpl();
+
return {};
}
@@ -611,11 +693,15 @@ namespace dawn_native { namespace vulkan {
baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
+
+ SetLabelHelper("Dawn_ExternalTexture");
+
return {};
}
void Texture::InitializeForSwapChain(VkImage nativeImage) {
mHandle = nativeImage;
+ SetLabelHelper("Dawn_SwapChainTexture");
}
MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
@@ -720,6 +806,15 @@ namespace dawn_native { namespace vulkan {
DestroyInternal();
}
+ void Texture::SetLabelHelper(const char* prefix) {
+ SetDebugName(ToBackend(GetDevice()), VK_OBJECT_TYPE_IMAGE,
+ reinterpret_cast<uint64_t&>(mHandle), prefix, GetLabel());
+ }
+
+ void Texture::SetLabelImpl() {
+ SetLabelHelper("Dawn_InternalTexture");
+ }
+
void Texture::DestroyImpl() {
if (GetTextureState() == TextureState::OwnedInternal) {
Device* device = ToBackend(GetDevice());
@@ -760,8 +855,9 @@ namespace dawn_native { namespace vulkan {
return VulkanAspectMask(Aspect::Stencil);
case wgpu::TextureAspect::Plane0Only:
case wgpu::TextureAspect::Plane1Only:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index e985c55071f..779d51b3398 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -93,6 +93,11 @@ namespace dawn_native { namespace vulkan {
VkImageLayout* releasedOldLayout,
VkImageLayout* releasedNewLayout);
+ void SetLabelHelper(const char* prefix);
+
+ // Dawn API
+ void SetLabelImpl() override;
+
private:
~Texture() override;
Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index c6952d4a923..6d73eec2891 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -17,8 +17,12 @@
#include "common/Assert.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Format.h"
+#include "dawn_native/Pipeline.h"
+#include "dawn_native/ShaderModule.h"
+#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/Forward.h"
#include "dawn_native/vulkan/TextureVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -42,8 +46,9 @@ namespace dawn_native { namespace vulkan {
return VK_COMPARE_OP_ALWAYS;
case wgpu::CompareFunction::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
// Convert Dawn texture aspects to Vulkan texture aspect flags
@@ -162,4 +167,93 @@ namespace dawn_native { namespace vulkan {
return region;
}
+
+ void SetDebugName(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label) {
+ if (!objectHandle) {
+ return;
+ }
+
+ if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+ VkDebugUtilsObjectNameInfoEXT objectNameInfo;
+ objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+ objectNameInfo.pNext = nullptr;
+ objectNameInfo.objectType = objectType;
+ objectNameInfo.objectHandle = objectHandle;
+
+ if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+ objectNameInfo.pObjectName = prefix;
+ device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+ return;
+ }
+
+ std::string objectName = prefix;
+ objectName += "_";
+ objectName += label;
+ objectNameInfo.pObjectName = objectName.c_str();
+ device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+ }
+ }
+
+ VkSpecializationInfo* GetVkSpecializationInfo(
+ const ProgrammableStage& programmableStage,
+ VkSpecializationInfo* specializationInfo,
+ std::vector<SpecializationDataEntry>* specializationDataEntries,
+ std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
+ ASSERT(specializationInfo);
+ ASSERT(specializationDataEntries);
+ ASSERT(specializationMapEntries);
+
+ if (programmableStage.constants.size() == 0) {
+ return nullptr;
+ }
+
+ const EntryPointMetadata& entryPointMetaData =
+ programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
+
+ for (const auto& pipelineConstant : programmableStage.constants) {
+ const std::string& name = pipelineConstant.first;
+ double value = pipelineConstant.second;
+
+ // This is already validated so `name` must exist
+ const auto& moduleConstant = entryPointMetaData.overridableConstants.at(name);
+
+ specializationMapEntries->push_back(
+ VkSpecializationMapEntry{moduleConstant.id,
+ static_cast<uint32_t>(specializationDataEntries->size() *
+ sizeof(SpecializationDataEntry)),
+ sizeof(SpecializationDataEntry)});
+
+ SpecializationDataEntry entry;
+ switch (moduleConstant.type) {
+ case EntryPointMetadata::OverridableConstant::Type::Boolean:
+ entry.b = static_cast<bool>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Float32:
+ entry.f32 = static_cast<float>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Int32:
+ entry.i32 = static_cast<int32_t>(value);
+ break;
+ case EntryPointMetadata::OverridableConstant::Type::Uint32:
+ entry.u32 = static_cast<uint32_t>(value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ specializationDataEntries->push_back(entry);
+ }
+
+ specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
+ specializationInfo->pMapEntries = specializationMapEntries->data();
+ specializationInfo->dataSize =
+ specializationDataEntries->size() * sizeof(SpecializationDataEntry);
+ specializationInfo->pData = specializationDataEntries->data();
+
+ return specializationInfo;
+ }
+
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
index e57e3f4c768..8bd2a845444 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
@@ -19,8 +19,14 @@
#include "dawn_native/Commands.h"
#include "dawn_native/dawn_platform.h"
+namespace dawn_native {
+ struct ProgrammableStage;
+} // namespace dawn_native
+
namespace dawn_native { namespace vulkan {
+ class Device;
+
// A Helper type used to build a pNext chain of extension structs.
// Usage is:
// 1) Create instance, passing the address of the first struct in the
@@ -99,6 +105,30 @@ namespace dawn_native { namespace vulkan {
const TextureCopy& textureCopy,
const Extent3D& copySize);
+ void SetDebugName(Device* device,
+ VkObjectType objectType,
+ uint64_t objectHandle,
+ const char* prefix,
+ std::string label = "");
+
+ // Helpers for creating VkSpecializationInfo
+ // The WebGPU overridable constants only support these scalar types
+ union SpecializationDataEntry {
+ bool b;
+ float f32;
+ int32_t i32;
+ uint32_t u32;
+ };
+
+ // Returns nullptr or &specializationInfo
+    // specializationInfo, specializationDataEntries and specializationMapEntries need to
+    // stay alive at least until the VkSpecializationInfo is passed into Vulkan Create*Pipelines
+ VkSpecializationInfo* GetVkSpecializationInfo(
+ const ProgrammableStage& programmableStage,
+ VkSpecializationInfo* specializationInfo,
+ std::vector<SpecializationDataEntry>* specializationDataEntries,
+ std::vector<VkSpecializationMapEntry>* specializationMapEntries);
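+
+    // Illustrative usage sketch, not verbatim Dawn code: `programmableStage` and
+    // `shaderStageCreateInfo` below are placeholder names assumed to be set up by
+    // the caller, which also owns all three out-parameters:
+    //
+    //   VkSpecializationInfo specializationInfo;
+    //   std::vector<SpecializationDataEntry> specializationDataEntries;
+    //   std::vector<VkSpecializationMapEntry> specializationMapEntries;
+    //
+    //   shaderStageCreateInfo.pSpecializationInfo =
+    //       GetVkSpecializationInfo(programmableStage, &specializationInfo,
+    //                               &specializationDataEntries, &specializationMapEntries);
+    //   // ... then pass shaderStageCreateInfo to vkCreateComputePipelines /
+    //   // vkCreateGraphicsPipelines before any of the three objects go out of scope.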
+
}} // namespace dawn_native::vulkan
#endif // DAWNNATIVE_VULKAN_UTILSVULKAN_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
index 8e1304faf1d..30aa336462b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
@@ -75,19 +75,6 @@ namespace dawn_native { namespace vulkan {
ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
: ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
}
-
- int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice, WGPUTexture cTexture) {
- // Doesn't actually matter if we use OpaqueFD or DmaBuf since these paths are the same right
- // now. This function will be removed.
- Device* device = reinterpret_cast<Device*>(cDevice);
- device->EmitDeprecationWarning(
- "ExportSignalSemaphoreOpaqueFD is deprecated. Please use ExportVulkanImage instead.");
- ExternalImageExportInfoOpaqueFD info;
- if (!ExportVulkanImage(cTexture, VK_IMAGE_LAYOUT_GENERAL, &info)) {
- return -1;
- }
- return info.semaphoreHandles[0];
- }
#endif // DAWN_PLATFORM_LINUX
WGPUTexture WrapVulkanImage(WGPUDevice cDevice, const ExternalImageDescriptorVk* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h
index 5eace87f7e3..91d9576b716 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreService.h
@@ -18,6 +18,8 @@
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
#include "dawn_native/vulkan/ExternalHandle.h"
+#include "dawn_native/vulkan/VulkanFunctions.h"
+#include "dawn_native/vulkan/VulkanInfo.h"
namespace dawn_native { namespace vulkan {
class Device;
diff --git a/chromium/third_party/dawn/src/dawn_node/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_node/CMakeLists.txt
new file mode 100644
index 00000000000..c8ac7af3a6f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/CMakeLists.txt
@@ -0,0 +1,124 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(GEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen")
+set(IDLGEN_TOOL_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tools/src/cmd/idlgen")
+
+# idlgen() is a function that uses the tools/cmd/idlgen/main.go tool to generate
+# code from an IDL file and template.
+# idlgen() accepts the following named arguments:
+# TEMPLATE <path> - (required) the path to the root .tmpl file. If the
+# template imports other templates, then these should be
+# added to the DEPENDS argument list.
+# OUTPUT <path> - (required) the output file path.
+# IDLS <paths> - (at least one required) the list of input WebIDL files.
+# DEPENDS <paths> - an optional list of additional file dependencies used.
+function(idlgen)
+ cmake_parse_arguments(IDLGEN
+ "" # options
+ "TEMPLATE;OUTPUT" # one_value_keywords
+ "IDLS;DEPENDS" # multi_value_keywords
+ ${ARGN})
+
+ if(NOT IDLGEN_TEMPLATE)
+ message(FATAL_ERROR "idlgen() missing TEMPLATE argument")
+ endif()
+ if(NOT IDLGEN_OUTPUT)
+ message(FATAL_ERROR "idlgen() missing OUTPUT argument")
+ endif()
+ if(NOT IDLGEN_IDLS)
+ message(FATAL_ERROR "idlgen() missing IDLS argument(s)")
+ endif()
+ add_custom_command(
+ COMMAND "go" "run" "main.go"
+ "--template" "${IDLGEN_TEMPLATE}"
+ "--output" "${IDLGEN_OUTPUT}"
+ ${IDLGEN_IDLS}
+ DEPENDS "${IDLGEN_TOOL_DIR}/main.go"
+ ${IDLGEN_TEMPLATE}
+ ${IDLGEN_DEPENDS}
+ ${IDLGEN_IDLS}
+ OUTPUT ${IDLGEN_OUTPUT}
+ WORKING_DIRECTORY ${IDLGEN_TOOL_DIR}
+ COMMENT "Generating ${IDLGEN_OUTPUT}"
+ )
+endfunction()
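+
+# A hypothetical idlgen() invocation, for illustration only; the template, IDL
+# and output paths below are placeholders, not necessarily files in this tree:
+#
+#   idlgen(
+#     TEMPLATE "${CMAKE_CURRENT_SOURCE_DIR}/interop/WebGPU.h.tmpl"
+#     OUTPUT   "${GEN_DIR}/interop/WebGPU.h"
+#     IDLS     "${CMAKE_CURRENT_SOURCE_DIR}/interop/Browser.idl"
+#     DEPENDS  "${CMAKE_CURRENT_SOURCE_DIR}/interop/WebGPU.common.tmpl"
+#   )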
+
+add_subdirectory(binding)
+add_subdirectory(interop)
+
+add_library(dawn_node SHARED
+ "Module.cpp"
+)
+set_target_properties(dawn_node PROPERTIES
+ PREFIX ""
+ OUTPUT_NAME "dawn"
+ SUFFIX ".node"
+ RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
+ LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}"
+ CXX_STANDARD 17
+)
+target_link_libraries(dawn_node dawn_node_binding dawn_node_interop dawn_native dawncpp dawn_proc)
+target_include_directories(dawn_node PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${NODE_API_HEADERS_DIR}/include"
+ "${NODE_ADDON_API_DIR}"
+ "${GEN_DIR}"
+)
+
+# To reduce the build dependencies for compiling the dawn.node targets, we do
+# not use cmake-js for building, but instead just depend on node_api_headers.
+# As the name suggests, node_api_headers contains just the *headers* of Napi,
+# and does not provide a library to link against.
+# Fortunately node_api_headers provides a list of Napi symbols exported by Node,
+# which we can use to either produce weak-symbol stubs (unix) or generate a .lib
+# (Windows).
+
+# Parse the Napi symbols from ${NODE_API_HEADERS_DIR}/symbols.js
+file(READ "${NODE_API_HEADERS_DIR}/symbols.js" NAPI_SYMBOLS_JS_CONTENT)
+string(REGEX MATCHALL "napi_[a-z0-9_]*" NAPI_SYMBOLS "${NAPI_SYMBOLS_JS_CONTENT}")
+
+if (WIN32)
+ # Generate the NapiSymbols.def file from the Napi symbol list
+ set(NAPI_SYMBOLS_DEF "${GEN_DIR}/NapiSymbols.def")
+ list(TRANSFORM NAPI_SYMBOLS PREPEND " ")
+ list(TRANSFORM NAPI_SYMBOLS APPEND "\n")
+ string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
+ string(PREPEND NAPI_SYMBOLS "LIBRARY node.exe\nEXPORTS\n")
+ file(GENERATE OUTPUT "${NAPI_SYMBOLS_DEF}" CONTENT "${NAPI_SYMBOLS}")
+ # Generate the NapiSymbols.lib from the NapiSymbols.def file
+ set(NAPI_SYMBOLS_LIB "${GEN_DIR}/NapiSymbols.lib")
+ # Resolve path to lib.exe
+ get_filename_component(VS_BIN_DIR "${CMAKE_LINKER}" DIRECTORY)
+ set(LIB_EXE "${VS_BIN_DIR}/lib.exe")
+ add_custom_command(
+ COMMAND "${LIB_EXE}"
+ "/DEF:${NAPI_SYMBOLS_DEF}"
+ "/OUT:${NAPI_SYMBOLS_LIB}"
+ DEPENDS "${NAPI_SYMBOLS_DEF}"
+ OUTPUT "${NAPI_SYMBOLS_LIB}"
+ COMMENT "Generating ${NAPI_SYMBOLS_LIB}"
+ )
+ add_custom_target(napi-symbols DEPENDS "${NAPI_SYMBOLS_LIB}")
+ add_dependencies(dawn_node napi-symbols)
+ target_link_libraries(dawn_node "${NAPI_SYMBOLS_LIB}")
+else()
+ # Generate the NapiSymbols.h file from the Napi symbol list
+ set(NAPI_SYMBOLS_H "${GEN_DIR}/NapiSymbols.h")
+ list(TRANSFORM NAPI_SYMBOLS PREPEND "NAPI_SYMBOL(")
+ list(TRANSFORM NAPI_SYMBOLS APPEND ")\n")
+ string(REPLACE ";" "" NAPI_SYMBOLS "${NAPI_SYMBOLS}")
+ file(GENERATE OUTPUT "${NAPI_SYMBOLS_H}" CONTENT "${NAPI_SYMBOLS}")
+ target_sources(dawn_node PRIVATE "NapiSymbols.cpp")
+endif()
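+
+# For reference, the generated NapiSymbols.h is expected to contain one
+# NAPI_SYMBOL(...) line per symbol parsed from symbols.js, roughly:
+#
+#   NAPI_SYMBOL(napi_get_undefined)
+#   NAPI_SYMBOL(napi_create_object)
+#   ...
+#
+# NapiSymbols.cpp defines NAPI_SYMBOL() to turn each of these lines into a
+# weak stub on non-Windows platforms.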
diff --git a/chromium/third_party/dawn/src/dawn_node/Module.cpp b/chromium/third_party/dawn/src/dawn_node/Module.cpp
new file mode 100644
index 00000000000..d7669b8e18e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/Module.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn/dawn_proc.h"
+#include "src/dawn_node/binding/GPU.h"
+
+// Initialize() initializes the Dawn node module, registering all the WebGPU
+// types into the global object, and adding the 'gpu' property on the exported
+// object.
+Napi::Object Initialize(Napi::Env env, Napi::Object exports) {
+ // Begin by setting the Dawn procedure function pointers.
+ dawnProcSetProcs(&dawn_native::GetProcs());
+
+ // Register all the interop types
+ wgpu::interop::Initialize(env);
+
+ // Construct a wgpu::interop::GPU interface, implemented by
+ // wgpu::bindings::GPU. This will be the 'gpu' field of exported object.
+ auto gpu = wgpu::interop::GPU::Create<wgpu::binding::GPU>(env);
+ exports.Set(Napi::String::New(env, "gpu"), gpu);
+ return exports;
+}
+
+NODE_API_MODULE(addon, Initialize)
diff --git a/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp b/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp
new file mode 100644
index 00000000000..3c4aac1e2ed
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/NapiSymbols.cpp
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/utils/Debug.h"
+
+// To reduce the build dependencies for compiling the dawn.node targets, we do
+// not use cmake-js for building, but instead just depend on node_api_headers.
+// As the name suggests, node_api_headers contains just the *headers* of Napi,
+// and does not provide a library to link against.
+// Fortunately node_api_headers provides a list of Napi symbols exported by Node,
+// which we can use to produce weak-symbol stubs.
+
+#ifdef _WIN32
+# error "NapiSymbols.cpp is not used on Windows"
+#endif
+
+#define NAPI_SYMBOL(NAME) \
+ __attribute__((weak)) void NAME() { \
+ UNREACHABLE( \
+ "#NAME is a weak stub, and should have been runtime replaced by the node " \
+ "implementation"); \
+ }
+
+extern "C" {
+// List of Napi symbols generated from the node_api_headers/symbols.js file
+#include "NapiSymbols.h"
+}
diff --git a/chromium/third_party/dawn/src/dawn_node/README.md b/chromium/third_party/dawn/src/dawn_node/README.md
new file mode 100644
index 00000000000..fe2b6f18e39
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/README.md
@@ -0,0 +1,70 @@
+# Dawn bindings for NodeJS
+
+Note: This code is currently WIP. There are a number of [known issues](#known-issues).
+
+## Building
+
+### System requirements
+
+- [CMake 3.10](https://cmake.org/download/) or greater
+- [Go 1.13](https://golang.org/dl/) or greater
+
+### Install `depot_tools`
+
+Dawn uses the Chromium build system and dependency management, so you need to [install depot_tools] and add it to the `PATH`.
+
+[install depot_tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up
+
+### Fetch dependencies
+
+The steps are similar to those in [`docs/building.md`](../../docs/building.md), but instead of the `Get the code` step, run:
+
+```sh
+# Clone the repo as "dawn"
+git clone https://dawn.googlesource.com/dawn dawn && cd dawn
+
+# Bootstrap the NodeJS binding gclient configuration
+cp scripts/standalone-with-node.gclient .gclient
+
+# Fetch external dependencies and toolchains with gclient
+gclient sync
+```
+
+### Build
+
+Currently, the node bindings can only be built with CMake:
+
+```sh
+mkdir <build-output-path>
+cd <build-output-path>
+cmake <dawn-root-path> -GNinja -DDAWN_BUILD_NODE_BINDINGS=1 -DDAWN_ENABLE_PIC=1
+ninja dawn.node
+```
+
+### Running WebGPU CTS
+
+1. [Build](#build) the `dawn.node` NodeJS module.
+2. Check out the [WebGPU CTS repo](https://github.com/gpuweb/cts), then run:
+
+```sh
+./src/dawn_node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node> [WebGPU CTS query]
+```
+
+If this fails with the error message `TypeError: expander is not a function or its return value is not iterable`, try passing `--build=false` as the first of the `run-cts` command-line flags.
+
+To test against SwiftShader instead of the default Vulkan device, prefix `./src/dawn_node/tools/run-cts` with `VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json`.
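+
+For example, combining the two commands above (the ICD JSON path is a placeholder for your own SwiftShader build output):
+
+```sh
+VK_ICD_FILENAMES=<swiftshader-cmake-build>/Linux/vk_swiftshader_icd.json \
+  ./src/dawn_node/tools/run-cts --cts=<path-to-webgpu-cts> --dawn-node=<path-to-dawn.node>
+```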
+
+## Known issues
+
+- Many WebGPU CTS tests are currently known to fail
+- Dawn uses special token values for some parameters / fields. These are currently passed straight through to Dawn from JavaScript. Discussions: [1](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn_node/binding/Converter.cpp#167), [2](https://dawn-review.googlesource.com/c/dawn/+/64907/5/src/dawn_node/binding/Converter.cpp#928), [3](https://dawn-review.googlesource.com/c/dawn/+/64909/4/src/dawn_node/binding/GPUTexture.cpp#42)
+- Backend validation is currently always set to 'full' to aid in debugging. This can be extremely slow. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn_node/binding/GPU.cpp#25)
+- Attempting to call `new T` in JavaScript, where `T` is an IDL interface type, should result in a TypeError "Illegal constructor". [discussion](https://dawn-review.googlesource.com/c/dawn/+/64902/9/src/dawn_node/interop/WebGPU.cpp.tmpl#293)
+- `GPUDevice` currently maintains a list of "lost promises" instead of returning the same promise each time. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64906/6/src/dawn_node/binding/GPUDevice.h#107)
+
+## Remaining work
+
+- Investigate CTS tests that fail but are not expected to.
+- Generated includes live in `src/` for `dawn_node`, but outside for Dawn. [discussion](https://dawn-review.googlesource.com/c/dawn/+/64903/9/src/dawn_node/interop/CMakeLists.txt#56)
+- Hook up to presubmit bots (CQ / Kokoro)
+- `binding::GPU` will require significant rework [once Dawn implements the device / adapter creation path properly](https://dawn-review.googlesource.com/c/dawn/+/64916/4/src/dawn_node/binding/GPU.cpp).
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp
new file mode 100644
index 00000000000..7162b2e754e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.cpp
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/AsyncRunner.h"
+
+#include <cassert>
+#include <limits>
+
+namespace wgpu { namespace binding {
+
+ AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
+ }
+
+ void AsyncRunner::Begin() {
+ assert(count_ != std::numeric_limits<decltype(count_)>::max());
+ if (count_++ == 0) {
+ QueueTick();
+ }
+ }
+
+ void AsyncRunner::End() {
+ assert(count_ > 0);
+ count_--;
+ }
+
+ void AsyncRunner::QueueTick() {
+ // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
+ // called.
+ env_.Global()
+ .Get("setImmediate")
+ .As<Napi::Function>()
+ .Call({
+ // TODO(crbug.com/dawn/1127): Create once, reuse.
+ Napi::Function::New(env_,
+ [this](const Napi::CallbackInfo&) {
+ if (count_ > 0) {
+ device_.Tick();
+ QueueTick();
+ }
+ }),
+ });
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h
new file mode 100644
index 00000000000..83644c09af8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/AsyncRunner.h
@@ -0,0 +1,76 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+#define DAWN_NODE_BINDING_ASYNC_RUNNER_H_
+
+#include <stdint.h>
+#include <memory>
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+
+namespace wgpu { namespace binding {
+
+ // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
+ // tasks in flight.
+ class AsyncRunner {
+ public:
+ AsyncRunner(Napi::Env env, wgpu::Device device);
+
+ // Begin() should be called when a new asynchronous task is started.
+ // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
+ // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
+ // thread is idle. This will be repeatedly called until the number of executing asynchronous
+ // tasks reaches 0 again.
+ void Begin();
+
+ // End() should be called once the asynchronous task has finished.
+ // Every call to Begin() should eventually result in a call to End().
+ void End();
+
+ private:
+ void QueueTick();
+ Napi::Env env_;
+ wgpu::Device const device_;
+ uint64_t count_ = 0;
+ };
+
+ // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
+ // AsyncRunner::End() on destruction.
+ class AsyncTask {
+ public:
+ inline AsyncTask(AsyncTask&&) = default;
+
+ // Constructor.
+ // Calls AsyncRunner::Begin()
+ inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
+ runner_->Begin();
+ };
+
+ // Destructor.
+ // Calls AsyncRunner::End()
+ inline ~AsyncTask() {
+ runner_->End();
+ }
+
+ private:
+ AsyncTask(const AsyncTask&) = delete;
+ AsyncTask& operator=(const AsyncTask&) = delete;
+ std::shared_ptr<AsyncRunner> runner_;
+ };
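+
+    // Illustrative usage sketch (the names below are placeholders, not part of
+    // this header): a binding that starts an asynchronous wgpu operation creates
+    // an AsyncTask up front and keeps it alive until the operation completes, so
+    // the device keeps being ticked in the meantime:
+    //
+    //   auto async = std::make_shared<AsyncRunner>(env, device);
+    //   ...
+    //   auto task = AsyncTask(async);  // calls AsyncRunner::Begin()
+    //   StartSomeAsyncWgpuOperation([task = std::move(task)](/* result */) {
+    //       // Resolve the JavaScript promise here. When this callback (and the
+    //       // captured `task`) is destroyed, AsyncRunner::End() is called.
+    //   });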
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_ASYNC_RUNNER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt
new file mode 100644
index 00000000000..554459d7aa3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/CMakeLists.txt
@@ -0,0 +1,86 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_node_binding STATIC
+ "AsyncRunner.cpp"
+ "AsyncRunner.h"
+ "Converter.cpp"
+ "Converter.h"
+ "Errors.cpp"
+ "Errors.h"
+ "GPU.cpp"
+ "GPU.h"
+ "GPUAdapter.cpp"
+ "GPUAdapter.h"
+ "GPUBindGroup.cpp"
+ "GPUBindGroup.h"
+ "GPUBindGroupLayout.cpp"
+ "GPUBindGroupLayout.h"
+ "GPUBuffer.cpp"
+ "GPUBuffer.h"
+ "GPUCommandBuffer.cpp"
+ "GPUCommandBuffer.h"
+ "GPUCommandEncoder.cpp"
+ "GPUCommandEncoder.h"
+ "GPUComputePassEncoder.cpp"
+ "GPUComputePassEncoder.h"
+ "GPUComputePipeline.cpp"
+ "GPUComputePipeline.h"
+ "GPUDevice.cpp"
+ "GPUDevice.h"
+ "GPUPipelineLayout.cpp"
+ "GPUPipelineLayout.h"
+ "GPUQuerySet.cpp"
+ "GPUQuerySet.h"
+ "GPUQueue.cpp"
+ "GPUQueue.h"
+ "GPURenderBundle.cpp"
+ "GPURenderBundle.h"
+ "GPURenderBundleEncoder.cpp"
+ "GPURenderBundleEncoder.h"
+ "GPURenderPassEncoder.cpp"
+ "GPURenderPassEncoder.h"
+ "GPURenderPipeline.cpp"
+ "GPURenderPipeline.h"
+ "GPUSampler.cpp"
+ "GPUSampler.h"
+ "GPUShaderModule.cpp"
+ "GPUShaderModule.h"
+ "GPUSupportedLimits.cpp"
+ "GPUSupportedLimits.h"
+ "GPUTexture.cpp"
+ "GPUTexture.h"
+ "GPUTextureView.cpp"
+ "GPUTextureView.h"
+)
+
+target_include_directories(dawn_node_binding
+ PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${NODE_API_HEADERS_DIR}/include"
+ "${NODE_ADDON_API_DIR}"
+ "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_binding
+ PRIVATE
+ dawncpp
+ dawn_node_interop
+)
+
+# dawn_node targets require C++17
+set_property(
+ TARGET dawn_node_binding
+ PROPERTY CXX_STANDARD 17
+)
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp b/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp
new file mode 100644
index 00000000000..61ba6cae350
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/Converter.cpp
@@ -0,0 +1,1141 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/Converter.h"
+
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUPipelineLayout.h"
+#include "src/dawn_node/binding/GPUSampler.h"
+#include "src/dawn_node/binding/GPUShaderModule.h"
+#include "src/dawn_node/binding/GPUTexture.h"
+#include "src/dawn_node/binding/GPUTextureView.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ Converter::~Converter() {
+ for (auto& free : free_) {
+ free();
+ }
+ }
+
+ bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
+ out.depthOrArrayLayers = dict->depthOrArrayLayers;
+ out.width = dict->width;
+ out.height = dict->height;
+ return true;
+ }
+ if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
+ switch (vec->size()) {
+ default:
+ case 3:
+ out.depthOrArrayLayers = (*vec)[2];
+ case 2: // fallthrough
+ out.height = (*vec)[1];
+ case 1: // fallthrough
+ out.width = (*vec)[0];
+ return true;
+ case 0:
+ break;
+ }
+ }
+ Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
+ out = {};
+ out.x = in.x;
+ out.y = in.y;
+ out.z = in.z;
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
+ out = {};
+ if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
+ out.r = dict->r;
+ out.g = dict->g;
+ out.b = dict->b;
+ out.a = dict->a;
+ return true;
+ }
+ if (auto* vec = std::get_if<std::vector<double>>(&in)) {
+ switch (vec->size()) {
+ default:
+ case 4:
+ out.a = (*vec)[3];
+ case 3: // fallthrough
+ out.b = (*vec)[2];
+ case 2: // fallthrough
+ out.g = (*vec)[1];
+ case 1: // fallthrough
+ out.r = (*vec)[0];
+ return true;
+ case 0:
+ break;
+ }
+ }
+ Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::Origin3D& out,
+ const std::vector<interop::GPUIntegerCoordinate>& in) {
+ out = {};
+ switch (in.size()) {
+ default:
+ case 3:
+ out.z = in[2];
+ case 2: // fallthrough
+ out.y = in[1];
+ case 1: // fallthrough
+ out.x = in[0];
+ case 0:
+ break;
+ }
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
+ out = wgpu::TextureAspect::All;
+ switch (in) {
+ case interop::GPUTextureAspect::kAll:
+ out = wgpu::TextureAspect::All;
+ return true;
+ case interop::GPUTextureAspect::kStencilOnly:
+ out = wgpu::TextureAspect::StencilOnly;
+ return true;
+ case interop::GPUTextureAspect::kDepthOnly:
+ out = wgpu::TextureAspect::DepthOnly;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
+ out = {};
+ return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
+ Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+ }
+
+ bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
+ out = {};
+ out.buffer = *in.buffer.As<GPUBuffer>();
+ return Convert(out.layout.offset, in.offset) &&
+ Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
+ Convert(out.layout.rowsPerImage, in.rowsPerImage);
+ }
+
+ bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
+ out = {};
+ if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
+ std::visit(
+ [&](auto&& v) {
+ auto arr = v.ArrayBuffer();
+ out.data = arr.Data();
+ out.size = arr.ByteLength();
+ },
+ *view);
+ return true;
+ }
+ if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
+ out.data = arr->Data();
+ out.size = arr->ByteLength();
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
+ out = {};
+ return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
+ Convert(out.rowsPerImage, in.rowsPerImage);
+ }
+
+ bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
+ out = wgpu::TextureFormat::Undefined;
+ switch (in) {
+ case interop::GPUTextureFormat::kR8Unorm:
+ out = wgpu::TextureFormat::R8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Snorm:
+ out = wgpu::TextureFormat::R8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kR8Uint:
+ out = wgpu::TextureFormat::R8Uint;
+ return true;
+ case interop::GPUTextureFormat::kR8Sint:
+ out = wgpu::TextureFormat::R8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Uint:
+ out = wgpu::TextureFormat::R16Uint;
+ return true;
+ case interop::GPUTextureFormat::kR16Sint:
+ out = wgpu::TextureFormat::R16Sint;
+ return true;
+ case interop::GPUTextureFormat::kR16Float:
+ out = wgpu::TextureFormat::R16Float;
+ return true;
+ case interop::GPUTextureFormat::kRg8Unorm:
+ out = wgpu::TextureFormat::RG8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Snorm:
+ out = wgpu::TextureFormat::RG8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRg8Uint:
+ out = wgpu::TextureFormat::RG8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg8Sint:
+ out = wgpu::TextureFormat::RG8Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Uint:
+ out = wgpu::TextureFormat::R32Uint;
+ return true;
+ case interop::GPUTextureFormat::kR32Sint:
+ out = wgpu::TextureFormat::R32Sint;
+ return true;
+ case interop::GPUTextureFormat::kR32Float:
+ out = wgpu::TextureFormat::R32Float;
+ return true;
+ case interop::GPUTextureFormat::kRg16Uint:
+ out = wgpu::TextureFormat::RG16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Sint:
+ out = wgpu::TextureFormat::RG16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg16Float:
+ out = wgpu::TextureFormat::RG16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Unorm:
+ out = wgpu::TextureFormat::RGBA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8UnormSrgb:
+ out = wgpu::TextureFormat::RGBA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Snorm:
+ out = wgpu::TextureFormat::RGBA8Snorm;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Uint:
+ out = wgpu::TextureFormat::RGBA8Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba8Sint:
+ out = wgpu::TextureFormat::RGBA8Sint;
+ return true;
+ case interop::GPUTextureFormat::kBgra8Unorm:
+ out = wgpu::TextureFormat::BGRA8Unorm;
+ return true;
+ case interop::GPUTextureFormat::kBgra8UnormSrgb:
+ out = wgpu::TextureFormat::BGRA8UnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kRgb9E5Ufloat:
+ out = wgpu::TextureFormat::RGB9E5Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRgb10A2Unorm:
+ out = wgpu::TextureFormat::RGB10A2Unorm;
+ return true;
+ case interop::GPUTextureFormat::kRg11B10Ufloat:
+ out = wgpu::TextureFormat::RG11B10Ufloat;
+ return true;
+ case interop::GPUTextureFormat::kRg32Uint:
+ out = wgpu::TextureFormat::RG32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Sint:
+ out = wgpu::TextureFormat::RG32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRg32Float:
+ out = wgpu::TextureFormat::RG32Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Uint:
+ out = wgpu::TextureFormat::RGBA16Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Sint:
+ out = wgpu::TextureFormat::RGBA16Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba16Float:
+ out = wgpu::TextureFormat::RGBA16Float;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Uint:
+ out = wgpu::TextureFormat::RGBA32Uint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Sint:
+ out = wgpu::TextureFormat::RGBA32Sint;
+ return true;
+ case interop::GPUTextureFormat::kRgba32Float:
+ out = wgpu::TextureFormat::RGBA32Float;
+ return true;
+ case interop::GPUTextureFormat::kStencil8:
+ out = wgpu::TextureFormat::Stencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth16Unorm:
+ break; // TODO(crbug.com/dawn/1130): Unsupported.
+ case interop::GPUTextureFormat::kDepth24Plus:
+ out = wgpu::TextureFormat::Depth24Plus;
+ return true;
+ case interop::GPUTextureFormat::kDepth24PlusStencil8:
+ out = wgpu::TextureFormat::Depth24PlusStencil8;
+ return true;
+ case interop::GPUTextureFormat::kDepth32Float:
+ out = wgpu::TextureFormat::Depth32Float;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnorm:
+ out = wgpu::TextureFormat::BC1RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnorm:
+ out = wgpu::TextureFormat::BC2RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnorm:
+ out = wgpu::TextureFormat::BC3RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kBc4RUnorm:
+ out = wgpu::TextureFormat::BC4RUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc4RSnorm:
+ out = wgpu::TextureFormat::BC4RSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgUnorm:
+ out = wgpu::TextureFormat::BC5RGUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc5RgSnorm:
+ out = wgpu::TextureFormat::BC5RGSnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbUfloat:
+ out = wgpu::TextureFormat::BC6HRGBUfloat;
+ return true;
+ case interop::GPUTextureFormat::kBc6HRgbFloat:
+ out = wgpu::TextureFormat::BC6HRGBFloat;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnorm:
+ out = wgpu::TextureFormat::BC7RGBAUnorm;
+ return true;
+ case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
+ out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
+ return true;
+ case interop::GPUTextureFormat::kDepth24UnormStencil8:
+ break; // TODO(crbug.com/dawn/1130): Unsupported.
+ case interop::GPUTextureFormat::kDepth32FloatStencil8:
+ break; // TODO(crbug.com/dawn/1130): Unsupported.
+ }
+ // TODO(crbug.com/dawn/1130): Add ASTC and ETC formats.
+ Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
+ out = static_cast<wgpu::TextureUsage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
+ out = static_cast<wgpu::ColorWriteMask>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
+ out = static_cast<wgpu::BufferUsage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
+ out = static_cast<wgpu::MapMode>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
+ out = static_cast<wgpu::ShaderStage>(in);
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
+ out = wgpu::TextureDimension::e1D;
+ switch (in) {
+ case interop::GPUTextureDimension::k1D:
+ out = wgpu::TextureDimension::e1D;
+ return true;
+ case interop::GPUTextureDimension::k2D:
+ out = wgpu::TextureDimension::e2D;
+ return true;
+ case interop::GPUTextureDimension::k3D:
+ out = wgpu::TextureDimension::e3D;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureViewDimension& out,
+ const interop::GPUTextureViewDimension& in) {
+ out = wgpu::TextureViewDimension::Undefined;
+ switch (in) {
+ case interop::GPUTextureViewDimension::k1D:
+ out = wgpu::TextureViewDimension::e1D;
+ return true;
+ case interop::GPUTextureViewDimension::k2D:
+ out = wgpu::TextureViewDimension::e2D;
+ return true;
+ case interop::GPUTextureViewDimension::k2DArray:
+ out = wgpu::TextureViewDimension::e2DArray;
+ return true;
+ case interop::GPUTextureViewDimension::kCube:
+ out = wgpu::TextureViewDimension::Cube;
+ return true;
+ case interop::GPUTextureViewDimension::kCubeArray:
+ out = wgpu::TextureViewDimension::CubeArray;
+ return true;
+ case interop::GPUTextureViewDimension::k3D:
+ out = wgpu::TextureViewDimension::e3D;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
+ const interop::GPUProgrammableStage& in) {
+ out = {};
+ out.entryPoint = in.entryPoint.c_str();
+ out.module = *in.module.As<GPUShaderModule>();
+ return true;
+ }
+
+ bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
+ out = {};
+ return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
+ Convert(out.srcFactor, in.srcFactor);
+ }
+
+ bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
+ out = wgpu::BlendFactor::Zero;
+ switch (in) {
+ case interop::GPUBlendFactor::kZero:
+ out = wgpu::BlendFactor::Zero;
+ return true;
+ case interop::GPUBlendFactor::kOne:
+ out = wgpu::BlendFactor::One;
+ return true;
+ case interop::GPUBlendFactor::kSrc:
+ out = wgpu::BlendFactor::Src;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrc:
+ out = wgpu::BlendFactor::OneMinusSrc;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlpha:
+ out = wgpu::BlendFactor::SrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusSrcAlpha:
+ out = wgpu::BlendFactor::OneMinusSrcAlpha;
+ return true;
+ case interop::GPUBlendFactor::kDst:
+ out = wgpu::BlendFactor::Dst;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDst:
+ out = wgpu::BlendFactor::OneMinusDst;
+ return true;
+ case interop::GPUBlendFactor::kDstAlpha:
+ out = wgpu::BlendFactor::DstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusDstAlpha:
+ out = wgpu::BlendFactor::OneMinusDstAlpha;
+ return true;
+ case interop::GPUBlendFactor::kSrcAlphaSaturated:
+ out = wgpu::BlendFactor::SrcAlphaSaturated;
+ return true;
+ case interop::GPUBlendFactor::kConstant:
+ out = wgpu::BlendFactor::Constant;
+ return true;
+ case interop::GPUBlendFactor::kOneMinusConstant:
+ out = wgpu::BlendFactor::OneMinusConstant;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
+ out = wgpu::BlendOperation::Add;
+ switch (in) {
+ case interop::GPUBlendOperation::kAdd:
+ out = wgpu::BlendOperation::Add;
+ return true;
+ case interop::GPUBlendOperation::kSubtract:
+ out = wgpu::BlendOperation::Subtract;
+ return true;
+ case interop::GPUBlendOperation::kReverseSubtract:
+ out = wgpu::BlendOperation::ReverseSubtract;
+ return true;
+ case interop::GPUBlendOperation::kMin:
+ out = wgpu::BlendOperation::Min;
+ return true;
+ case interop::GPUBlendOperation::kMax:
+ out = wgpu::BlendOperation::Max;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
+ out = {};
+ return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
+ }
+
+ bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
+ out = {};
+ return Convert(out.topology, in.topology) &&
+ Convert(out.stripIndexFormat, in.stripIndexFormat) &&
+ Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
+ }
+
+ bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
+ out = {};
+ return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
+ Convert(out.writeMask, in.writeMask);
+ }
+
+ bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
+ out = {};
+ return Convert(out.format, in.format) &&
+ Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
+ Convert(out.depthCompare, in.depthCompare) &&
+ Convert(out.stencilFront, in.stencilFront) &&
+ Convert(out.stencilBack, in.stencilBack) &&
+ Convert(out.stencilReadMask, in.stencilReadMask) &&
+ Convert(out.stencilWriteMask, in.stencilWriteMask) &&
+ Convert(out.depthBias, in.depthBias) &&
+ Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
+ Convert(out.depthBiasClamp, in.depthBiasClamp);
+ }
+
+ bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
+ out = {};
+ return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
+ Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
+ }
+
+ bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
+ out = {};
+ return Convert(out.targets, out.targetCount, in.targets) &&
+ Convert(out.module, in.module) && Convert(out.entryPoint, in.entryPoint);
+ }
+
+ bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
+ out = wgpu::PrimitiveTopology::LineList;
+ switch (in) {
+ case interop::GPUPrimitiveTopology::kPointList:
+ out = wgpu::PrimitiveTopology::PointList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineList:
+ out = wgpu::PrimitiveTopology::LineList;
+ return true;
+ case interop::GPUPrimitiveTopology::kLineStrip:
+ out = wgpu::PrimitiveTopology::LineStrip;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleList:
+ out = wgpu::PrimitiveTopology::TriangleList;
+ return true;
+ case interop::GPUPrimitiveTopology::kTriangleStrip:
+ out = wgpu::PrimitiveTopology::TriangleStrip;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
+ out = wgpu::FrontFace::CW;
+ switch (in) {
+ case interop::GPUFrontFace::kCw:
+ out = wgpu::FrontFace::CW;
+ return true;
+ case interop::GPUFrontFace::kCcw:
+ out = wgpu::FrontFace::CCW;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
+ out = wgpu::CullMode::None;
+ switch (in) {
+ case interop::GPUCullMode::kNone:
+ out = wgpu::CullMode::None;
+ return true;
+ case interop::GPUCullMode::kFront:
+ out = wgpu::CullMode::Front;
+ return true;
+ case interop::GPUCullMode::kBack:
+ out = wgpu::CullMode::Back;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
+ out = wgpu::CompareFunction::Undefined;
+ switch (in) {
+ case interop::GPUCompareFunction::kNever:
+ out = wgpu::CompareFunction::Never;
+ return true;
+ case interop::GPUCompareFunction::kLess:
+ out = wgpu::CompareFunction::Less;
+ return true;
+ case interop::GPUCompareFunction::kLessEqual:
+ out = wgpu::CompareFunction::LessEqual;
+ return true;
+ case interop::GPUCompareFunction::kGreater:
+ out = wgpu::CompareFunction::Greater;
+ return true;
+ case interop::GPUCompareFunction::kGreaterEqual:
+ out = wgpu::CompareFunction::GreaterEqual;
+ return true;
+ case interop::GPUCompareFunction::kEqual:
+ out = wgpu::CompareFunction::Equal;
+ return true;
+ case interop::GPUCompareFunction::kNotEqual:
+ out = wgpu::CompareFunction::NotEqual;
+ return true;
+ case interop::GPUCompareFunction::kAlways:
+ out = wgpu::CompareFunction::Always;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
+ out = wgpu::IndexFormat::Undefined;
+ switch (in) {
+ case interop::GPUIndexFormat::kUint16:
+ out = wgpu::IndexFormat::Uint16;
+ return true;
+ case interop::GPUIndexFormat::kUint32:
+ out = wgpu::IndexFormat::Uint32;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
+ out = wgpu::StencilOperation::Zero;
+ switch (in) {
+ case interop::GPUStencilOperation::kKeep:
+ out = wgpu::StencilOperation::Keep;
+ return true;
+ case interop::GPUStencilOperation::kZero:
+ out = wgpu::StencilOperation::Zero;
+ return true;
+ case interop::GPUStencilOperation::kReplace:
+ out = wgpu::StencilOperation::Replace;
+ return true;
+ case interop::GPUStencilOperation::kInvert:
+ out = wgpu::StencilOperation::Invert;
+ return true;
+ case interop::GPUStencilOperation::kIncrementClamp:
+ out = wgpu::StencilOperation::IncrementClamp;
+ return true;
+ case interop::GPUStencilOperation::kDecrementClamp:
+ out = wgpu::StencilOperation::DecrementClamp;
+ return true;
+ case interop::GPUStencilOperation::kIncrementWrap:
+ out = wgpu::StencilOperation::IncrementWrap;
+ return true;
+ case interop::GPUStencilOperation::kDecrementWrap:
+ out = wgpu::StencilOperation::DecrementWrap;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
+ return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
+ Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+ }
+
+ bool Converter::Convert(wgpu::VertexBufferLayout& out,
+ const interop::GPUVertexBufferLayout& in) {
+ out = {};
+ return Convert(out.attributes, out.attributeCount, in.attributes) &&
+ Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+ }
+
+ bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
+ out = {};
+ return Convert(out.module, in.module) &&
+ Convert(out.buffers, out.bufferCount, in.buffers) &&
+ Convert(out.entryPoint, in.entryPoint);
+ }
+
+ bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
+ out = wgpu::VertexStepMode::Instance;
+ switch (in) {
+ case interop::GPUVertexStepMode::kInstance:
+ out = wgpu::VertexStepMode::Instance;
+ return true;
+ case interop::GPUVertexStepMode::kVertex:
+ out = wgpu::VertexStepMode::Vertex;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
+ return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
+ Convert(out.shaderLocation, in.shaderLocation);
+ }
+
+ bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
+ out = wgpu::VertexFormat::Undefined;
+ switch (in) {
+ case interop::GPUVertexFormat::kUint8X2:
+ out = wgpu::VertexFormat::Uint8x2;
+ return true;
+ case interop::GPUVertexFormat::kUint8X4:
+ out = wgpu::VertexFormat::Uint8x4;
+ return true;
+ case interop::GPUVertexFormat::kSint8X2:
+ out = wgpu::VertexFormat::Sint8x2;
+ return true;
+ case interop::GPUVertexFormat::kSint8X4:
+ out = wgpu::VertexFormat::Sint8x4;
+ return true;
+ case interop::GPUVertexFormat::kUnorm8X2:
+ out = wgpu::VertexFormat::Unorm8x2;
+ return true;
+ case interop::GPUVertexFormat::kUnorm8X4:
+ out = wgpu::VertexFormat::Unorm8x4;
+ return true;
+ case interop::GPUVertexFormat::kSnorm8X2:
+ out = wgpu::VertexFormat::Snorm8x2;
+ return true;
+ case interop::GPUVertexFormat::kSnorm8X4:
+ out = wgpu::VertexFormat::Snorm8x4;
+ return true;
+ case interop::GPUVertexFormat::kUint16X2:
+ out = wgpu::VertexFormat::Uint16x2;
+ return true;
+ case interop::GPUVertexFormat::kUint16X4:
+ out = wgpu::VertexFormat::Uint16x4;
+ return true;
+ case interop::GPUVertexFormat::kSint16X2:
+ out = wgpu::VertexFormat::Sint16x2;
+ return true;
+ case interop::GPUVertexFormat::kSint16X4:
+ out = wgpu::VertexFormat::Sint16x4;
+ return true;
+ case interop::GPUVertexFormat::kUnorm16X2:
+ out = wgpu::VertexFormat::Unorm16x2;
+ return true;
+ case interop::GPUVertexFormat::kUnorm16X4:
+ out = wgpu::VertexFormat::Unorm16x4;
+ return true;
+ case interop::GPUVertexFormat::kSnorm16X2:
+ out = wgpu::VertexFormat::Snorm16x2;
+ return true;
+ case interop::GPUVertexFormat::kSnorm16X4:
+ out = wgpu::VertexFormat::Snorm16x4;
+ return true;
+ case interop::GPUVertexFormat::kFloat16X2:
+ out = wgpu::VertexFormat::Float16x2;
+ return true;
+ case interop::GPUVertexFormat::kFloat16X4:
+ out = wgpu::VertexFormat::Float16x4;
+ return true;
+ case interop::GPUVertexFormat::kFloat32:
+ out = wgpu::VertexFormat::Float32;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X2:
+ out = wgpu::VertexFormat::Float32x2;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X3:
+ out = wgpu::VertexFormat::Float32x3;
+ return true;
+ case interop::GPUVertexFormat::kFloat32X4:
+ out = wgpu::VertexFormat::Float32x4;
+ return true;
+ case interop::GPUVertexFormat::kUint32:
+ out = wgpu::VertexFormat::Uint32;
+ return true;
+ case interop::GPUVertexFormat::kUint32X2:
+ out = wgpu::VertexFormat::Uint32x2;
+ return true;
+ case interop::GPUVertexFormat::kUint32X3:
+ out = wgpu::VertexFormat::Uint32x3;
+ return true;
+ case interop::GPUVertexFormat::kUint32X4:
+ out = wgpu::VertexFormat::Uint32x4;
+ return true;
+ case interop::GPUVertexFormat::kSint32:
+ out = wgpu::VertexFormat::Sint32;
+ return true;
+ case interop::GPUVertexFormat::kSint32X2:
+ out = wgpu::VertexFormat::Sint32x2;
+ return true;
+ case interop::GPUVertexFormat::kSint32X3:
+ out = wgpu::VertexFormat::Sint32x3;
+ return true;
+ case interop::GPUVertexFormat::kSint32X4:
+ out = wgpu::VertexFormat::Sint32x4;
+ return true;
+ default:
+ break;
+ }
+ Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
+ const interop::GPURenderPassColorAttachment& in) {
+ out = {};
+ if (auto* op = std::get_if<interop::GPULoadOp>(&in.loadValue)) {
+ if (!Convert(out.loadOp, *op)) {
+ return false;
+ }
+ } else if (auto* color = std::get_if<interop::GPUColor>(&in.loadValue)) {
+ out.loadOp = wgpu::LoadOp::Clear;
+ if (!Convert(out.clearColor, *color)) {
+ return false;
+ }
+ } else {
+ Napi::Error::New(env, "invalid value for GPURenderPassColorAttachment.loadValue")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ return Convert(out.view, in.view) && Convert(out.resolveTarget, in.resolveTarget) &&
+ Convert(out.storeOp, in.storeOp);
+ }
+
+ bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
+ const interop::GPURenderPassDepthStencilAttachment& in) {
+ out = {};
+ if (auto* op = std::get_if<interop::GPULoadOp>(&in.depthLoadValue)) {
+ if (!Convert(out.depthLoadOp, *op)) {
+ return false;
+ }
+ } else if (auto* value = std::get_if<float>(&in.depthLoadValue)) {
+            out.depthLoadOp = wgpu::LoadOp::Clear;
+ if (!Convert(out.clearDepth, *value)) {
+ return false;
+ }
+ } else {
+ Napi::Error::New(env,
+ "invalid value for GPURenderPassDepthStencilAttachment.depthLoadValue")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ if (auto* op = std::get_if<interop::GPULoadOp>(&in.stencilLoadValue)) {
+ if (!Convert(out.stencilLoadOp, *op)) {
+ return false;
+ }
+        } else if (auto* value = std::get_if<interop::GPUStencilValue>(&in.stencilLoadValue)) {
+            // A clear value implies a clear load op (mirrors the depth path above).
+            out.stencilLoadOp = wgpu::LoadOp::Clear;
+            if (!Convert(out.clearStencil, *value)) {
+ return false;
+ }
+ } else {
+ Napi::Error::New(env,
+ "invalid value for "
+ "GPURenderPassDepthStencilAttachment.stencilLoadValue")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ return Convert(out.view, in.view) && Convert(out.depthStoreOp, in.depthStoreOp) &&
+ Convert(out.depthReadOnly, in.depthReadOnly) &&
+ Convert(out.stencilStoreOp, in.stencilStoreOp) &&
+ Convert(out.stencilReadOnly, in.stencilReadOnly);
+ }
+
+ bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
+ out = wgpu::LoadOp::Clear;
+ switch (in) {
+ case interop::GPULoadOp::kLoad:
+ out = wgpu::LoadOp::Load;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
+ out = wgpu::StoreOp::Store;
+ switch (in) {
+ case interop::GPUStoreOp::kStore:
+ out = wgpu::StoreOp::Store;
+ return true;
+ case interop::GPUStoreOp::kDiscard:
+ out = wgpu::StoreOp::Discard;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
+ out = {};
+ if (!Convert(out.binding, in.binding)) {
+ return false;
+ }
+
+ if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
+ return Convert(out.sampler, *res);
+ }
+ if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
+ return Convert(out.textureView, *res);
+ }
+ if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
+ auto buffer = res->buffer.As<GPUBuffer>();
+ out.size = wgpu::kWholeSize;
+ if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
+ return false;
+ }
+ out.buffer = *buffer;
+ return true;
+ }
+ if (auto* res =
+ std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
+ // TODO(crbug.com/dawn/1129): External textures
+ UNIMPLEMENTED();
+ }
+ Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
+ const interop::GPUBindGroupLayoutEntry& in) {
+ // TODO(crbug.com/dawn/1129): External textures
+ return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
+ Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
+ Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+ }
+
+ bool Converter::Convert(wgpu::BufferBindingLayout& out,
+ const interop::GPUBufferBindingLayout& in) {
+ return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
+ Convert(out.minBindingSize, in.minBindingSize);
+ }
+
+ bool Converter::Convert(wgpu::SamplerBindingLayout& out,
+ const interop::GPUSamplerBindingLayout& in) {
+ return Convert(out.type, in.type);
+ }
+
+ bool Converter::Convert(wgpu::TextureBindingLayout& out,
+ const interop::GPUTextureBindingLayout& in) {
+ return Convert(out.sampleType, in.sampleType) &&
+ Convert(out.viewDimension, in.viewDimension) &&
+ Convert(out.multisampled, in.multisampled);
+ }
+
+ bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
+ const interop::GPUStorageTextureBindingLayout& in) {
+ return Convert(out.access, in.access) && Convert(out.format, in.format) &&
+ Convert(out.viewDimension, in.viewDimension);
+ }
+
+ bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
+ out = wgpu::BufferBindingType::Undefined;
+ switch (in) {
+ case interop::GPUBufferBindingType::kUniform:
+ out = wgpu::BufferBindingType::Uniform;
+ return true;
+ case interop::GPUBufferBindingType::kStorage:
+ out = wgpu::BufferBindingType::Storage;
+ return true;
+ case interop::GPUBufferBindingType::kReadOnlyStorage:
+ out = wgpu::BufferBindingType::ReadOnlyStorage;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUBufferBindingType")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
+ out = wgpu::TextureSampleType::Undefined;
+ switch (in) {
+ case interop::GPUTextureSampleType::kFloat:
+ out = wgpu::TextureSampleType::Float;
+ return true;
+ case interop::GPUTextureSampleType::kUnfilterableFloat:
+ out = wgpu::TextureSampleType::UnfilterableFloat;
+ return true;
+ case interop::GPUTextureSampleType::kDepth:
+ out = wgpu::TextureSampleType::Depth;
+ return true;
+ case interop::GPUTextureSampleType::kSint:
+ out = wgpu::TextureSampleType::Sint;
+ return true;
+ case interop::GPUTextureSampleType::kUint:
+ out = wgpu::TextureSampleType::Uint;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUTextureSampleType")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::SamplerBindingType& out,
+ const interop::GPUSamplerBindingType& in) {
+ out = wgpu::SamplerBindingType::Undefined;
+ switch (in) {
+ case interop::GPUSamplerBindingType::kFiltering:
+ out = wgpu::SamplerBindingType::Filtering;
+ return true;
+ case interop::GPUSamplerBindingType::kNonFiltering:
+ out = wgpu::SamplerBindingType::NonFiltering;
+ return true;
+ case interop::GPUSamplerBindingType::kComparison:
+ out = wgpu::SamplerBindingType::Comparison;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::StorageTextureAccess& out,
+ const interop::GPUStorageTextureAccess& in) {
+ out = wgpu::StorageTextureAccess::Undefined;
+ switch (in) {
+ case interop::GPUStorageTextureAccess::kWriteOnly:
+ out = wgpu::StorageTextureAccess::WriteOnly;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
+ out = wgpu::QueryType::Occlusion;
+ switch (in) {
+ case interop::GPUQueryType::kOcclusion:
+ out = wgpu::QueryType::Occlusion;
+ return true;
+ case interop::GPUQueryType::kPipelineStatistics:
+ out = wgpu::QueryType::PipelineStatistics;
+ return true;
+ case interop::GPUQueryType::kTimestamp:
+ out = wgpu::QueryType::Timestamp;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::PipelineStatisticName& out,
+ const interop::GPUPipelineStatisticName& in) {
+ out = wgpu::PipelineStatisticName::VertexShaderInvocations;
+ switch (in) {
+ case interop::GPUPipelineStatisticName::kVertexShaderInvocations:
+ out = wgpu::PipelineStatisticName::VertexShaderInvocations;
+ return true;
+ case interop::GPUPipelineStatisticName::kClipperInvocations:
+ out = wgpu::PipelineStatisticName::ClipperInvocations;
+ return true;
+ case interop::GPUPipelineStatisticName::kClipperPrimitivesOut:
+ out = wgpu::PipelineStatisticName::ClipperPrimitivesOut;
+ return true;
+ case interop::GPUPipelineStatisticName::kFragmentShaderInvocations:
+ out = wgpu::PipelineStatisticName::FragmentShaderInvocations;
+ return true;
+ case interop::GPUPipelineStatisticName::kComputeShaderInvocations:
+ out = wgpu::PipelineStatisticName::ComputeShaderInvocations;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUPipelineStatisticName")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
+ out = wgpu::AddressMode::Repeat;
+ switch (in) {
+ case interop::GPUAddressMode::kClampToEdge:
+ out = wgpu::AddressMode::ClampToEdge;
+ return true;
+ case interop::GPUAddressMode::kRepeat:
+ out = wgpu::AddressMode::Repeat;
+ return true;
+ case interop::GPUAddressMode::kMirrorRepeat:
+ out = wgpu::AddressMode::MirrorRepeat;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
+ out = wgpu::FilterMode::Nearest;
+ switch (in) {
+ case interop::GPUFilterMode::kNearest:
+ out = wgpu::FilterMode::Nearest;
+ return true;
+ case interop::GPUFilterMode::kLinear:
+ out = wgpu::FilterMode::Linear;
+ return true;
+ }
+ Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+ return false;
+ }
+
+ bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
+ const interop::GPUComputePipelineDescriptor& in) {
+ return Convert(out.label, in.label) && //
+ Convert(out.layout, in.layout) && //
+ Convert(out.compute, in.compute);
+ }
+
+ bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
+ const interop::GPURenderPipelineDescriptor& in) {
+        out = {};
+ return Convert(out.label, in.label) && //
+ Convert(out.layout, in.layout) && //
+ Convert(out.vertex, in.vertex) && //
+ Convert(out.primitive, in.primitive) && //
+ Convert(out.depthStencil, in.depthStencil) && //
+ Convert(out.multisample, in.multisample) && Convert(out.fragment, in.fragment);
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Converter.h b/chromium/third_party/dawn/src/dawn_node/binding/Converter.h
new file mode 100644
index 00000000000..e499e4fa939
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/Converter.h
@@ -0,0 +1,362 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_CONVERTER_H_
+#define DAWN_NODE_BINDING_CONVERTER_H_
+
+#include <functional>
+#include <type_traits>
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/binding/Errors.h"
+#include "src/dawn_node/interop/WebGPU.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
+ // binding implementation type.
+ template <typename T>
+ struct ImplOfTraits {};
+
+ // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
+ // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
+#define DECLARE_IMPL(NAME) \
+ class NAME; \
+ template <> \
+ struct ImplOfTraits<interop::NAME> { \
+ using type = binding::NAME; \
+ }
+
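+    // For example, DECLARE_IMPL(GPUBuffer); expands (roughly) to:
+    //
+    //   class GPUBuffer;
+    //   template <>
+    //   struct ImplOfTraits<interop::GPUBuffer> {
+    //       using type = binding::GPUBuffer;
+    //   };
+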
+    // Declare the mappings from interop interfaces to their binding implementations.
+ DECLARE_IMPL(GPUBindGroup);
+ DECLARE_IMPL(GPUBindGroupLayout);
+ DECLARE_IMPL(GPUBuffer);
+ DECLARE_IMPL(GPUPipelineLayout);
+ DECLARE_IMPL(GPUQuerySet);
+ DECLARE_IMPL(GPURenderBundle);
+ DECLARE_IMPL(GPURenderPipeline);
+ DECLARE_IMPL(GPUSampler);
+ DECLARE_IMPL(GPUShaderModule);
+ DECLARE_IMPL(GPUTexture);
+ DECLARE_IMPL(GPUTextureView);
+#undef DECLARE_IMPL
+
+ // Helper for obtaining the binding implementation type from the interop interface type
+ template <typename T>
+ using ImplOf = typename ImplOfTraits<T>::type;
+
+ // Converter is a utility class for converting IDL generated interop types into Dawn types.
+ // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
+ // heap allocations for conversions of vector or optional types. These pointers are
+ // automatically freed when the Converter is destructed.
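+    //
+    // A minimal usage sketch, mirroring how the bindings below use it
+    // (`in` here stands for a hypothetical interop value):
+    //
+    //   Converter conv(env);
+    //   wgpu::MapMode mode{};
+    //   if (!conv(mode, in.mode)) {
+    //       return;  // Convert() has already thrown a JavaScript exception
+    //   }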
+ class Converter {
+ public:
+ Converter(Napi::Env e) : env(e) {
+ }
+ ~Converter();
+
+ // Conversion function. Converts the interop type IN to the Dawn type OUT.
+ // Returns true on success, false on failure.
+ template <typename OUT, typename IN>
+ [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
+ return Convert(std::forward<OUT>(out), std::forward<IN>(in));
+ }
+
+ // Vector conversion function. Converts the vector of interop type IN to a pointer of
+ // elements of Dawn type OUT, which is assigned to 'out_els'.
+ // out_count is assigned the number of elements in 'in'.
+ // Returns true on success, false on failure.
+ // The pointer assigned to 'out_els' is valid until the Converter is destructed.
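+        // For example, Converter.cpp uses this conversion to turn a
+        // std::vector<interop::GPUVertexAttribute> into the wgpu::VertexAttribute*
+        // array and count of a wgpu::VertexBufferLayout:
+        //
+        //   Convert(out.attributes, out.attributeCount, in.attributes)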
+ template <typename OUT, typename IN>
+ [[nodiscard]] inline bool operator()(OUT*& out_els,
+ uint32_t& out_count,
+ const std::vector<IN>& in) {
+ return Convert(out_els, out_count, in);
+ }
+
+ // Returns the Env that this Converter was constructed with.
+ inline Napi::Env Env() const {
+ return env;
+ }
+
+ // BufferSource is the converted type of interop::BufferSource.
+ struct BufferSource {
+ void* data;
+ size_t size;
+ };
+
+ private:
+ // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
+ [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
+
+ [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
+
+ [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
+
+ [[nodiscard]] bool Convert(wgpu::Origin3D& out,
+ const std::vector<interop::GPUIntegerCoordinate>& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
+
+ [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
+ const interop::GPUImageCopyTexture& in);
+
+ [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
+ const interop::GPUImageCopyBuffer& in);
+
+ [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
+ const interop::GPUImageDataLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
+ const interop::GPUTextureUsageFlags& in);
+
+ [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
+ const interop::GPUColorWriteFlags& in);
+
+ [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
+
+ [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
+
+ [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
+ const interop::GPUTextureDimension& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
+ const interop::GPUTextureViewDimension& in);
+
+ [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
+ const interop::GPUProgrammableStage& in);
+
+ [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
+
+ [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
+
+ [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
+
+ [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
+
+ [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
+
+ [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
+ const interop::GPUColorTargetState& in);
+
+ [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
+ const interop::GPUDepthStencilState& in);
+
+ [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
+ const interop::GPUMultisampleState& in);
+
+ [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
+
+ [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
+ const interop::GPUPrimitiveTopology& in);
+
+ [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
+
+ [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
+
+ [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
+ const interop::GPUCompareFunction& in);
+
+ [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
+
+ [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
+ const interop::GPUStencilOperation& in);
+
+ [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
+ const interop::GPUStencilFaceState& in);
+
+ [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
+
+ [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
+ const interop::GPUVertexBufferLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
+
+ [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
+ const interop::GPUVertexAttribute& in);
+
+ [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
+
+ [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
+ const interop::GPURenderPassColorAttachment& in);
+
+ [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
+ const interop::GPURenderPassDepthStencilAttachment& in);
+
+ [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
+
+ [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
+
+ [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
+
+ [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
+ const interop::GPUBindGroupLayoutEntry& in);
+
+ [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
+ const interop::GPUBufferBindingLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
+ const interop::GPUSamplerBindingLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
+ const interop::GPUTextureBindingLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
+ const interop::GPUStorageTextureBindingLayout& in);
+
+ [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
+ const interop::GPUBufferBindingType& in);
+
+ [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
+ const interop::GPUSamplerBindingType& in);
+
+ [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
+ const interop::GPUTextureSampleType& in);
+
+ [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
+ const interop::GPUStorageTextureAccess& in);
+
+ [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
+
+ [[nodiscard]] bool Convert(wgpu::PipelineStatisticName& out,
+ const interop::GPUPipelineStatisticName& in);
+
+ [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
+
+ [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
+
+ [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
+ const interop::GPUComputePipelineDescriptor& in);
+
+ [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
+ const interop::GPURenderPipelineDescriptor& in);
+
+ // std::string to C string
+ inline bool Convert(const char*& out, const std::string& in) {
+ out = in.c_str();
+ return true;
+ }
+
+ // Pass-through (no conversion)
+ template <typename T>
+ inline bool Convert(T& out, const T& in) {
+ out = in;
+ return true;
+ }
+
+ // Integral number conversion, with dynamic limit checking
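+        // For example, narrowing a uint64_t into a uint32_t succeeds only if the
+        // value survives the round-trip; otherwise a JavaScript exception is
+        // thrown and false is returned.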
+ template <typename OUT,
+ typename IN,
+ typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
+ inline bool Convert(OUT& out, const IN& in) {
+ out = static_cast<OUT>(in);
+ if (static_cast<IN>(out) != in) {
+ Napi::Error::New(env, "Integer value (" + std::to_string(in) +
+ ") cannot be converted to the Dawn data type without "
+ "truncation of the value")
+ .ThrowAsJavaScriptException();
+ return false;
+ }
+ return true;
+ }
+
+ template <typename OUT, typename... IN_TYPES>
+ inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
+ return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+ }
+
+ // If the std::optional does not have a value, then Convert() simply returns true and 'out'
+ // is not assigned a new value.
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT& out, const std::optional<IN>& in) {
+ if (in.has_value()) {
+ return Convert(out, in.value());
+ }
+ return true;
+ }
+
+ // std::optional -> T*
+ // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
+ // whether 'in' has a value.
+ template <typename OUT,
+ typename IN,
+ typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
+ inline bool Convert(OUT*& out, const std::optional<IN>& in) {
+ if (in.has_value()) {
+ auto* el = Allocate<std::remove_const_t<OUT>>();
+ if (!Convert(*el, in.value())) {
+ return false;
+ }
+ out = el;
+ } else {
+ out = nullptr;
+ }
+ return true;
+ }
+
+ // interop::Interface -> Dawn object
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
+ using Impl = ImplOf<IN>;
+ out = *in.template As<Impl>();
+ if (!out) {
+ LOG("Dawn object has been destroyed. This should not happen");
+ return false;
+ }
+ return true;
+ }
+
+ // vector -> raw pointer + count
+ template <typename OUT, typename IN>
+ inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
+ if (in.size() == 0) {
+ out_els = nullptr;
+ out_count = 0;
+ return true;
+ }
+ auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+ for (size_t i = 0; i < in.size(); i++) {
+ if (!Convert(els[i], in[i])) {
+ return false;
+ }
+ }
+ out_els = els;
+ return Convert(out_count, in.size());
+ }
+
+ Napi::Env env;
+
+ // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
+ // the first element. The array is freed when the Converter is destructed.
+ template <typename T>
+ T* Allocate(size_t n = 1) {
+ auto* ptr = new T[n]{};
+ free_.emplace_back([ptr] { delete[] ptr; });
+ return ptr;
+ }
+
+ std::vector<std::function<void()>> free_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_CONVERTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp b/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp
new file mode 100644
index 00000000000..953a1a8bc6f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/Errors.cpp
@@ -0,0 +1,179 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/Errors.h"
+
+namespace wgpu { namespace binding {
+
+ namespace {
+ constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
+ constexpr char kWrongDocumentError[] = "WrongDocumentError";
+ constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
+ constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
+ constexpr char kNotFoundError[] = "NotFoundError";
+ constexpr char kNotSupportedError[] = "NotSupportedError";
+ constexpr char kInUseAttributeError[] = "InUseAttributeError";
+ constexpr char kInvalidStateError[] = "InvalidStateError";
+ constexpr char kSyntaxError[] = "SyntaxError";
+ constexpr char kInvalidModificationError[] = "InvalidModificationError";
+ constexpr char kNamespaceError[] = "NamespaceError";
+ constexpr char kSecurityError[] = "SecurityError";
+ constexpr char kNetworkError[] = "NetworkError";
+ constexpr char kAbortError[] = "AbortError";
+ constexpr char kURLMismatchError[] = "URLMismatchError";
+ constexpr char kQuotaExceededError[] = "QuotaExceededError";
+ constexpr char kTimeoutError[] = "TimeoutError";
+ constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
+ constexpr char kDataCloneError[] = "DataCloneError";
+ constexpr char kEncodingError[] = "EncodingError";
+ constexpr char kNotReadableError[] = "NotReadableError";
+ constexpr char kUnknownError[] = "UnknownError";
+ constexpr char kConstraintError[] = "ConstraintError";
+ constexpr char kDataError[] = "DataError";
+ constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
+ constexpr char kReadOnlyError[] = "ReadOnlyError";
+ constexpr char kVersionError[] = "VersionError";
+ constexpr char kOperationError[] = "OperationError";
+ constexpr char kNotAllowedError[] = "NotAllowedError";
+
+ static Napi::Error New(Napi::Env env,
+ std::string name,
+ std::string message = {},
+ unsigned short code = 0) {
+ auto err = Napi::Error::New(env);
+ err.Set("name", name);
+ err.Set("message", message.empty() ? name : message);
+ err.Set("code", static_cast<double>(code));
+ return err;
+ }
+
+ } // namespace
+
+ Napi::Error Errors::HierarchyRequestError(Napi::Env env) {
+ return New(env, kHierarchyRequestError);
+ }
+
+ Napi::Error Errors::WrongDocumentError(Napi::Env env) {
+ return New(env, kWrongDocumentError);
+ }
+
+ Napi::Error Errors::InvalidCharacterError(Napi::Env env) {
+ return New(env, kInvalidCharacterError);
+ }
+
+ Napi::Error Errors::NoModificationAllowedError(Napi::Env env) {
+ return New(env, kNoModificationAllowedError);
+ }
+
+ Napi::Error Errors::NotFoundError(Napi::Env env) {
+ return New(env, kNotFoundError);
+ }
+
+ Napi::Error Errors::NotSupportedError(Napi::Env env) {
+ return New(env, kNotSupportedError);
+ }
+
+ Napi::Error Errors::InUseAttributeError(Napi::Env env) {
+ return New(env, kInUseAttributeError);
+ }
+
+ Napi::Error Errors::InvalidStateError(Napi::Env env) {
+ return New(env, kInvalidStateError);
+ }
+
+ Napi::Error Errors::SyntaxError(Napi::Env env) {
+ return New(env, kSyntaxError);
+ }
+
+ Napi::Error Errors::InvalidModificationError(Napi::Env env) {
+ return New(env, kInvalidModificationError);
+ }
+
+ Napi::Error Errors::NamespaceError(Napi::Env env) {
+ return New(env, kNamespaceError);
+ }
+
+ Napi::Error Errors::SecurityError(Napi::Env env) {
+ return New(env, kSecurityError);
+ }
+
+ Napi::Error Errors::NetworkError(Napi::Env env) {
+ return New(env, kNetworkError);
+ }
+
+ Napi::Error Errors::AbortError(Napi::Env env) {
+ return New(env, kAbortError);
+ }
+
+ Napi::Error Errors::URLMismatchError(Napi::Env env) {
+ return New(env, kURLMismatchError);
+ }
+
+ Napi::Error Errors::QuotaExceededError(Napi::Env env) {
+ return New(env, kQuotaExceededError);
+ }
+
+ Napi::Error Errors::TimeoutError(Napi::Env env) {
+ return New(env, kTimeoutError);
+ }
+
+ Napi::Error Errors::InvalidNodeTypeError(Napi::Env env) {
+ return New(env, kInvalidNodeTypeError);
+ }
+
+ Napi::Error Errors::DataCloneError(Napi::Env env) {
+ return New(env, kDataCloneError);
+ }
+
+ Napi::Error Errors::EncodingError(Napi::Env env) {
+ return New(env, kEncodingError);
+ }
+
+ Napi::Error Errors::NotReadableError(Napi::Env env) {
+ return New(env, kNotReadableError);
+ }
+
+ Napi::Error Errors::UnknownError(Napi::Env env) {
+ return New(env, kUnknownError);
+ }
+
+ Napi::Error Errors::ConstraintError(Napi::Env env) {
+ return New(env, kConstraintError);
+ }
+
+ Napi::Error Errors::DataError(Napi::Env env) {
+ return New(env, kDataError);
+ }
+
+ Napi::Error Errors::TransactionInactiveError(Napi::Env env) {
+ return New(env, kTransactionInactiveError);
+ }
+
+ Napi::Error Errors::ReadOnlyError(Napi::Env env) {
+ return New(env, kReadOnlyError);
+ }
+
+ Napi::Error Errors::VersionError(Napi::Env env) {
+ return New(env, kVersionError);
+ }
+
+ Napi::Error Errors::OperationError(Napi::Env env) {
+ return New(env, kOperationError);
+ }
+
+ Napi::Error Errors::NotAllowedError(Napi::Env env) {
+ return New(env, kNotAllowedError);
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/Errors.h b/chromium/third_party/dawn/src/dawn_node/binding/Errors.h
new file mode 100644
index 00000000000..0f1a40e6b8e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/Errors.h
@@ -0,0 +1,60 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_ERRORS_H_
+#define DAWN_NODE_BINDING_ERRORS_H_
+
+#include "napi.h"
+
+namespace wgpu { namespace binding {
+
+ // Errors contains static helper methods for creating DOMException error
+ // messages as documented at:
+ // https://heycam.github.io/webidl/#idl-DOMException-error-names
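+    //
+    // Typical usage in the bindings (illustrative):
+    //
+    //   promise.Reject(Errors::OperationError(env));
+    //   // or, when not inside a promise:
+    //   Errors::OperationError(env).ThrowAsJavaScriptException();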
+ class Errors {
+ public:
+ static Napi::Error HierarchyRequestError(Napi::Env);
+ static Napi::Error WrongDocumentError(Napi::Env);
+ static Napi::Error InvalidCharacterError(Napi::Env);
+ static Napi::Error NoModificationAllowedError(Napi::Env);
+ static Napi::Error NotFoundError(Napi::Env);
+ static Napi::Error NotSupportedError(Napi::Env);
+ static Napi::Error InUseAttributeError(Napi::Env);
+ static Napi::Error InvalidStateError(Napi::Env);
+ static Napi::Error SyntaxError(Napi::Env);
+ static Napi::Error InvalidModificationError(Napi::Env);
+ static Napi::Error NamespaceError(Napi::Env);
+ static Napi::Error SecurityError(Napi::Env);
+ static Napi::Error NetworkError(Napi::Env);
+ static Napi::Error AbortError(Napi::Env);
+ static Napi::Error URLMismatchError(Napi::Env);
+ static Napi::Error QuotaExceededError(Napi::Env);
+ static Napi::Error TimeoutError(Napi::Env);
+ static Napi::Error InvalidNodeTypeError(Napi::Env);
+ static Napi::Error DataCloneError(Napi::Env);
+ static Napi::Error EncodingError(Napi::Env);
+ static Napi::Error NotReadableError(Napi::Env);
+ static Napi::Error UnknownError(Napi::Env);
+ static Napi::Error ConstraintError(Napi::Env);
+ static Napi::Error DataError(Napi::Env);
+ static Napi::Error TransactionInactiveError(Napi::Env);
+ static Napi::Error ReadOnlyError(Napi::Env);
+ static Napi::Error VersionError(Napi::Env);
+ static Napi::Error OperationError(Napi::Env);
+ static Napi::Error NotAllowedError(Napi::Env);
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_ERRORS_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp
new file mode 100644
index 00000000000..2f649786af4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPU.cpp
@@ -0,0 +1,123 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPU.h"
+
+#include "src/dawn_node/binding/GPUAdapter.h"
+
+#include <algorithm>
+#include <cctype>
+#include <cstdlib>
+
+namespace {
+ std::string getEnvVar(const char* varName) {
+#if defined(_WIN32)
+ // Use _dupenv_s to avoid unsafe warnings about std::getenv
+ char* value = nullptr;
+ _dupenv_s(&value, nullptr, varName);
+ if (value) {
+ std::string result = value;
+ free(value);
+ return result;
+ }
+ return "";
+#else
+ if (auto* val = std::getenv(varName)) {
+ return val;
+ }
+ return "";
+#endif
+ }
+} // namespace
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPU
+ ////////////////////////////////////////////////////////////////////////////////
+ GPU::GPU() {
+        // TODO: Disable backend validation in release builds.
+ instance_.EnableBackendValidation(true);
+ instance_.SetBackendValidationLevel(dawn_native::BackendValidationLevel::Full);
+
+ instance_.DiscoverDefaultAdapters();
+ }
+
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) {
+ auto promise =
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(env);
+
+ if (options.forceFallbackAdapter) {
+ // Software adapters are not currently supported.
+ promise.Resolve({});
+ return promise;
+ }
+
+ auto adapters = instance_.GetAdapters();
+ if (adapters.empty()) {
+ promise.Resolve({});
+ return promise;
+ }
+
+#if defined(_WIN32)
+ constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
+#elif defined(__linux__)
+ constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
+#elif defined(__APPLE__)
+ constexpr auto defaultBackendType = wgpu::BackendType::Metal;
+#else
+# error "Unsupported platform"
+#endif
+
+ auto targetBackendType = defaultBackendType;
+
+ // Check for override from env var
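+        // (e.g. DAWNNODE_BACKEND=vulkan prefers the Vulkan backend; the value is
+        // matched case-insensitively against the names handled below)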
+ std::string envVar = getEnvVar("DAWNNODE_BACKEND");
+ std::transform(envVar.begin(), envVar.end(), envVar.begin(),
+ [](char c) { return std::tolower(c); });
+ if (envVar == "null") {
+ targetBackendType = wgpu::BackendType::Null;
+ } else if (envVar == "webgpu") {
+ targetBackendType = wgpu::BackendType::WebGPU;
+ } else if (envVar == "d3d11") {
+ targetBackendType = wgpu::BackendType::D3D11;
+ } else if (envVar == "d3d12" || envVar == "d3d") {
+ targetBackendType = wgpu::BackendType::D3D12;
+ } else if (envVar == "metal") {
+ targetBackendType = wgpu::BackendType::Metal;
+ } else if (envVar == "vulkan" || envVar == "vk") {
+ targetBackendType = wgpu::BackendType::Vulkan;
+ } else if (envVar == "opengl" || envVar == "gl") {
+ targetBackendType = wgpu::BackendType::OpenGL;
+ } else if (envVar == "opengles" || envVar == "gles") {
+ targetBackendType = wgpu::BackendType::OpenGLES;
+ }
+
+ // Default to first adapter if we don't find a match
+ size_t adapterIndex = 0;
+ for (size_t i = 0; i < adapters.size(); ++i) {
+ wgpu::AdapterProperties props;
+ adapters[i].GetProperties(&props);
+ if (props.backendType == targetBackendType) {
+ adapterIndex = i;
+ break;
+ }
+ }
+
+ auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex]);
+ promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+ return promise;
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPU.h b/chromium/third_party/dawn/src/dawn_node/binding/GPU.h
new file mode 100644
index 00000000000..131ce42ad97
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPU.h
@@ -0,0 +1,41 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPU_H_
+#define DAWN_NODE_BINDING_GPU_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPU is an implementation of interop::GPU that wraps a dawn_native::Instance.
+ class GPU final : public interop::GPU {
+ public:
+ GPU();
+
+ // interop::GPU interface compliance
+ interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
+ Napi::Env env,
+ interop::GPURequestAdapterOptions options) override;
+
+ private:
+ dawn_native::Instance instance_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPU_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp
new file mode 100644
index 00000000000..d7b1d14ce2d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.cpp
@@ -0,0 +1,138 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUAdapter.h"
+
+#include <unordered_set>
+
+#include "src/dawn_node/binding/GPUDevice.h"
+#include "src/dawn_node/binding/GPUSupportedLimits.h"
+
+namespace wgpu { namespace binding {
+
+ namespace {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::binding::<anon>::Features
+ // Implements interop::GPUSupportedFeatures
+ ////////////////////////////////////////////////////////////////////////////////
+ class Features : public interop::GPUSupportedFeatures {
+ public:
+ Features(WGPUDeviceProperties properties) {
+ if (properties.depthClamping) {
+ enabled_.emplace(interop::GPUFeatureName::kDepthClamping);
+ }
+ if (properties.pipelineStatisticsQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kPipelineStatisticsQuery);
+ }
+ if (properties.textureCompressionBC) {
+ enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
+ }
+ if (properties.timestampQuery) {
+ enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+ }
+
+ // TODO(crbug.com/dawn/1130)
+ // interop::GPUFeatureName::kDepth24UnormStencil8:
+ // interop::GPUFeatureName::kDepth32FloatStencil8:
+ }
+
+ bool has(interop::GPUFeatureName feature) {
+ return enabled_.count(feature) != 0;
+ }
+
+ // interop::GPUSupportedFeatures compliance
+ bool has(Napi::Env, std::string name) override {
+ interop::GPUFeatureName feature;
+ if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
+ return has(feature);
+ }
+ return false;
+ }
+ std::vector<std::string> keys(Napi::Env) override {
+ std::vector<std::string> out;
+ out.reserve(enabled_.size());
+ for (auto feature : enabled_) {
+ out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
+ }
+ return out;
+ }
+
+ private:
+ std::unordered_set<interop::GPUFeatureName> enabled_;
+ };
+
+ } // namespace
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUAdapter
+ // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUAdapter::GPUAdapter(dawn_native::Adapter a) : adapter_(a) {
+ }
+
+ std::string GPUAdapter::getName(Napi::Env) {
+ return "dawn-adapter";
+ }
+
+ interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
+ return interop::GPUSupportedFeatures::Create<Features>(env,
+ adapter_.GetAdapterProperties());
+ }
+
+ interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env);
+ }
+
+ bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) {
+ dawn_native::DeviceDescriptor desc{}; // TODO(crbug.com/dawn/1133): Fill in.
+ interop::Promise<interop::Interface<interop::GPUDevice>> promise(env);
+
+ // See src/dawn_native/Features.cpp for enum <-> string mappings.
+ for (auto required : descriptor.requiredFeatures) {
+ switch (required) {
+ case interop::GPUFeatureName::kDepthClamping:
+ desc.requiredFeatures.emplace_back("depth_clamping");
+ continue;
+ case interop::GPUFeatureName::kPipelineStatisticsQuery:
+ desc.requiredFeatures.emplace_back("pipeline_statistics_query");
+ continue;
+ case interop::GPUFeatureName::kTextureCompressionBc:
+ desc.requiredFeatures.emplace_back("texture_compression_bc");
+ continue;
+ case interop::GPUFeatureName::kTimestampQuery:
+ desc.requiredFeatures.emplace_back("timestamp_query");
+ continue;
+ case interop::GPUFeatureName::kDepth24UnormStencil8:
+ case interop::GPUFeatureName::kDepth32FloatStencil8:
+ continue; // TODO(crbug.com/dawn/1130)
+ }
+ UNIMPLEMENTED("required: ", required);
+ }
+
+ auto wgpu_device = adapter_.CreateDevice(&desc);
+ if (wgpu_device) {
+ promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
+ } else {
+ Napi::Error::New(env, "failed to create device").ThrowAsJavaScriptException();
+ }
+ return promise;
+ }
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h
new file mode 100644
index 00000000000..1b44f57993a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUAdapter.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUADAPTER_H_
+#define DAWN_NODE_BINDING_GPUADAPTER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn_native::Adapter.
+ class GPUAdapter final : public interop::GPUAdapter {
+ public:
+ GPUAdapter(dawn_native::Adapter a);
+
+ // interop::GPUAdapter interface compliance
+ std::string getName(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ bool getIsFallbackAdapter(Napi::Env) override;
+ interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
+ Napi::Env env,
+ interop::GPUDeviceDescriptor descriptor) override;
+
+ private:
+ dawn_native::Adapter adapter_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUADAPTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp
new file mode 100644
index 00000000000..e1567005808
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUBindGroup.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBindGroup
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
+ }
+
+ std::optional<std::string> GPUBindGroup::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBindGroup::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h
new file mode 100644
index 00000000000..e71fc8be5ce
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroup.h
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUP_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUP_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
+ class GPUBindGroup final : public interop::GPUBindGroup {
+ public:
+ GPUBindGroup(wgpu::BindGroup group);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroup &() const {
+ return group_;
+ }
+
+ // interop::GPUBindGroup interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::BindGroup group_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBINDGROUP_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp
new file mode 100644
index 00000000000..ddaeaba6f11
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUBindGroupLayout.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBindGroupLayout
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
+ : layout_(std::move(layout)) {
+ }
+
+ std::optional<std::string> GPUBindGroupLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBindGroupLayout::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h
new file mode 100644
index 00000000000..34874bf1d91
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBindGroupLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+#define DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
+ // wgpu::BindGroupLayout.
+ class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
+ public:
+ GPUBindGroupLayout(wgpu::BindGroupLayout layout);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::BindGroupLayout &() const {
+ return layout_;
+ }
+
+ // interop::GPUBindGroupLayout interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::BindGroupLayout layout_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBINDGROUPLAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp
new file mode 100644
index 00000000000..ac5ae544b4c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.cpp
@@ -0,0 +1,167 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUBuffer.h"
+
+#include <memory>
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/Errors.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUBuffer
+    // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once the
+    // CTS is passing robustly, pull out the validation and see what, if anything, breaks.
+ ////////////////////////////////////////////////////////////////////////////////
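+    // Informal summary of the state transitions implemented below: a buffer starts
+    // Unmapped (or MappedAtCreation when created mapped), mapAsync() moves it to
+    // MappingPending and then Mapped on success (back to Unmapped on failure),
+    // unmap() returns it to Unmapped, and destroy() moves it to Destroyed.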
+ GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async)
+ : buffer_(std::move(buffer)),
+ desc_(desc),
+ device_(std::move(device)),
+ async_(std::move(async)) {
+ if (desc.mappedAtCreation) {
+ state_ = State::MappedAtCreation;
+ }
+ }
+
+ interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ wgpu::MapMode md{};
+ Converter conv(env);
+ if (!conv(md, mode)) {
+ return {env};
+ }
+
+ if (state_ != State::Unmapped) {
+ interop::Promise<void> promise(env);
+ promise.Reject(Errors::OperationError(env));
+ device_.InjectError(wgpu::ErrorType::Validation,
+ "mapAsync called on buffer that is not in the unmapped state");
+ return promise;
+ }
+
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ State& state;
+ };
+ auto ctx = new Context{env, interop::Promise<void>(env), async_, state_};
+ auto promise = ctx->promise;
+
+ uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
+
+ state_ = State::MappingPending;
+
+ buffer_.MapAsync(
+ md, offset, s,
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ c->state = State::Unmapped;
+ switch (status) {
+ case WGPUBufferMapAsyncStatus_Force32:
+ UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+ break;
+ case WGPUBufferMapAsyncStatus_Success:
+ c->promise.Resolve();
+ c->state = State::Mapped;
+ break;
+ case WGPUBufferMapAsyncStatus_Error:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
+ case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
+ c->promise.Reject(Errors::AbortError(c->env));
+ break;
+ case WGPUBufferMapAsyncStatus_Unknown:
+ case WGPUBufferMapAsyncStatus_DeviceLost:
+ // TODO: The spec is a bit vague around what the promise should do
+ // here.
+ c->promise.Reject(Errors::UnknownError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+
+ uint64_t s = size.has_value() ? size.value() : (desc_.size - offset);
+
+ uint64_t start = offset;
+ uint64_t end = offset + s;
+ for (auto& mapping : mapped_) {
+ if (mapping.Intersects(start, end)) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+ }
+
+ auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
+ ? buffer_.GetMappedRange(offset, s)
+ : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
+ if (!ptr) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+ auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
+ // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
+ mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
+ return array_buffer;
+ }
+
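+    // unmap() detaches every ArrayBuffer previously returned by getMappedRange() so stale
+    // JavaScript views cannot touch memory that is no longer mapped, then unmaps the underlying
+    // wgpu::Buffer.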
+ void GPUBuffer::unmap(Napi::Env env) {
+ if (state_ == State::Destroyed) {
+ device_.InjectError(wgpu::ErrorType::Validation,
+ "unmap() called on a destroyed buffer");
+ return;
+ }
+
+ for (auto& mapping : mapped_) {
+ mapping.buffer.Value().Detach();
+ }
+ mapped_.clear();
+ buffer_.Unmap();
+ state_ = State::Unmapped;
+ }
+
+ void GPUBuffer::destroy(Napi::Env) {
+ buffer_.Destroy();
+ state_ = State::Destroyed;
+ }
+
+ std::optional<std::string> GPUBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h
new file mode 100644
index 00000000000..c3d8b031988
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUBuffer.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUBUFFER_H_
+#define DAWN_NODE_BINDING_GPUBUFFER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/binding/AsyncRunner.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
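+    // Instances are created by GPUDevice::createBuffer(), which passes along the wgpu::Buffer,
+    // the wgpu::BufferDescriptor used to create it, the owning wgpu::Device, and the shared
+    // AsyncRunner used to service mapAsync() callbacks.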
+ class GPUBuffer final : public interop::GPUBuffer {
+ public:
+ GPUBuffer(wgpu::Buffer buffer,
+ wgpu::BufferDescriptor desc,
+ wgpu::Device device,
+ std::shared_ptr<AsyncRunner> async);
+
+ // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
+ const wgpu::BufferDescriptor& Desc() const {
+ return desc_;
+ }
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Buffer &() const {
+ return buffer_;
+ }
+
+ // interop::GPUBuffer interface compliance
+ interop::Promise<void> mapAsync(Napi::Env env,
+ interop::GPUMapModeFlags mode,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ interop::ArrayBuffer getMappedRange(Napi::Env env,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void unmap(Napi::Env) override;
+ void destroy(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ struct Mapping {
+ uint64_t start;
+ uint64_t end;
+ inline bool Intersects(uint64_t s, uint64_t e) const {
+ return s < end && e > start;
+ }
+ Napi::Reference<interop::ArrayBuffer> buffer;
+ };
+
+ // https://www.w3.org/TR/webgpu/#buffer-interface
+ enum class State {
+ Unmapped,
+ Mapped,
+ MappedAtCreation,
+ MappingPending,
+ Destroyed,
+ };
+
+ wgpu::Buffer buffer_;
+ wgpu::BufferDescriptor const desc_;
+ wgpu::Device const device_;
+ std::shared_ptr<AsyncRunner> async_;
+ State state_ = State::Unmapped;
+ std::vector<Mapping> mapped_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp
new file mode 100644
index 00000000000..0ff503fc54c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.cpp
@@ -0,0 +1,40 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUCommandBuffer.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUCommandBuffer
+ ////////////////////////////////////////////////////////////////////////////////
+
+ GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
+ }
+
+ interop::Promise<double> GPUCommandBuffer::getExecutionTime(Napi::Env) {
+ UNIMPLEMENTED();
+    }
+
+ std::optional<std::string> GPUCommandBuffer::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUCommandBuffer::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h
new file mode 100644
index 00000000000..b6fc3ba511e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandBuffer.h
@@ -0,0 +1,47 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
+ // wgpu::CommandBuffer.
+ class GPUCommandBuffer final : public interop::GPUCommandBuffer {
+ public:
+ GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::CommandBuffer &() const {
+ return cmd_buf_;
+ }
+
+ // interop::GPUCommandBuffer interface compliance
+ interop::Promise<double> getExecutionTime(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::CommandBuffer cmd_buf_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp
new file mode 100644
index 00000000000..cf3925c2680
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.cpp
@@ -0,0 +1,196 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUCommandEncoder.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPU.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUCommandBuffer.h"
+#include "src/dawn_node/binding/GPUComputePassEncoder.h"
+#include "src/dawn_node/binding/GPUQuerySet.h"
+#include "src/dawn_node/binding/GPURenderPassEncoder.h"
+#include "src/dawn_node/binding/GPUTexture.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUCommandEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
+ }
+
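+    // The encoder methods below follow a common pattern: a Converter translates the interop
+    // (JavaScript-facing) descriptor types into their Dawn equivalents, and on any conversion
+    // failure the method returns early with a default value rather than issuing the Dawn call.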
+ interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
+ Napi::Env env,
+ interop::GPURenderPassDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPassDescriptor desc{};
+ if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
+ !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
+ !conv(desc.label, descriptor.label) ||
+ !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
+ return {};
+ }
+ return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
+ env, enc_.BeginRenderPass(&desc));
+ }
+
+ interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
+ Napi::Env env,
+ interop::GPUComputePassDescriptor descriptor) {
+ wgpu::ComputePassDescriptor desc{};
+ return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
+ env, enc_.BeginComputePass(&desc));
+ }
+
+ void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) {
+ Converter conv(env);
+
+ wgpu::Buffer src{};
+ wgpu::Buffer dst{};
+ if (!conv(src, source) || //
+ !conv(dst, destination)) {
+ return;
+ }
+
+ enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+ }
+
+ void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyBuffer src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyBufferToTexture(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyBuffer dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToBuffer(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) {
+ Converter conv(env);
+
+ wgpu::ImageCopyTexture src{};
+ wgpu::ImageCopyTexture dst{};
+ wgpu::Extent3D size{};
+ if (!conv(src, source) || //
+ !conv(dst, destination) || //
+ !conv(size, copySize)) {
+ return;
+ }
+
+ enc_.CopyTextureToTexture(&src, &dst, &size);
+ }
+
+ void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPUCommandEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPUCommandEncoder::writeTimestamp(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ if (!conv(q, querySet)) {
+ return;
+ }
+
+ enc_.WriteTimestamp(q, queryIndex);
+ }
+
+ void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ uint32_t f = 0;
+ uint32_t c = 0;
+ wgpu::Buffer b{};
+ uint64_t o = 0;
+
+ if (!conv(q, querySet) || //
+ !conv(f, firstQuery) || //
+ !conv(c, queryCount) || //
+ !conv(b, destination) || //
+ !conv(o, destinationOffset)) {
+ return;
+ }
+
+ enc_.ResolveQuerySet(q, f, c, b, o);
+ }
+
+ interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) {
+ wgpu::CommandBufferDescriptor desc{};
+ return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
+ }
+
+ std::optional<std::string> GPUCommandEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUCommandEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h
new file mode 100644
index 00000000000..f23e281234f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUCommandEncoder.h
@@ -0,0 +1,80 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
+ // wgpu::CommandEncoder.
+ class GPUCommandEncoder final : public interop::GPUCommandEncoder {
+ public:
+ GPUCommandEncoder(wgpu::CommandEncoder enc);
+
+ // interop::GPUCommandEncoder interface compliance
+ interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
+ Napi::Env,
+ interop::GPURenderPassDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
+ Napi::Env,
+ interop::GPUComputePassDescriptor descriptor) override;
+ void copyBufferToBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> source,
+ interop::GPUSize64 sourceOffset,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset,
+ interop::GPUSize64 size) override;
+ void copyBufferToTexture(Napi::Env,
+ interop::GPUImageCopyBuffer source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToBuffer(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyBuffer destination,
+ interop::GPUExtent3D copySize) override;
+ void copyTextureToTexture(Napi::Env,
+ interop::GPUImageCopyTexture source,
+ interop::GPUImageCopyTexture destination,
+ interop::GPUExtent3D copySize) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void writeTimestamp(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void resolveQuerySet(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 firstQuery,
+ interop::GPUSize32 queryCount,
+ interop::Interface<interop::GPUBuffer> destination,
+ interop::GPUSize64 destinationOffset) override;
+ interop::Interface<interop::GPUCommandBuffer> finish(
+ Napi::Env env,
+ interop::GPUCommandBufferDescriptor descriptor) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::CommandEncoder enc_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMMANDENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp
new file mode 100644
index 00000000000..6edc467ccf5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.cpp
@@ -0,0 +1,135 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUComputePassEncoder.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPUBindGroup.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUComputePipeline.h"
+#include "src/dawn_node/binding/GPUQuerySet.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUComputePassEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
+ : enc_(std::move(enc)) {
+ }
+
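+    // setPipeline() and dispatchIndirect() unwrap their interop arguments directly with
+    // .As<GPUComputePipeline>() / .As<GPUBuffer>(), relying on the implicit cast operators
+    // declared in the corresponding binding headers.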
+ void GPUComputePassEncoder::setPipeline(
+ Napi::Env,
+ interop::Interface<interop::GPUComputePipeline> pipeline) {
+ enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+ }
+
+ void GPUComputePassEncoder::dispatch(Napi::Env,
+ interop::GPUSize32 x,
+ interop::GPUSize32 y,
+ interop::GPUSize32 z) {
+ enc_.Dispatch(x, y, z);
+ }
+
+ void GPUComputePassEncoder::dispatchIndirect(
+ Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+ }
+
+ void GPUComputePassEncoder::beginPipelineStatisticsQuery(
+ Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePassEncoder::endPipelineStatisticsQuery(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePassEncoder::writeTimestamp(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ if (!conv(q, querySet)) {
+ return;
+ }
+
+ enc_.WriteTimestamp(q, queryIndex);
+ }
+
+ void GPUComputePassEncoder::endPass(Napi::Env) {
+ enc_.EndPass();
+ }
+
+ void GPUComputePassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
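+        // Overload taking the dynamic offsets as a Uint32Array: dynamicOffsetsDataStart is an
+        // element index into the array (not a byte offset), so the offsets pointer passed to
+        // Dawn is dynamicOffsetsData.Data() + dynamicOffsetsDataStart.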
+ void GPUComputePassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ std::optional<std::string> GPUComputePassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h
new file mode 100644
index 00000000000..9c7064be99b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePassEncoder.h
@@ -0,0 +1,76 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
+ // wgpu::ComputePassEncoder.
+ class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
+ public:
+ GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePassEncoder &() const {
+ return enc_;
+ }
+
+ // interop::GPUComputePassEncoder interface compliance
+ void setPipeline(Napi::Env,
+ interop::Interface<interop::GPUComputePipeline> pipeline) override;
+ void dispatch(Napi::Env,
+ interop::GPUSize32 x,
+ interop::GPUSize32 y,
+ interop::GPUSize32 z) override;
+ void dispatchIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void beginPipelineStatisticsQuery(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void endPipelineStatisticsQuery(Napi::Env) override;
+ void writeTimestamp(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void endPass(Napi::Env) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::ComputePassEncoder enc_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUCOMPUTEPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp
new file mode 100644
index 00000000000..0eef82ec544
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.cpp
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUComputePipeline.h"
+
+#include "src/dawn_node/binding/GPUBindGroupLayout.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUComputePipeline
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
+ : pipeline_(std::move(pipeline)) {
+ }
+
+ interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
+ Napi::Env env,
+ uint32_t index) {
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, pipeline_.GetBindGroupLayout(index));
+ }
+
+ std::optional<std::string> GPUComputePipeline::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUComputePipeline::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h
new file mode 100644
index 00000000000..e9dde76a6a7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUComputePipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUCOMPUTEPIPELINE_H_
+#define DAWN_NODE_BINDING_GPUCOMPUTEPIPELINE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
+ // wgpu::ComputePipeline.
+ class GPUComputePipeline final : public interop::GPUComputePipeline {
+ public:
+ GPUComputePipeline(wgpu::ComputePipeline pipeline);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ComputePipeline &() const {
+ return pipeline_;
+ }
+
+ // interop::GPUComputePipeline interface compliance
+ interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+ uint32_t index) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::ComputePipeline pipeline_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif  // DAWN_NODE_BINDING_GPUCOMPUTEPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp
new file mode 100644
index 00000000000..73b5e18f4ea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.cpp
@@ -0,0 +1,518 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUDevice.h"
+
+#include <memory>
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/Errors.h"
+#include "src/dawn_node/binding/GPUBindGroup.h"
+#include "src/dawn_node/binding/GPUBindGroupLayout.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUCommandBuffer.h"
+#include "src/dawn_node/binding/GPUCommandEncoder.h"
+#include "src/dawn_node/binding/GPUComputePipeline.h"
+#include "src/dawn_node/binding/GPUPipelineLayout.h"
+#include "src/dawn_node/binding/GPUQuerySet.h"
+#include "src/dawn_node/binding/GPUQueue.h"
+#include "src/dawn_node/binding/GPURenderBundleEncoder.h"
+#include "src/dawn_node/binding/GPURenderPipeline.h"
+#include "src/dawn_node/binding/GPUSampler.h"
+#include "src/dawn_node/binding/GPUShaderModule.h"
+#include "src/dawn_node/binding/GPUSupportedLimits.h"
+#include "src/dawn_node/binding/GPUTexture.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ namespace {
+
+ class DeviceLostInfo : public interop::GPUDeviceLostInfo {
+ public:
+ DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+ : reason_(reason), message_(message) {
+ }
+ std::variant<interop::GPUDeviceLostReason> getReason(Napi::Env env) override {
+ return reason_;
+ }
+ std::string getMessage(Napi::Env) override {
+ return message_;
+ }
+
+ private:
+ interop::GPUDeviceLostReason reason_;
+ std::string message_;
+ };
+
+ class OOMError : public interop::GPUOutOfMemoryError {};
+ class ValidationError : public interop::GPUValidationError {
+ public:
+ ValidationError(std::string message) : message_(std::move(message)) {
+ }
+
+ std::string getMessage(Napi::Env) override {
+ return message_;
+            }
+
+ private:
+ std::string message_;
+ };
+
+ } // namespace
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUDevice
+ ////////////////////////////////////////////////////////////////////////////////
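+    // The constructor wires up the Dawn device callbacks: logging and uncaptured errors are
+    // printed to stdout, and the device-lost callback resolves every promise handed out by
+    // getLost() with a DeviceLostInfo carrying the reason and message.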
+ GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
+ : env_(env), device_(device), async_(std::make_shared<AsyncRunner>(env, device)) {
+ device_.SetLoggingCallback(
+ [](WGPULoggingType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+ device_.SetUncapturedErrorCallback(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ std::cout << type << ": " << message << std::endl;
+ },
+ nullptr);
+
+ device_.SetDeviceLostCallback(
+ [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
+ auto r = interop::GPUDeviceLostReason::kDestroyed;
+ switch (reason) {
+ case WGPUDeviceLostReason_Force32:
+ UNREACHABLE("WGPUDeviceLostReason_Force32");
+ break;
+ case WGPUDeviceLostReason_Destroyed:
+ case WGPUDeviceLostReason_Undefined:
+ r = interop::GPUDeviceLostReason::kDestroyed;
+ break;
+ }
+ auto* self = static_cast<GPUDevice*>(userdata);
+ for (auto promise : self->lost_promises_) {
+ promise.Resolve(
+ interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
+ }
+ },
+ this);
+ }
+
+ GPUDevice::~GPUDevice() {
+ }
+
+ interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
+ class Features : public interop::GPUSupportedFeatures {
+ public:
+ bool has(Napi::Env, std::string feature) override {
+ UNIMPLEMENTED();
+ }
+ std::vector<std::string> keys(Napi::Env) override {
+ UNIMPLEMENTED();
+ }
+ };
+ return interop::GPUSupportedFeatures::Create<Features>(env);
+ }
+
+ interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
+ return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env);
+ }
+
+ interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
+ // TODO(crbug.com/dawn/1144): Should probably return the same Queue JS object.
+ return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+ }
+
+ void GPUDevice::destroy(Napi::Env) {
+ device_.Release();
+ }
+
+ interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BufferDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
+ !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
+ return {};
+ }
+ return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
+ device_, async_);
+ }
+
+ interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
+ Napi::Env env,
+ interop::GPUTextureDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::TextureDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) || //
+ !conv(desc.size, descriptor.size) || //
+ !conv(desc.dimension, descriptor.dimension) || //
+ !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
+ !conv(desc.sampleCount, descriptor.sampleCount) || //
+ !conv(desc.format, descriptor.format)) {
+ return {};
+ }
+ return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+ }
+
+ interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
+ Napi::Env env,
+ interop::GPUSamplerDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::SamplerDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || //
+ !conv(desc.addressModeU, descriptor.addressModeU) || //
+ !conv(desc.addressModeV, descriptor.addressModeV) || //
+ !conv(desc.addressModeW, descriptor.addressModeW) || //
+ !conv(desc.magFilter, descriptor.magFilter) || //
+ !conv(desc.minFilter, descriptor.minFilter) || //
+ !conv(desc.mipmapFilter, descriptor.mipmapFilter) || //
+ !conv(desc.lodMinClamp, descriptor.lodMinClamp) || //
+ !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) || //
+ !conv(desc.compare, descriptor.compare) || //
+ !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
+ return {};
+ }
+ return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+ }
+
+ interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) {
+ UNIMPLEMENTED();
+ }
+
+ interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
+ Napi::Env env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BindGroupLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
+ }
+
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, device_.CreateBindGroupLayout(&desc));
+ }
+
+ interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
+ Napi::Env env,
+ interop::GPUPipelineLayoutDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::PipelineLayoutDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
+ return {};
+ }
+
+ return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
+ env, device_.CreatePipelineLayout(&desc));
+ }
+
+ interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
+ Napi::Env env,
+ interop::GPUBindGroupDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::BindGroupDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
+ !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+ return {};
+ }
+
+ return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+ }
+
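+    // Shader modules are always created from WGSL here: the source is carried in a
+    // wgpu::ShaderModuleWGSLDescriptor chained onto the base descriptor via nextInChain,
+    // Dawn's extension-struct mechanism.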
+ interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
+ Napi::Env env,
+ interop::GPUShaderModuleDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
+ wgpu::ShaderModuleDescriptor sm_desc{};
+ if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
+ return {};
+ }
+ sm_desc.nextInChain = &wgsl_desc;
+
+ return interop::GPUShaderModule::Create<GPUShaderModule>(
+ env, device_.CreateShaderModule(&sm_desc), async_);
+ }
+
+ interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
+ Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
+ }
+
+ return interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ env, device_.CreateComputePipeline(&desc));
+ }
+
+ interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {};
+ }
+
+ return interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ env, device_.CreateRenderPipeline(&desc));
+ }
+
+ interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+ GPUDevice::createComputePipelineAsync(Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::ComputePipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {env};
+ }
+
+ using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, env, async_};
+ auto promise = ctx->promise;
+
+ device_.CreateComputePipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+ char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
+ c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::Promise<interop::Interface<interop::GPURenderPipeline>>
+ GPUDevice::createRenderPipelineAsync(Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderPipelineDescriptor desc{};
+ if (!conv(desc, descriptor)) {
+ return {env};
+ }
+
+ using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, env, async_};
+ auto promise = ctx->promise;
+
+ device_.CreateRenderPipelineAsync(
+ &desc,
+ [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+ char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ switch (status) {
+ case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+ c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
+ c->env, pipeline));
+ break;
+ default:
+ c->promise.Reject(Errors::OperationError(c->env));
+ break;
+ }
+ },
+ ctx);
+
+ return promise;
+ }
+
+ interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) {
+ wgpu::CommandEncoderDescriptor desc{};
+ return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
+ env, device_.CreateCommandEncoder(&desc));
+ }
+
+ interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
+ Napi::Env env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::RenderBundleEncoderDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) ||
+ !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
+ !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
+ !conv(desc.sampleCount, descriptor.sampleCount)) {
+ return {};
+ }
+
+ return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
+ env, device_.CreateRenderBundleEncoder(&desc));
+ }
+
+ interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
+ Napi::Env env,
+ interop::GPUQuerySetDescriptor descriptor) {
+ Converter conv(env);
+
+ wgpu::QuerySetDescriptor desc{};
+ if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
+ !conv(desc.count, descriptor.count) ||
+ !conv(desc.pipelineStatistics, desc.pipelineStatisticsCount,
+ descriptor.pipelineStatistics)) {
+ return {};
+ }
+
+ return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+ }
+
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
+ Napi::Env env) {
+ auto promise = interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>(env);
+ lost_promises_.emplace_back(promise);
+ return promise;
+ }
+
+ void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
+ wgpu::ErrorFilter f = wgpu::ErrorFilter::None;
+ switch (filter) {
+ case interop::GPUErrorFilter::kOutOfMemory:
+ f = wgpu::ErrorFilter::OutOfMemory;
+ break;
+ case interop::GPUErrorFilter::kValidation:
+ f = wgpu::ErrorFilter::Validation;
+ break;
+ default:
+ Napi::Error::New(env, "unhandled GPUErrorFilter value")
+ .ThrowAsJavaScriptException();
+ return;
+ }
+ device_.PushErrorScope(f);
+ }
+
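+    // popErrorScope() resolves the returned promise from Dawn's PopErrorScope callback: no error
+    // resolves with an empty optional, out-of-memory with a GPUOutOfMemoryError, and validation
+    // (as well as unknown/device-lost) with a GPUValidationError. If PopErrorScope() itself
+    // returns false, the context is freed and a pre-resolved validation error is returned
+    // instead.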
+ interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
+ using Promise = interop::Promise<std::optional<interop::GPUError>>;
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto* ctx = new Context{env, env, async_};
+ auto promise = ctx->promise;
+
+ bool ok = device_.PopErrorScope(
+ [](WGPUErrorType type, char const* message, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ auto env = c->env;
+ switch (type) {
+ case WGPUErrorType::WGPUErrorType_NoError:
+ c->promise.Resolve({});
+ break;
+ case WGPUErrorType::WGPUErrorType_OutOfMemory:
+ c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
+ break;
+ case WGPUErrorType::WGPUErrorType_Unknown:
+ case WGPUErrorType::WGPUErrorType_DeviceLost:
+ case WGPUErrorType::WGPUErrorType_Validation:
+ c->promise.Resolve(
+ interop::GPUValidationError::Create<ValidationError>(env, message));
+ break;
+ default:
+ c->promise.Reject("unhandled error type");
+ break;
+ }
+ },
+ ctx);
+
+ if (ok) {
+ return promise;
+ }
+
+ delete ctx;
+ Promise p(env);
+ p.Resolve(
+ interop::GPUValidationError::Create<ValidationError>(env, "failed to pop error scope"));
+ return p;
+ }
+
+ std::optional<std::string> GPUDevice::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+    }
+
+ void GPUDevice::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+    }
+
+ interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::setOnuncapturederror(Napi::Env,
+ interop::Interface<interop::EventHandler> value) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUDevice::removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
+ UNIMPLEMENTED();
+ }
+
+ bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h
new file mode 100644
index 00000000000..a3c5dce31df
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUDevice.h
@@ -0,0 +1,113 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUDEVICE_H_
+#define DAWN_NODE_BINDING_GPUDEVICE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "napi.h"
+#include "src/dawn_node/binding/AsyncRunner.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+ // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
+ class GPUDevice final : public interop::GPUDevice {
+ public:
+ GPUDevice(Napi::Env env, wgpu::Device device);
+ ~GPUDevice();
+
+ // interop::GPUDevice interface compliance
+ interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+ interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+ interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
+ void destroy(Napi::Env) override;
+ interop::Interface<interop::GPUBuffer> createBuffer(
+ Napi::Env env,
+ interop::GPUBufferDescriptor descriptor) override;
+ interop::Interface<interop::GPUTexture> createTexture(
+ Napi::Env,
+ interop::GPUTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUSampler> createSampler(
+ Napi::Env,
+ interop::GPUSamplerDescriptor descriptor) override;
+ interop::Interface<interop::GPUExternalTexture> importExternalTexture(
+ Napi::Env,
+ interop::GPUExternalTextureDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
+ Napi::Env,
+ interop::GPUBindGroupLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
+ Napi::Env,
+ interop::GPUPipelineLayoutDescriptor descriptor) override;
+ interop::Interface<interop::GPUBindGroup> createBindGroup(
+ Napi::Env,
+ interop::GPUBindGroupDescriptor descriptor) override;
+ interop::Interface<interop::GPUShaderModule> createShaderModule(
+ Napi::Env,
+ interop::GPUShaderModuleDescriptor descriptor) override;
+ interop::Interface<interop::GPUComputePipeline> createComputePipeline(
+ Napi::Env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
+ Napi::Env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+ createComputePipelineAsync(Napi::Env env,
+ interop::GPUComputePipelineDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
+ Napi::Env env,
+ interop::GPURenderPipelineDescriptor descriptor) override;
+ interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
+ Napi::Env env,
+ interop::GPUCommandEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
+ Napi::Env,
+ interop::GPURenderBundleEncoderDescriptor descriptor) override;
+ interop::Interface<interop::GPUQuerySet> createQuerySet(
+ Napi::Env,
+ interop::GPUQuerySetDescriptor descriptor) override;
+ interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
+ Napi::Env env) override;
+ void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
+ interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+ interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
+ void setOnuncapturederror(Napi::Env,
+ interop::Interface<interop::EventHandler> value) override;
+ void addEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
+ void removeEventListener(
+ Napi::Env,
+ std::string type,
+ std::optional<interop::Interface<interop::EventListener>> callback,
+ std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
+ bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
+
+ private:
+ void QueueTick();
+
+ Napi::Env env_;
+ wgpu::Device device_;
+ std::shared_ptr<AsyncRunner> async_;
+ std::vector<interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>>>
+ lost_promises_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUDEVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp
new file mode 100644
index 00000000000..861df210d4e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUPipelineLayout.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUPipelineLayout
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
+ }
+
+ std::optional<std::string> GPUPipelineLayout::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUPipelineLayout::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h
new file mode 100644
index 00000000000..a1d0b87bcd4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUPipelineLayout.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+#define DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
+ // wgpu::PipelineLayout.
+ class GPUPipelineLayout final : public interop::GPUPipelineLayout {
+ public:
+ GPUPipelineLayout(wgpu::PipelineLayout layout);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::PipelineLayout &() const {
+ return layout_;
+ }
+
+ // interop::GPUPipelineLayout interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::PipelineLayout layout_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUPIPELINELAYOUT_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp
new file mode 100644
index 00000000000..e56564a94c0
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.cpp
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUQuerySet.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUQuerySet
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
+ }
+
+ void GPUQuerySet::destroy(Napi::Env) {
+ query_set_.Destroy();
+ }
+
+ std::optional<std::string> GPUQuerySet::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUQuerySet::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h
new file mode 100644
index 00000000000..8669e16ac28
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUQuerySet.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUERYSET_H_
+#define DAWN_NODE_BINDING_GPUQUERYSET_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
+ class GPUQuerySet final : public interop::GPUQuerySet {
+ public:
+ GPUQuerySet(wgpu::QuerySet query_set);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::QuerySet &() const {
+ return query_set_;
+ }
+
+ // interop::GPUQuerySet interface compliance
+ void destroy(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::QuerySet query_set_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUQUERYSET_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp
new file mode 100644
index 00000000000..c8e39fe23e9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.cpp
@@ -0,0 +1,132 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUQueue.h"
+
+#include <memory>
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUCommandBuffer.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUQueue
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
+ : queue_(std::move(queue)), async_(std::move(async)) {
+ }
+
+ void GPUQueue::submit(
+ Napi::Env env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
+ std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
+ for (size_t i = 0; i < commandBuffers.size(); i++) {
+ bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
+ }
+ Converter conv(env);
+ uint32_t bufs_size;
+ if (!conv(bufs_size, bufs.size())) {
+ return;
+ }
+ queue_.Submit(bufs_size, bufs.data());
+ }
+
+ interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
+ struct Context {
+ Napi::Env env;
+ interop::Promise<void> promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, interop::Promise<void>(env), async_};
+ auto promise = ctx->promise;
+
+ queue_.OnSubmittedWorkDone(
+ 0,
+ [](WGPUQueueWorkDoneStatus status, void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+ if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
+ Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
+ .ThrowAsJavaScriptException();
+ }
+ c->promise.Resolve();
+ },
+ ctx);
+
+ return promise;
+ }
+
+ void GPUQueue::writeBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffset,
+ std::optional<interop::GPUSize64> size) {
+ wgpu::Buffer buf = *buffer.As<GPUBuffer>();
+ Converter::BufferSource src{};
+ Converter conv(env);
+ if (!conv(src, data)) {
+ return;
+ }
+
+ // TODO(crbug.com/dawn/1132): Bounds check
+ if (src.data) {
+ src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
+ }
+ src.size -= dataOffset;
+ if (size.has_value()) {
+ src.size = size.value();
+ }
+
+ queue_.WriteBuffer(buf, bufferOffset, src.data, src.size);
+ }
+
+ void GPUQueue::writeTexture(Napi::Env env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) {
+ wgpu::ImageCopyTexture dst{};
+ Converter::BufferSource src{};
+ wgpu::TextureDataLayout layout{};
+ wgpu::Extent3D sz{};
+ Converter conv(env);
+ if (!conv(dst, destination) || //
+ !conv(src, data) || //
+ !conv(layout, dataLayout) || //
+ !conv(sz, size)) {
+ return;
+ }
+
+ queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+ }
+
+ void GPUQueue::copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) {
+ UNIMPLEMENTED();
+ }
+
+ std::optional<std::string> GPUQueue::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUQueue::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
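
onSubmittedWorkDone() above is the template for every async binding in this directory: a heap-allocated Context carries the Napi environment, the interop Promise, and an AsyncTask (which keeps the Node event loop ticking the device) across a C-style callback, and ownership is reclaimed exactly once inside that callback. A standalone sketch of the ownership hand-off, with a hypothetical AsyncOp standing in for the real Dawn entry point:

#include <cstdio>
#include <memory>

// Hypothetical stand-in for a Dawn-style async API taking a C callback + userdata.
using Callback = void (*)(int status, void* userdata);
void AsyncOp(Callback cb, void* userdata) {
    cb(/*status=*/0, userdata);  // a real API would invoke this later, from device ticking
}

struct Context {
    int request_id;  // the bindings store env, the interop::Promise and an AsyncTask here
};

void Run() {
    // Ownership is transferred into the callback through the userdata pointer...
    auto* ctx = new Context{42};
    AsyncOp(
        [](int status, void* userdata) {
            // ...and reclaimed exactly once here; destroying the Context also releases the
            // AsyncTask that kept the event loop alive while the work was in flight.
            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
            std::printf("request %d completed with status %d\n", c->request_id, status);
        },
        ctx);
}
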
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h
new file mode 100644
index 00000000000..69952c6dd42
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUQueue.h
@@ -0,0 +1,61 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUQUEUE_H_
+#define DAWN_NODE_BINDING_GPUQUEUE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/binding/AsyncRunner.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
+ class GPUQueue final : public interop::GPUQueue {
+ public:
+ GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
+
+ // interop::GPUQueue interface compliance
+ void submit(
+ Napi::Env,
+ std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
+ interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
+ void writeBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 bufferOffset,
+ interop::BufferSource data,
+ interop::GPUSize64 dataOffset,
+ std::optional<interop::GPUSize64> size) override;
+ void writeTexture(Napi::Env,
+ interop::GPUImageCopyTexture destination,
+ interop::BufferSource data,
+ interop::GPUImageDataLayout dataLayout,
+ interop::GPUExtent3D size) override;
+ void copyExternalImageToTexture(Napi::Env,
+ interop::GPUImageCopyExternalImage source,
+ interop::GPUImageCopyTextureTagged destination,
+ interop::GPUExtent3D copySize) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::Queue queue_;
+ std::shared_ptr<AsyncRunner> async_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUQUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp
new file mode 100644
index 00000000000..2f42ac72d3c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.cpp
@@ -0,0 +1,39 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPURenderBundle.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPURenderBundle.h"
+#include "src/dawn_node/binding/GPURenderPipeline.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderBundle
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
+ }
+
+ std::optional<std::string> GPURenderBundle::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderBundle::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h
new file mode 100644
index 00000000000..9f824f239ea
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundle.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
+ // wgpu::RenderBundle.
+ class GPURenderBundle final : public interop::GPURenderBundle {
+ public:
+ GPURenderBundle(wgpu::RenderBundle bundle);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderBundle &() const {
+ return bundle_;
+ }
+
+ // interop::GPURenderBundle interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderBundle bundle_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERBUNDLE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp
new file mode 100644
index 00000000000..123741d6c4d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.cpp
@@ -0,0 +1,192 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPURenderBundleEncoder.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPUBindGroup.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPURenderBundle.h"
+#include "src/dawn_node/binding/GPURenderPipeline.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderBundleEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
+ : enc_(std::move(enc)) {
+ }
+
+ interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
+ Napi::Env env,
+ interop::GPURenderBundleDescriptor descriptor) {
+ wgpu::RenderBundleDescriptor desc{};
+
+ return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+ }
+
+ void GPURenderBundleEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
+ void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPURenderBundleEncoder::setPipeline(
+ Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+
+ wgpu::RenderPipeline p{};
+ if (!conv(p, pipeline)) {
+ return;
+ }
+
+ enc_.SetPipeline(p);
+ }
+
+ void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ wgpu::IndexFormat f{};
+ uint64_t o = 0;
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(o, offset) || //
+ !conv(s, size)) {
+ return;
+ }
+
+ enc_.SetIndexBuffer(b, f, o, s);
+ }
+
+ void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+ }
+
+ void GPURenderBundleEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+ }
+
+ void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+ }
+
+ void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+        uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+ }
+
+ void GPURenderBundleEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+        uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+ }
+
+ std::optional<std::string> GPURenderBundleEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderBundleEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
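
In the Uint32Array overload of setBindGroup() above, dynamicOffsetsDataStart is an element index into the array, not a byte offset, and dynamicOffsetsDataLength is the number of offsets forwarded to Dawn. A small standalone illustration of that arithmetic, with a std::vector standing in for the interop::Uint32Array:

#include <cstdint>
#include <vector>

void DynamicOffsetsSketch() {
    // Stand-in for the Uint32Array passed from JavaScript.
    std::vector<uint32_t> dynamicOffsetsData = {0, 256, 512, 768};

    uint64_t dynamicOffsetsDataStart = 1;   // element index: skip the first offset
    uint32_t dynamicOffsetsDataLength = 2;  // forward exactly two offsets: 256 and 512

    const uint32_t* offsets = dynamicOffsetsData.data() + dynamicOffsetsDataStart;
    // The binding hands this pair straight to Dawn:
    //   enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength, offsets);
    (void)offsets;
}
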
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h
new file mode 100644
index 00000000000..3d11e330e4c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderBundleEncoder.h
@@ -0,0 +1,86 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
+ // wgpu::RenderBundleEncoder.
+ class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
+ public:
+ GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
+
+ // interop::GPURenderBundleEncoder interface compliance
+ interop::Interface<interop::GPURenderBundle> finish(
+ Napi::Env,
+ interop::GPURenderBundleDescriptor descriptor) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void setPipeline(Napi::Env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) override;
+ void setIndexBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void setVertexBuffer(Napi::Env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void draw(Napi::Env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndexed(Napi::Env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void drawIndexedIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderBundleEncoder enc_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERBUNDLEENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp
new file mode 100644
index 00000000000..5dce4f23894
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.cpp
@@ -0,0 +1,262 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPURenderPassEncoder.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/GPUBindGroup.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/binding/GPUQuerySet.h"
+#include "src/dawn_node/binding/GPURenderBundle.h"
+#include "src/dawn_node/binding/GPURenderPipeline.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderPassEncoder
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
+ }
+
+ void GPURenderPassEncoder::setViewport(Napi::Env,
+ float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
+ enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+ }
+
+ void GPURenderPassEncoder::setScissorRect(Napi::Env,
+ interop::GPUIntegerCoordinate x,
+ interop::GPUIntegerCoordinate y,
+ interop::GPUIntegerCoordinate width,
+ interop::GPUIntegerCoordinate height) {
+ enc_.SetScissorRect(x, y, width, height);
+ }
+
+ void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
+ Converter conv(env);
+
+ wgpu::Color c{};
+ if (!conv(c, color)) {
+ return;
+ }
+
+ enc_.SetBlendConstant(&c);
+ }
+
+ void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
+ enc_.SetStencilReference(reference);
+ }
+
+ void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
+ enc_.BeginOcclusionQuery(queryIndex);
+ }
+
+ void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
+ enc_.EndOcclusionQuery();
+ }
+
+ void GPURenderPassEncoder::beginPipelineStatisticsQuery(
+ Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderPassEncoder::endPipelineStatisticsQuery(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderPassEncoder::writeTimestamp(Napi::Env env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) {
+ Converter conv(env);
+
+ wgpu::QuerySet q{};
+ if (!conv(q, querySet)) {
+ return;
+ }
+
+ enc_.WriteTimestamp(q, queryIndex);
+ }
+
+ void GPURenderPassEncoder::executeBundles(
+ Napi::Env env,
+ std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
+ Converter conv(env);
+
+ wgpu::RenderBundle* bundles = nullptr;
+ uint32_t bundleCount = 0;
+ if (!conv(bundles, bundleCount, bundles_in)) {
+ return;
+ }
+
+ enc_.ExecuteBundles(bundleCount, bundles);
+ }
+
+ void GPURenderPassEncoder::endPass(Napi::Env) {
+ enc_.EndPass();
+ }
+
+ void GPURenderPassEncoder::setBindGroup(
+ Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ uint32_t* offsets = nullptr;
+ uint32_t num_offsets = 0;
+ if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, num_offsets, offsets);
+ }
+
+ void GPURenderPassEncoder::setBindGroup(Napi::Env env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) {
+ Converter conv(env);
+
+ wgpu::BindGroup bg{};
+ if (!conv(bg, bindGroup)) {
+ return;
+ }
+
+ enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+ dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+ }
+
+ void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+ enc_.PushDebugGroup(groupLabel.c_str());
+ }
+
+ void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
+ enc_.PopDebugGroup();
+ }
+
+ void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+ enc_.InsertDebugMarker(markerLabel.c_str());
+ }
+
+ void GPURenderPassEncoder::setPipeline(
+ Napi::Env env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) {
+ Converter conv(env);
+ wgpu::RenderPipeline rp{};
+ if (!conv(rp, pipeline)) {
+ return;
+ }
+ enc_.SetPipeline(rp);
+ }
+
+ void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+        wgpu::IndexFormat f{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || //
+ !conv(f, indexFormat) || //
+ !conv(s, size)) {
+ return;
+ }
+ enc_.SetIndexBuffer(b, f, offset, s);
+ }
+
+ void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+ uint64_t s = wgpu::kWholeSize;
+ if (!conv(b, buffer) || !conv(s, size)) {
+ return;
+ }
+ enc_.SetVertexBuffer(slot, b, offset, s);
+ }
+
+ void GPURenderPassEncoder::draw(Napi::Env env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+ }
+
+ void GPURenderPassEncoder::drawIndexed(Napi::Env env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) {
+ enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+ }
+
+ void GPURenderPassEncoder::drawIndirect(Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+        uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndirect(b, o);
+ }
+
+ void GPURenderPassEncoder::drawIndexedIndirect(
+ Napi::Env env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) {
+ Converter conv(env);
+
+ wgpu::Buffer b{};
+        uint64_t o = 0;
+
+ if (!conv(b, indirectBuffer) || //
+ !conv(o, indirectOffset)) {
+ return;
+ }
+ enc_.DrawIndexedIndirect(b, o);
+ }
+
+ std::optional<std::string> GPURenderPassEncoder::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderPassEncoder::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
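
Taken together, the methods above forward a JavaScript-recorded pass more or less one-to-one onto the Dawn C++ encoder. A minimal sketch of the underlying call sequence, assuming the pipeline, bind group and vertex buffer were created elsewhere:

#include "dawn/webgpu_cpp.h"

void RecordTriangle(wgpu::RenderPassEncoder pass,
                    wgpu::RenderPipeline pipeline,
                    wgpu::BindGroup bindGroup,
                    wgpu::Buffer vertexBuffer) {
    pass.SetPipeline(pipeline);
    pass.SetBindGroup(0, bindGroup, 0, nullptr);
    pass.SetVertexBuffer(0, vertexBuffer, 0, wgpu::kWholeSize);
    pass.Draw(3, 1, 0, 0);
    // This snapshot of Dawn still names the terminator EndPass().
    pass.EndPass();
}
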
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h
new file mode 100644
index 00000000000..866aaab822c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPassEncoder.h
@@ -0,0 +1,115 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+#define DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
+ // wgpu::RenderPassEncoder.
+ class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
+ public:
+ GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderPassEncoder &() const {
+ return enc_;
+ }
+
+ // interop::GPURenderPassEncoder interface compliance
+ void setViewport(Napi::Env,
+ float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) override;
+ void setScissorRect(Napi::Env,
+ interop::GPUIntegerCoordinate x,
+ interop::GPUIntegerCoordinate y,
+ interop::GPUIntegerCoordinate width,
+ interop::GPUIntegerCoordinate height) override;
+ void setBlendConstant(Napi::Env, interop::GPUColor color) override;
+ void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
+ void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
+ void endOcclusionQuery(Napi::Env) override;
+ void beginPipelineStatisticsQuery(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void endPipelineStatisticsQuery(Napi::Env) override;
+ void writeTimestamp(Napi::Env,
+ interop::Interface<interop::GPUQuerySet> querySet,
+ interop::GPUSize32 queryIndex) override;
+ void executeBundles(
+ Napi::Env,
+ std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
+ void endPass(Napi::Env) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+ void setBindGroup(Napi::Env,
+ interop::GPUIndex32 index,
+ interop::Interface<interop::GPUBindGroup> bindGroup,
+ interop::Uint32Array dynamicOffsetsData,
+ interop::GPUSize64 dynamicOffsetsDataStart,
+ interop::GPUSize32 dynamicOffsetsDataLength) override;
+ void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+ void popDebugGroup(Napi::Env) override;
+ void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+ void setPipeline(Napi::Env,
+ interop::Interface<interop::GPURenderPipeline> pipeline) override;
+ void setIndexBuffer(Napi::Env,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUIndexFormat indexFormat,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void setVertexBuffer(Napi::Env,
+ interop::GPUIndex32 slot,
+ interop::Interface<interop::GPUBuffer> buffer,
+ interop::GPUSize64 offset,
+ std::optional<interop::GPUSize64> size) override;
+ void draw(Napi::Env,
+ interop::GPUSize32 vertexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndexed(Napi::Env,
+ interop::GPUSize32 indexCount,
+ interop::GPUSize32 instanceCount,
+ interop::GPUSize32 firstIndex,
+ interop::GPUSignedOffset32 baseVertex,
+ interop::GPUSize32 firstInstance) override;
+ void drawIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ void drawIndexedIndirect(Napi::Env,
+ interop::Interface<interop::GPUBuffer> indirectBuffer,
+ interop::GPUSize64 indirectOffset) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderPassEncoder enc_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERPASSENCODER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp
new file mode 100644
index 00000000000..3f363fc2a46
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.cpp
@@ -0,0 +1,45 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPURenderPipeline.h"
+
+#include "src/dawn_node/binding/GPUBindGroupLayout.h"
+#include "src/dawn_node/binding/GPUBuffer.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPURenderPipeline
+ ////////////////////////////////////////////////////////////////////////////////
+ GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
+ : pipeline_(std::move(pipeline)) {
+ }
+
+ interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
+ Napi::Env env,
+ uint32_t index) {
+ return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+ env, pipeline_.GetBindGroupLayout(index));
+ }
+
+ std::optional<std::string> GPURenderPipeline::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPURenderPipeline::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
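
getBindGroupLayout() above is a thin forward to the Dawn call of the same name; the returned layout is what any bind group intended for that group index of the pipeline must be created against. A minimal sketch:

#include "dawn/webgpu_cpp.h"

// Group 0's layout, as reflected from the pipeline itself; a GPUBindGroup meant
// for group 0 would be created against this layout.
wgpu::BindGroupLayout LayoutForGroupZero(wgpu::RenderPipeline pipeline) {
    return pipeline.GetBindGroupLayout(0);
}
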
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h
new file mode 100644
index 00000000000..fc15fc6acbd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPURenderPipeline.h
@@ -0,0 +1,48 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+#define DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
+ // wgpu::RenderPipeline.
+ class GPURenderPipeline final : public interop::GPURenderPipeline {
+ public:
+ GPURenderPipeline(wgpu::RenderPipeline pipeline);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::RenderPipeline &() const {
+ return pipeline_;
+ }
+
+ // interop::GPURenderPipeline interface compliance
+ interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+ uint32_t index) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::RenderPipeline pipeline_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPURENDERPIPELINE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp
new file mode 100644
index 00000000000..6de3aa49c0c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.cpp
@@ -0,0 +1,36 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUSampler.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUSampler
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
+ }
+
+ std::optional<std::string> GPUSampler::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUSampler::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h
new file mode 100644
index 00000000000..24e77d2b5e7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUSampler.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSAMPLER_H_
+#define DAWN_NODE_BINDING_GPUSAMPLER_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+ // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
+ class GPUSampler final : public interop::GPUSampler {
+ public:
+ GPUSampler(wgpu::Sampler sampler);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Sampler &() const {
+ return sampler_;
+ }
+
+ // interop::GPUSampler interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::Sampler sampler_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSAMPLER_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp
new file mode 100644
index 00000000000..3323ba5c83d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.cpp
@@ -0,0 +1,125 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUShaderModule.h"
+
+#include <memory>
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUShaderModule
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
+ : shader_(std::move(shader)), async_(std::move(async)) {
+ }
+
+ interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
+ GPUShaderModule::compilationInfo(Napi::Env env) {
+ struct GPUCompilationMessage : public interop::GPUCompilationMessage {
+ WGPUCompilationMessage message;
+
+ GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {
+ }
+ std::string getMessage(Napi::Env) override {
+ return message.message;
+ }
+ interop::GPUCompilationMessageType getType(Napi::Env) override {
+ switch (message.type) {
+ case WGPUCompilationMessageType_Error:
+ return interop::GPUCompilationMessageType::kError;
+ case WGPUCompilationMessageType_Warning:
+ return interop::GPUCompilationMessageType::kWarning;
+ case WGPUCompilationMessageType_Info:
+ return interop::GPUCompilationMessageType::kInfo;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+ uint64_t getLineNum(Napi::Env) override {
+ return message.lineNum;
+ }
+ uint64_t getLinePos(Napi::Env) override {
+ return message.linePos;
+ }
+ uint64_t getOffset(Napi::Env) override {
+ return message.offset;
+ }
+ uint64_t getLength(Napi::Env) override {
+ return message.length;
+ }
+ };
+
+ using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+
+ struct GPUCompilationInfo : public interop::GPUCompilationInfo {
+ std::vector<Napi::ObjectReference> messages;
+
+ GPUCompilationInfo(Napi::Env env, Messages msgs) {
+ messages.reserve(msgs.size());
+ for (auto& msg : msgs) {
+ messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
+ }
+ }
+ Messages getMessages(Napi::Env) override {
+ Messages out;
+ out.reserve(messages.size());
+ for (auto& msg : messages) {
+ out.emplace_back(msg.Value());
+ }
+ return out;
+ }
+ };
+
+ using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
+
+ struct Context {
+ Napi::Env env;
+ Promise promise;
+ AsyncTask task;
+ };
+ auto ctx = new Context{env, env, async_};
+ auto promise = ctx->promise;
+
+ shader_.GetCompilationInfo(
+ [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
+ void* userdata) {
+ auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+ Messages messages(compilationInfo->messageCount);
+ for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
+ auto& msg = compilationInfo->messages[i];
+ messages[i] =
+ interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
+ }
+
+ c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+ c->env, c->env, std::move(messages)));
+ },
+ ctx);
+
+ return promise;
+ }
+
+ std::optional<std::string> GPUShaderModule::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUShaderModule::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
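
The GPUCompilationInfo helper above stores its message objects as Napi::ObjectReference values created with Napi::Persistent rather than as plain Napi::Object handles: a persistent reference keeps the JS objects alive after the handle scope that created them has unwound, so getMessages() can return the same objects later. A reduced sketch of that pattern; the MessageCache class is illustrative only:

#include <vector>
#include "napi.h"

class MessageCache {
  public:
    void Add(Napi::Object message) {
        // Napi::Persistent() creates a strong reference that survives beyond the
        // current HandleScope; a raw Napi::Object would not.
        refs_.emplace_back(Napi::Persistent(message));
    }

    std::vector<Napi::Object> All() {
        std::vector<Napi::Object> out;
        out.reserve(refs_.size());
        for (auto& ref : refs_) {
            out.emplace_back(ref.Value());
        }
        return out;
    }

  private:
    std::vector<Napi::ObjectReference> refs_;
};
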
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h
new file mode 100644
index 00000000000..2fcd140120c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUShaderModule.h
@@ -0,0 +1,50 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+#define DAWN_NODE_BINDING_GPUSHADERMODULE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/binding/AsyncRunner.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
+ // wgpu::ShaderModule.
+ class GPUShaderModule final : public interop::GPUShaderModule {
+ public:
+ GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::ShaderModule &() const {
+ return shader_;
+ }
+
+ // interop::GPUShaderModule interface compliance
+ interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
+ Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::ShaderModule shader_;
+ std::shared_ptr<AsyncRunner> async_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSHADERMODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp
new file mode 100644
index 00000000000..8587031879a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.cpp
@@ -0,0 +1,131 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUSupportedLimits.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUSupportedLimits
+ ////////////////////////////////////////////////////////////////////////////////
+
+    // Values taken from:
+ // https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/modules/webgpu/gpu_supported_limits.h
+ // TODO(crbug.com/dawn/1131): Actually use limits reported by the device / adapter.
+
+ uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
+ return 8192;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
+ return 8192;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
+ return 2048;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
+ return 2048;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
+ return 4;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
+ return 8;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
+ return 4;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
+ return 16;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
+ return 16;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
+ return 4;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
+ return 4;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
+ return 12;
+ }
+
+ uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
+ return 16384;
+ }
+
+ uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
+ return 134217728;
+ }
+
+ uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
+ return 256;
+ }
+
+ uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
+ return 256;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
+ return 8;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
+ return 16;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
+ return 2048;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
+ return 60;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
+ return 16352;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
+ return 256;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
+ return 256;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
+ return 256;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
+ return 64;
+ }
+
+ uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
+ return 65535;
+ }
+
+}} // namespace wgpu::binding
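
These getters currently mirror Blink's hard-coded defaults; the TODO above (crbug.com/dawn/1131) is to report what the adapter/device actually supports. One hedged sketch of how that could be wired without touching the interop surface: capture the values into a plain struct when the device is created and read them back from trivial getters. The CapturedLimits struct and LimitsFromDevice class are hypothetical, and how the values would be obtained from Dawn is deliberately left out:

#include <cstdint>

// Hypothetical container for limits captured at device/adapter creation time.
struct CapturedLimits {
    uint32_t maxTextureDimension1D = 8192;
    uint32_t maxBindGroups = 4;
    uint64_t maxUniformBufferBindingSize = 16384;
    // ... remaining limits elided ...
};

class LimitsFromDevice {
  public:
    explicit LimitsFromDevice(CapturedLimits limits) : limits_(limits) {
    }

    uint32_t getMaxTextureDimension1D() const {
        return limits_.maxTextureDimension1D;
    }
    uint32_t getMaxBindGroups() const {
        return limits_.maxBindGroups;
    }

  private:
    CapturedLimits limits_;
};
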
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h
new file mode 100644
index 00000000000..b50753d822a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUSupportedLimits.h
@@ -0,0 +1,59 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+#define DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
+ class GPUSupportedLimits final : public interop::GPUSupportedLimits {
+ public:
+ // interop::GPUSupportedLimits interface compliance
+ uint32_t getMaxTextureDimension1D(Napi::Env) override;
+ uint32_t getMaxTextureDimension2D(Napi::Env) override;
+ uint32_t getMaxTextureDimension3D(Napi::Env) override;
+ uint32_t getMaxTextureArrayLayers(Napi::Env) override;
+ uint32_t getMaxBindGroups(Napi::Env) override;
+ uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
+ uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
+ uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
+ uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
+ uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
+ uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
+ uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
+ uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
+ uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
+ uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
+ uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
+ uint32_t getMaxVertexBuffers(Napi::Env) override;
+ uint32_t getMaxVertexAttributes(Napi::Env) override;
+ uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
+ uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
+ uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
+ uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUSUPPORTEDLIMITS_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp
new file mode 100644
index 00000000000..284cd8a91f9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.cpp
@@ -0,0 +1,64 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUTexture.h"
+
+#include "src/dawn_node/binding/Converter.h"
+#include "src/dawn_node/binding/Errors.h"
+#include "src/dawn_node/binding/GPUTextureView.h"
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // wgpu::bindings::GPUTexture
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
+ }
+
+ interop::Interface<interop::GPUTextureView> GPUTexture::createView(
+ Napi::Env env,
+ interop::GPUTextureViewDescriptor descriptor) {
+ if (!texture_) {
+ Errors::OperationError(env).ThrowAsJavaScriptException();
+ return {};
+ }
+
+ wgpu::TextureViewDescriptor desc{};
+ Converter conv(env);
+ if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) || //
+ !conv(desc.mipLevelCount, descriptor.mipLevelCount) || //
+ !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) || //
+ !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) || //
+ !conv(desc.format, descriptor.format) || //
+ !conv(desc.dimension, descriptor.dimension) || //
+ !conv(desc.aspect, descriptor.aspect)) {
+ return {};
+ }
+ return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+ }
+
+ void GPUTexture::destroy(Napi::Env) {
+ texture_.Destroy();
+ }
+
+ std::optional<std::string> GPUTexture::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUTexture::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+ }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h
new file mode 100644
index 00000000000..f5a2a473928
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUTexture.h
@@ -0,0 +1,49 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTURE_H_
+#define DAWN_NODE_BINDING_GPUTEXTURE_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
+ class GPUTexture final : public interop::GPUTexture {
+ public:
+ GPUTexture(wgpu::Texture texture);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::Texture &() const {
+ return texture_;
+ }
+
+ // interop::GPUTexture interface compliance
+ interop::Interface<interop::GPUTextureView> createView(
+ Napi::Env,
+ interop::GPUTextureViewDescriptor descriptor) override;
+ void destroy(Napi::Env) override;
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::Texture texture_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUTEXTURE_H_
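
A minimal usage sketch, not part of the patch itself: other bindings hand a wgpu::Texture back to JavaScript through the same Create<T>() pattern that createView() uses above for GPUTextureView. The WrapTexture helper below is hypothetical.

#include <utility>

#include "src/dawn_node/binding/GPUTexture.h"

namespace wgpu { namespace binding {

    // Hypothetical helper: wraps an already-created wgpu::Texture in its JS
    // interop object. Create<T>() allocates a binding::GPUTexture and binds it
    // to a new Napi object whose methods delegate to that implementation.
    interop::Interface<interop::GPUTexture> WrapTexture(Napi::Env env, wgpu::Texture texture) {
        return interop::GPUTexture::Create<GPUTexture>(env, std::move(texture));
    }

}}  // namespace wgpu::binding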
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp
new file mode 100644
index 00000000000..f03bc3f266e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.cpp
@@ -0,0 +1,35 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/binding/GPUTextureView.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu { namespace binding {
+
+ ////////////////////////////////////////////////////////////////////////////////
+    // wgpu::binding::GPUTextureView
+ ////////////////////////////////////////////////////////////////////////////////
+ GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
+ }
+
+ std::optional<std::string> GPUTextureView::getLabel(Napi::Env) {
+ UNIMPLEMENTED();
+ }
+
+ void GPUTextureView::setLabel(Napi::Env, std::optional<std::string> value) {
+ UNIMPLEMENTED();
+    }
+
+}} // namespace wgpu::binding
diff --git a/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h
new file mode 100644
index 00000000000..8590735093d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/binding/GPUTextureView.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+#define DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
+
+#include "dawn/webgpu_cpp.h"
+#include "dawn_native/DawnNative.h"
+#include "napi.h"
+#include "src/dawn_node/interop/WebGPU.h"
+
+namespace wgpu { namespace binding {
+
+ // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
+ // wgpu::TextureView.
+ class GPUTextureView final : public interop::GPUTextureView {
+ public:
+ GPUTextureView(wgpu::TextureView view);
+
+ // Implicit cast operator to Dawn GPU object
+ inline operator const wgpu::TextureView &() const {
+ return view_;
+ }
+
+ // interop::GPUTextureView interface compliance
+ std::optional<std::string> getLabel(Napi::Env) override;
+ void setLabel(Napi::Env, std::optional<std::string> value) override;
+
+ private:
+ wgpu::TextureView view_;
+ };
+
+}} // namespace wgpu::binding
+
+#endif // DAWN_NODE_BINDING_GPUTEXTUREVIEW_H_
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Browser.idl b/chromium/third_party/dawn/src/dawn_node/interop/Browser.idl
new file mode 100644
index 00000000000..8208058f822
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/Browser.idl
@@ -0,0 +1,84 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// An IDL file that provides stub definitions for dictionaries and interfaces
+// used by the webgpu.idl file
+
+dictionary EventInit {
+ boolean bubbles = false;
+ boolean cancelable = false;
+ boolean composed = false;
+};
+
+interface Navigator {
+ readonly attribute DOMString vendorSub;
+ readonly attribute DOMString productSub;
+ readonly attribute DOMString vendor;
+};
+
+interface Event {
+ readonly attribute boolean bubbles;
+ readonly attribute boolean cancelable;
+ attribute boolean returnValue;
+};
+
+interface WorkerNavigator{};
+
+interface EventListener {
+ undefined handleEvent(Event event);
+};
+
+interface EventTarget {
+ undefined addEventListener(DOMString type, EventListener? callback, optional (AddEventListenerOptions or boolean) options);
+ undefined removeEventListener(DOMString type, EventListener? callback, optional (EventListenerOptions or boolean) options);
+ boolean dispatchEvent(Event event);
+};
+
+dictionary EventListenerOptions { boolean capture = false; };
+
+dictionary AddEventListenerOptions : EventListenerOptions {
+ boolean passive = false;
+ boolean once = false;
+};
+
+interface HTMLVideoElement {
+ attribute unsigned long width;
+ attribute unsigned long height;
+ readonly attribute unsigned long videoWidth;
+ readonly attribute unsigned long videoHeight;
+ attribute DOMString poster;
+};
+
+typedef (Int8Array or Int16Array or Int32Array or Uint8Array or Uint16Array or
+         Uint32Array or Float32Array or Float64Array or
+         DataView) ArrayBufferView;
+
+typedef (ArrayBufferView or ArrayBuffer) BufferSource;
+
+interface ImageBitmap {
+ readonly attribute unsigned long width;
+ readonly attribute unsigned long height;
+};
+
+interface HTMLCanvasElement {
+ attribute unsigned long width;
+ attribute unsigned long height;
+};
+
+interface OffscreenCanvas {
+ attribute unsigned long width;
+ attribute unsigned long height;
+};
+
+interface EventHandler{};
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt
new file mode 100644
index 00000000000..0b84c0ab971
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/CMakeLists.txt
@@ -0,0 +1,68 @@
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Paths to generated files
+set(INTEROP_GEN_DIR "${GEN_DIR}/src/dawn_node/interop")
+set(INTEROP_WEBGPU_H "${INTEROP_GEN_DIR}/WebGPU.h")
+set(INTEROP_WEBGPU_CPP "${INTEROP_GEN_DIR}/WebGPU.cpp")
+
+idlgen(
+ TEMPLATE
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.h.tmpl"
+ IDLS
+ "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+ "${WEBGPU_IDL_PATH}"
+ DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+ OUTPUT
+ "${INTEROP_WEBGPU_H}"
+)
+
+idlgen(
+ TEMPLATE
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPU.cpp.tmpl"
+ IDLS
+ "${CMAKE_CURRENT_SOURCE_DIR}/Browser.idl"
+ "${WEBGPU_IDL_PATH}"
+ DEPENDS
+ "${CMAKE_CURRENT_SOURCE_DIR}/WebGPUCommon.tmpl"
+ OUTPUT
+ "${INTEROP_WEBGPU_CPP}"
+)
+
+add_library(dawn_node_interop STATIC
+ "Core.cpp"
+ "Core.h"
+ "${INTEROP_WEBGPU_H}"
+ "${INTEROP_WEBGPU_CPP}"
+)
+
+target_include_directories(dawn_node_interop
+ PRIVATE
+ "${CMAKE_SOURCE_DIR}"
+ "${NODE_API_HEADERS_DIR}/include"
+ "${NODE_ADDON_API_DIR}"
+ "${GEN_DIR}"
+)
+
+target_link_libraries(dawn_node_interop
+ PRIVATE
+ dawncpp
+)
+
+# dawn_node targets require C++17
+set_property(
+ TARGET dawn_node_interop
+ PROPERTY CXX_STANDARD 17
+)
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp b/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp
new file mode 100644
index 00000000000..8ee22cf90fb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/Core.cpp
@@ -0,0 +1,160 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "src/dawn_node/interop/Core.h"
+
+namespace wgpu { namespace interop {
+
+ Result Success;
+
+ Result Error(std::string msg) {
+ return {msg};
+ }
+
+ Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
+ if (value.IsBoolean()) {
+ out = value.ToBoolean();
+ return Success;
+ }
+ return Error("value is not a boolean");
+ }
+ Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
+ if (value.IsString()) {
+ out = value.ToString();
+ return Success;
+ }
+ return Error("value is not a string");
+ }
+ Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Uint32Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().Int64Value();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
+ if (value.IsNumber()) {
+ // Note that the JS Number type only stores doubles, so the max integer
+ // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
+ // with 1 implicit bit). This is why there's no UInt64Value() function.
+ out = static_cast<uint64_t>(value.ToNumber().Int64Value());
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().FloatValue();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
+ return Napi::Value::From(env, value);
+ }
+
+ Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
+ if (value.IsNumber()) {
+ out = value.ToNumber().DoubleValue();
+ return Success;
+ }
+ return Error("value is not a number");
+ }
+ Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
+ return Napi::Value::From(env, value);
+ }
+
+}} // namespace wgpu::interop
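
A minimal sketch, not part of the patch, showing how these primitive specializations are reached through the FromJS()/ToJS() helper functions declared in Core.h; DoubleCount is an illustrative name only.

#include <cstdint>

#include "src/dawn_node/interop/Core.h"

namespace wgpu { namespace interop {

    // Illustrative only: converts a JS number to uint32_t, doubles it, and
    // converts the result back to a JS value via Converter<uint32_t>.
    Napi::Value DoubleCount(Napi::Env env, Napi::Value jsCount) {
        uint32_t count = 0;
        if (auto res = FromJS(env, jsCount, count); !res) {
            // The Result carries the accumulated conversion error message.
            Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
            return env.Undefined();
        }
        return ToJS(env, count * 2u);
    }

}}  // namespace wgpu::interop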
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/Core.h b/chromium/third_party/dawn/src/dawn_node/interop/Core.h
new file mode 100644
index 00000000000..ff26930959c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/Core.h
@@ -0,0 +1,662 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides core interop helpers used by the code generated by the
+// templates.
+
+#ifndef DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+#define DAWN_NODE_INTEROP_CORE_WEBGPU_H_
+
+#include <cstdint>
+#include <optional>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "napi.h"
+
+#include "src/dawn_node/utils/Debug.h"
+
+#define ENABLE_INTEROP_LOGGING 0 // Enable for verbose interop logging
+
+#if ENABLE_INTEROP_LOGGING
+# define INTEROP_LOG(...) LOG(__VA_ARGS__)
+#else
+# define INTEROP_LOG(...)
+#endif
+
+namespace wgpu { namespace interop {
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Primitive JavaScript types
+ ////////////////////////////////////////////////////////////////////////////////
+ using Object = Napi::Object;
+ using ArrayBuffer = Napi::ArrayBuffer;
+ using Int8Array = Napi::TypedArrayOf<int8_t>;
+ using Int16Array = Napi::TypedArrayOf<int16_t>;
+ using Int32Array = Napi::TypedArrayOf<int32_t>;
+ using Uint8Array = Napi::TypedArrayOf<uint8_t>;
+ using Uint16Array = Napi::TypedArrayOf<uint16_t>;
+ using Uint32Array = Napi::TypedArrayOf<uint32_t>;
+ using Float32Array = Napi::TypedArrayOf<float>;
+ using Float64Array = Napi::TypedArrayOf<double>;
+ using DataView = Napi::TypedArray;
+
+ template <typename T>
+ using FrozenArray = std::vector<T>;
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Result
+ ////////////////////////////////////////////////////////////////////////////////
+
+    // Result is used to hold a success / error state by functions that perform JS <-> C++
+    // conversion.
+ struct [[nodiscard]] Result {
+ // Returns true if the operation succeeded, false if there was an error
+ inline operator bool() const {
+ return error.empty();
+ }
+
+ // If Result is an error, then a new Error is returned with the
+        // stringified values appended to the error message.
+ // If Result is a success, then a success Result is returned.
+ template <typename... VALUES>
+ Result Append(VALUES && ... values) {
+ if (*this) {
+ return *this;
+ }
+ std::stringstream ss;
+ ss << error << "\n";
+ utils::Write(ss, std::forward<VALUES>(values)...);
+ return {ss.str()};
+ }
+
+ // The error message, if the operation failed.
+ std::string error;
+ };
+
+ // A successful result
+ extern Result Success;
+
+ // Returns a Result with the given error message
+ Result Error(std::string msg);
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Interface<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Interface<T> is a templated wrapper around a JavaScript object, which
+ // implements the template-generated interface type T. Interfaces are returned
+ // by either calling T::Bind() or T::Create().
+ template <typename T>
+ class Interface {
+ public:
+ // Constructs an Interface with no JS object.
+ inline Interface() {
+ }
+
+ // Constructs an Interface wrapping the given JS object.
+ // The JS object must have been created with a call to T::Bind().
+ explicit inline Interface(Napi::Object o) : object(o) {
+ }
+
+ // Implicit conversion operators to Napi objects.
+ inline operator napi_value() const {
+ return object;
+ }
+ inline operator const Napi::Value &() const {
+ return object;
+ }
+ inline operator const Napi::Object &() const {
+ return object;
+ }
+
+ // Member and dereference operators
+ inline T* operator->() const {
+ return T::Unwrap(object);
+ }
+ inline T* operator*() const {
+ return T::Unwrap(object);
+ }
+
+ // As<IMPL>() returns the unwrapped object cast to the implementation type.
+ // The interface implementation *must* be of the template type IMPL.
+ template <typename IMPL>
+ inline IMPL* As() const {
+ return static_cast<IMPL*>(T::Unwrap(object));
+ }
+
+ private:
+ Napi::Object object;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Promise<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Promise<T> is a templated wrapper around a JavaScript promise, which can
+ // resolve to the template type T.
+ template <typename T>
+ class Promise {
+ public:
+ // Constructor
+ Promise(Napi::Env env) : deferred(Napi::Promise::Deferred::New(env)) {
+ }
+
+ // Implicit conversion operators to Napi promises.
+ inline operator napi_value() const {
+ return deferred.Promise();
+ }
+ inline operator Napi::Value() const {
+ return deferred.Promise();
+ }
+ inline operator Napi::Promise() const {
+ return deferred.Promise();
+ }
+
+ // Resolve() fulfills the promise with the given value.
+ void Resolve(T&& value) const {
+ deferred.Resolve(ToJS(deferred.Env(), std::forward<T>(value)));
+ }
+
+ // Reject() rejects the promise with the given failure value.
+ void Reject(Napi::Object obj) const {
+ deferred.Reject(obj);
+ }
+ void Reject(Napi::Error err) const {
+ deferred.Reject(err.Value());
+ }
+ void Reject(std::string err) const {
+ Reject(Napi::Error::New(deferred.Env(), err));
+ }
+
+ private:
+ Napi::Promise::Deferred deferred;
+ };
+
+ // Specialization for Promises that resolve with no value
+ template <>
+ class Promise<void> {
+ public:
+ // Constructor
+ Promise(Napi::Env env) : deferred(Napi::Promise::Deferred::New(env)) {
+ }
+
+ // Implicit conversion operators to Napi promises.
+ inline operator napi_value() const {
+ return deferred.Promise();
+ }
+ inline operator Napi::Value() const {
+ return deferred.Promise();
+ }
+ inline operator Napi::Promise() const {
+ return deferred.Promise();
+ }
+
+ // Resolve() fulfills the promise.
+ void Resolve() const {
+ deferred.Resolve(deferred.Env().Undefined());
+ }
+
+ // Reject() rejects the promise with the given failure value.
+ void Reject(Napi::Object obj) const {
+ deferred.Reject(obj);
+ }
+ void Reject(Napi::Error err) const {
+ deferred.Reject(err.Value());
+ }
+ void Reject(std::string err) const {
+ Reject(Napi::Error::New(deferred.Env(), err));
+ }
+
+ private:
+ Napi::Promise::Deferred deferred;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Converter<T>
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // Converter<T> is specialized for each type T which can be converted from C++
+ // to JavaScript, or JavaScript to C++.
+ // Each specialization of Converter<T> is expected to have two static methods
+ // with the signatures:
+ //
+ // // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
+ // static Result FromJS(Napi::Env, Napi::Value in, T& out);
+ //
+ // // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
+ // // this value.
+ // static Napi::Value ToJS(Napi::Env, T in);
+ template <typename T>
+ class Converter {};
+
+ template <>
+ class Converter<Napi::Object> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
+ if (value.IsObject()) {
+ out = value.ToObject();
+ return Success;
+ }
+ return Error("value is not an object");
+ }
+ static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
+ return value;
+ }
+ };
+
+ template <>
+ class Converter<ArrayBuffer> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
+ if (value.IsArrayBuffer()) {
+ out = value.As<ArrayBuffer>();
+ return Success;
+ }
+ return Error("value is not a ArrayBuffer");
+ };
+ static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
+ return value;
+ }
+ };
+
+ template <>
+ class Converter<Napi::TypedArray> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
+ if (value.IsTypedArray()) {
+ out = value.As<Napi::TypedArray>();
+ return Success;
+ }
+ return Error("value is not a TypedArray");
+        }
+        static inline Napi::Value ToJS(Napi::Env, Napi::TypedArray value) {
+            return value;
+        }
+ };
+
+ template <typename T>
+ class Converter<Napi::TypedArrayOf<T>> {
+ public:
+ // clang-format off
+ // The Napi element type of T
+ static constexpr napi_typedarray_type element_type =
+ std::is_same<T, int8_t>::value ? napi_int8_array
+ : std::is_same<T, uint8_t>::value ? napi_uint8_array
+ : std::is_same<T, int16_t>::value ? napi_int16_array
+ : std::is_same<T, uint16_t>::value ? napi_uint16_array
+ : std::is_same<T, int32_t>::value ? napi_int32_array
+ : std::is_same<T, uint32_t>::value ? napi_uint32_array
+ : std::is_same<T, float>::value ? napi_float32_array
+ : std::is_same<T, double>::value ? napi_float64_array
+ : std::is_same<T, int64_t>::value ? napi_bigint64_array
+ : std::is_same<T, uint64_t>::value ? napi_biguint64_array
+ : static_cast<napi_typedarray_type>(-1);
+ // clang-format on
+ static_assert(static_cast<int>(element_type) >= 0,
+ "unsupported T type for Napi::TypedArrayOf<T>");
+ static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
+ if (value.IsTypedArray()) {
+ auto arr = value.As<Napi::TypedArrayOf<T>>();
+ if (arr.TypedArrayType() == element_type) {
+ out = arr;
+ return Success;
+ }
+ return Error("value is not a TypedArray of the correct element type");
+ }
+ return Error("value is not a TypedArray");
+        }
+        static inline Napi::Value ToJS(Napi::Env, Napi::TypedArrayOf<T> value) {
+            return value;
+        }
+ };
+
+ template <>
+ class Converter<std::string> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, std::string&);
+ static Napi::Value ToJS(Napi::Env, std::string);
+ };
+
+ template <>
+ class Converter<bool> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, bool&);
+ static Napi::Value ToJS(Napi::Env, bool);
+ };
+
+ template <>
+ class Converter<int8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int8_t&);
+ static Napi::Value ToJS(Napi::Env, int8_t);
+ };
+
+ template <>
+ class Converter<uint8_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
+ static Napi::Value ToJS(Napi::Env, uint8_t);
+ };
+
+ template <>
+ class Converter<int16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int16_t&);
+ static Napi::Value ToJS(Napi::Env, int16_t);
+ };
+
+ template <>
+ class Converter<uint16_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
+ static Napi::Value ToJS(Napi::Env, uint16_t);
+ };
+
+ template <>
+ class Converter<int32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int32_t&);
+ static Napi::Value ToJS(Napi::Env, int32_t);
+ };
+
+ template <>
+ class Converter<uint32_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
+ static Napi::Value ToJS(Napi::Env, uint32_t);
+ };
+
+ template <>
+ class Converter<int64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, int64_t&);
+ static Napi::Value ToJS(Napi::Env, int64_t);
+ };
+
+ template <>
+ class Converter<uint64_t> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
+ static Napi::Value ToJS(Napi::Env, uint64_t);
+ };
+
+ template <>
+ class Converter<float> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, float&);
+ static Napi::Value ToJS(Napi::Env, float);
+ };
+
+ template <>
+ class Converter<double> {
+ public:
+ static Result FromJS(Napi::Env, Napi::Value, double&);
+ static Napi::Value ToJS(Napi::Env, double);
+ };
+
+ template <typename T>
+ class Converter<Interface<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
+ if (!value.IsObject()) {
+ return Error("value is not object");
+ }
+ auto obj = value.As<Napi::Object>();
+ if (!T::Unwrap(obj)) {
+ return Error("object is not of the correct interface type");
+ }
+ out = Interface<T>(obj);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
+ return {env, value};
+ }
+ };
+
+ template <typename T>
+ class Converter<std::optional<T>> {
+ public:
+ static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ out.reset();
+ return Success;
+ }
+ T v{};
+ auto res = Converter<T>::FromJS(env, value, v);
+ if (!res) {
+ return res;
+ }
+ out = std::move(v);
+ return Success;
+ }
+ static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
+ if (value.has_value()) {
+ return Converter<T>::ToJS(env, value.value());
+ }
+ return env.Null();
+ }
+ };
+
+ template <typename T>
+ class Converter<std::vector<T>> {
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
+ if (!value.IsArray()) {
+ return Error("value is not an array");
+ }
+ auto arr = value.As<Napi::Array>();
+ std::vector<T> vec(arr.Length());
+ for (size_t i = 0; i < vec.size(); i++) {
+ auto res = Converter<T>::FromJS(env, arr[i], vec[i]);
+ if (!res) {
+ return res.Append("for array element ", i);
+ }
+ }
+ out = std::move(vec);
+ return Success;
+ }
+ static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
+ auto arr = Napi::Array::New(env, vec.size());
+ for (size_t i = 0; i < vec.size(); i++) {
+ arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
+ }
+ return arr;
+ }
+ };
+
+ template <typename K, typename V>
+ class Converter<std::unordered_map<K, V>> {
+ public:
+ static inline Result FromJS(Napi::Env env,
+ Napi::Value value,
+ std::unordered_map<K, V>& out) {
+ if (!value.IsObject()) {
+ return Error("value is not an object");
+ }
+ auto obj = value.ToObject();
+ auto keys = obj.GetPropertyNames();
+ std::unordered_map<K, V> map(keys.Length());
+            for (uint32_t i = 0; i < keys.Length(); i++) {
+ K key{};
+ V value{};
+ auto key_res = Converter<K>::FromJS(env, keys[i], key);
+ if (!key_res) {
+ return key_res.Append("for object key");
+ }
+ auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
+ if (!value_res) {
+ return value_res.Append("for object value of key: ", key);
+ }
+ map[key] = value;
+ }
+ out = std::move(map);
+ return Success;
+ }
+ static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
+ auto obj = Napi::Object::New(env);
+ for (auto it : value) {
+ obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
+ }
+ return obj;
+ }
+ };
+
+ template <typename... TYPES>
+ class Converter<std::variant<TYPES...>> {
+ template <typename TY>
+ static inline Result TryFromJS(Napi::Env env,
+ Napi::Value value,
+ std::variant<TYPES...>& out) {
+ TY v{};
+ auto res = Converter<TY>::FromJS(env, value, v);
+ if (!res) {
+ return Error("no possible types matched");
+ }
+ out = std::move(v);
+ return Success;
+ }
+
+ template <typename T0, typename T1, typename... TN>
+ static inline Result TryFromJS(Napi::Env env,
+ Napi::Value value,
+ std::variant<TYPES...>& out) {
+ if (TryFromJS<T0>(env, value, out)) {
+ return Success;
+ }
+ return TryFromJS<T1, TN...>(env, value, out);
+ }
+
+ public:
+ static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+ return TryFromJS<TYPES...>(env, value, out);
+ }
+ static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
+ return std::visit(
+ [&](auto&& v) {
+ using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
+ return Converter<T>::ToJS(env, v);
+ },
+ value);
+ }
+ };
+
+ template <typename T>
+ class Converter<Promise<T>> {
+ public:
+ static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
+ UNIMPLEMENTED();
+ }
+ static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
+ return promise;
+ }
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Helpers
+ ////////////////////////////////////////////////////////////////////////////////
+
+ // FromJS() is a helper function which delegates to
+ // Converter<T>::FromJS()
+ template <typename T>
+ inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
+ return Converter<T>::FromJS(env, value, out);
+ }
+
+ // FromJSOptional() is similar to FromJS(), but if 'value' is either null
+ // or undefined then 'out' is left unassigned.
+ template <typename T>
+ inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+ if (value.IsNull() || value.IsUndefined()) {
+ return Success;
+ }
+ return Converter<T>::FromJS(env, value, out);
+ }
+
+ // ToJS() is a helper function which delegates to Converter<T>::ToJS()
+ template <typename T>
+ inline Napi::Value ToJS(Napi::Env env, T&& value) {
+ return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
+ env, std::forward<T>(value));
+ }
+
+ // DefaultedParameter can be used in the tuple parameter types passed to
+ // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
+ // that have a default value. If the argument is omitted in the call, then
+ // DefaultedParameter::default_value will be assigned to
+ // DefaultedParameter::value.
+ template <typename T>
+ struct DefaultedParameter {
+ T value; // The argument value assigned by FromJS()
+ T default_value; // The default value if no argument supplied
+
+ // Implicit conversion operator. Returns value.
+ inline operator const T&() const {
+ return value;
+ }
+ };
+
+ // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
+ template <typename T>
+ struct IsDefaultedParameter {
+ static constexpr bool value = false;
+ };
+ template <typename T>
+ struct IsDefaultedParameter<DefaultedParameter<T>> {
+ static constexpr bool value = true;
+ };
+
+ // FromJS() is a helper function for bulk converting the arguments of 'info'.
+ // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
+ // Parameters may be of the templated DefaultedParameter type, in which case
+ // the parameter will default to the default-value if omitted.
+ template <typename PARAM_TYPES, int BASE_INDEX = 0>
+ inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
+ if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
+ using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
+ auto& value = info[BASE_INDEX];
+ auto& out = std::get<BASE_INDEX>(args);
+ if constexpr (IsDefaultedParameter<T>::value) {
+ // Parameter has a default value.
+ // Check whether the argument was provided.
+ if (value.IsNull() || value.IsUndefined()) {
+ // Use default value for this parameter
+ out.value = out.default_value;
+ } else {
+ // Argument was provided
+ auto res = FromJS(info.Env(), value, out.value);
+ if (!res) {
+ return res;
+ }
+ }
+ } else {
+ // Parameter does not have a default value.
+ auto res = FromJS(info.Env(), value, out);
+ if (!res) {
+ return res;
+ }
+ }
+ // Convert the rest of the arguments
+ return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
+ } else {
+ return Success;
+ }
+ }
+
+}} // namespace wgpu::interop
+
+#endif // DAWN_NODE_INTEROP_CORE_WEBGPU_H_
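
A sketch, not part of the patch, of the calling convention the generated wrappers rely on: the tuple-based FromJS() overload combined with DefaultedParameter for an argument that has a default value. The resize(width, height, depth = 1) signature is hypothetical.

#include <cstdint>
#include <tuple>

#include "src/dawn_node/interop/Core.h"

namespace wgpu { namespace interop {

    // Hypothetical binding for a JS method resize(width, height, depth = 1).
    Napi::Value Resize(const Napi::CallbackInfo& info) {
        std::tuple<uint32_t, uint32_t, DefaultedParameter<uint32_t>> args;
        std::get<2>(args).default_value = 1;  // used when the JS argument is omitted

        if (auto res = FromJS(info, args); !res) {
            Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
            return {};
        }

        uint32_t width = std::get<0>(args);
        uint32_t height = std::get<1>(args);
        uint32_t depth = std::get<2>(args);  // implicit conversion from DefaultedParameter
        return ToJS(info.Env(), width * height * depth);
    }

}}  // namespace wgpu::interop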
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl
new file mode 100644
index 00000000000..22f5f414c86
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.cpp.tmpl
@@ -0,0 +1,393 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn_node/tools/cmd/idlgen/main.go to generate
+the WebGPU.cpp source file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#include "src/dawn_node/interop/WebGPU.h"
+
+#include <unordered_map>
+
+#include "src/dawn_node/utils/Debug.h"
+
+namespace wgpu {
+namespace interop {
+
+namespace {
+
+{{template "Wrappers" $}}
+
+} // namespace
+
+{{ range $ := .Declarations}}
+{{- if IsDictionary $}}{{template "Dictionary" $}}
+{{- else if IsInterface $}}{{template "Interface" $}}
+{{- else if IsEnum $}}{{template "Enum" $}}
+{{- end}}
+{{- end}}
+
+
+void Initialize(Napi::Env env) {
+ auto* wrapper = Wrappers::Init(env);
+ auto global = env.Global();
+{{ range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ global.Set(Napi::String::New(env, "{{$.Name}}"), wrapper->{{$.Name}}_ctor.Value());
+{{- end}}
+{{- end}}
+}
+
+} // namespace interop
+} // namespace wgpu
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrappers emits the C++ 'Wrappers' class, which holds all the interface and
+-- namespace interop wrapper classes.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrappers"}}
+// Wrappers holds all the Napi class constructors, and Napi::ObjectWrap type
+// declarations, for each of the WebIDL interface and namespace types.
+class Wrappers {
+ Wrappers(Napi::Env env) {
+{{- range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ {{$.Name}}_ctor = Napi::Persistent(W{{$.Name}}::Class(env));
+{{- end}}
+{{- end}}
+ }
+
+ static Wrappers* instance;
+
+public:
+{{- range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}{{template "Wrapper" $}}
+{{- end}}
+{{- end}}
+
+ // Allocates and constructs the Wrappers instance
+ static Wrappers* Init(Napi::Env env) {
+ instance = new Wrappers(env);
+ return instance;
+ }
+
+ // Destructs and frees the Wrappers instance
+ static void Term(Napi::Env env) {
+ delete instance;
+ instance = nullptr;
+ }
+
+ static Wrappers* For(Napi::Env env) {
+ // Currently Napi only actually supports a single Env, so there's no point
+ // maintaining a map of Env to Wrapper. Note: This might not always be true.
+ return instance;
+ }
+
+{{ range $ := .Declarations}}
+{{- if IsInterfaceOrNamespace $}}
+ Napi::FunctionReference {{$.Name}}_ctor;
+{{- end}}
+{{- end}}
+};
+
+Wrappers* Wrappers::instance = nullptr;
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Wrapper emits the C++ wrapper class for the given ast.Interface or
+-- ast.Namespace.
+-- This wrapper class inherits from Napi::ObjectWrap, which binds the lifetime
+-- of the JavaScript object to the lifetime of the wrapper class instance.
+-- If the wrapper is for an interface, the wrapper object holds a unique_ptr to
+-- the interface implementation, and delegates all exposed method calls on to
+-- the implementation.
+-- See: https://github.com/nodejs/node-addon-api/blob/main/doc/object_wrap.md
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Wrapper"}}
+ struct W{{$.Name}} : public Napi::ObjectWrap<W{{$.Name}}> {
+{{- if IsInterface $}}
+ std::unique_ptr<{{$.Name}}> impl;
+{{- end}}
+ static Napi::Function Class(Napi::Env env) {
+ return DefineClass(env, "{{$.Name}}", {
+{{ if $s := SetlikeOf $}}
+ InstanceMethod("has", &W{{$.Name}}::has),
+ InstanceMethod("keys", &W{{$.Name}}::keys),
+{{- end}}
+{{- range $m := MethodsOf $}}
+ InstanceMethod("{{$m.Name}}", &W{{$.Name}}::{{$m.Name}}),
+{{- end}}
+{{- range $a := AttributesOf $}}
+ InstanceAccessor("{{$a.Name}}", &W{{$.Name}}::get{{Title $a.Name}},
+{{- if $a.Readonly}} nullptr{{else}} &W{{$.Name}}::set{{Title $a.Name}}{{end -}}
+ ),
+{{- end}}
+{{- range $c := ConstantsOf $}}
+ StaticValue("{{$c.Name}}", ToJS(env, {{$.Name}}::{{$c.Name}}), napi_default_jsproperty),
+{{- end}}
+ });
+ }
+
+ W{{$.Name}}(const Napi::CallbackInfo& info) : ObjectWrap(info) {}
+
+{{ if $s := SetlikeOf $}}
+ Napi::Value has(const Napi::CallbackInfo& info) {
+ std::tuple<{{template "Type" $s.Elem}}> args;
+ auto res = FromJS(info, args);
+ if (res) {
+ return ToJS(info.Env(), impl->has(info.Env(), std::get<0>(args)));
+ }
+ Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
+ return {};
+ }
+ Napi::Value keys(const Napi::CallbackInfo& info) {
+ return ToJS(info.Env(), impl->keys(info.Env()));
+ }
+{{- end}}
+{{- range $m := MethodsOf $}}
+ Napi::Value {{$m.Name}}(const Napi::CallbackInfo& info) {
+ std::string error;
+{{- range $overload_idx, $o := $m.Overloads}}
+{{- $overloaded := gt (len $m.Overloads) 1}}
+ { {{if $overloaded}}// Overload {{$overload_idx}}{{end}}
+ std::tuple<
+{{- range $i, $p := $o.Parameters}}
+{{- if $i}}, {{end}}
+{{- if $p.Init }}DefaultedParameter<{{template "Type" $p.Type}}>
+{{- else if $p.Optional}}std::optional<{{template "Type" $p.Type}}>
+{{- else }}{{template "Type" $p.Type}}
+{{- end}}
+{{- end}}> args;
+
+{{- range $i, $p := $o.Parameters}}
+{{- if $p.Init}}
+ std::get<{{$i}} /* {{$p.Name}} */>(args).default_value = {{Eval "Literal" "Value" $p.Init "Type" $p.Type}};
+{{- end}}
+{{- end}}
+
+ auto res = FromJS(info, args);
+ if (res) {
+ {{/* indent */}}INTEROP_LOG(
+{{- range $i, $p := $o.Parameters}}
+{{- if $i}}, ", {{$p.Name}}: "{{else}}"{{$p.Name}}: "{{end}}, std::get<{{$i}}>(args)
+{{- end}});
+ {{/* indent */}}
+{{- if not (IsUndefinedType $o.Type) }}auto result = {{end -}}
+ impl->{{$o.Name}}(info.Env(){{range $i, $_ := $o.Parameters}}, std::get<{{$i}}>(args){{end}});
+ {{/* indent */ -}}
+{{- if IsUndefinedType $o.Type}}return info.Env().Null();
+{{- else }}return ToJS(info.Env(), result);
+{{- end }}
+ }
+ error = {{if $overloaded}}"\noverload {{$overload_idx}} failed to match:\n" + {{end}}res.error;
+ }
+{{- end}}
+ Napi::Error::New(info.Env(), "no overload matched for {{$m.Name}}:\n" + error).ThrowAsJavaScriptException();
+ return {};
+ }
+{{- end}}
+
+{{- range $a := AttributesOf $}}
+ Napi::Value get{{Title $a.Name}}(const Napi::CallbackInfo& info) {
+ return ToJS(info.Env(), impl->get{{Title $a.Name}}(info.Env()));
+ }
+{{- if not $a.Readonly}}
+ void set{{Title $a.Name}}(const Napi::CallbackInfo& info, const Napi::Value& value) {
+ {{template "Type" $a.Type}} v{};
+ auto res = FromJS(info.Env(), value, v);
+ if (res) {
+ impl->set{{Title $a.Name}}(info.Env(), std::move(v));
+ } else {
+ res = res.Append("invalid value to {{$a.Name}}");
+ Napi::Error::New(info.Env(), res.error).ThrowAsJavaScriptException();
+ }
+ }
+{{- end }}
+{{- end}}
+ };
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ method implementations and associated functions of
+-- the interop type that defines the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+ auto object = value.ToObject();
+ Result res;
+{{- template "DictionaryMembersFromJS" $}};
+ return Success;
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+ auto object = Napi::Object::New(env);
+{{- template "DictionaryMembersToJS" $}}
+ return object;
+}
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& dict) {
+ o << "{{$.Name}} {";
+{{- range $i, $m := $.Members}}
+ o << {{if $i}}", "{{else}}" "{{end}} << "{{$m.Name}}: ";
+ utils::Write(o, dict.{{$m.Name}});
+{{- end }}
+ o << "}" << std::endl;
+ return o;
+}
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersFromJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields from JavaScript to C++. Each call to ToJS() is
+-- emitted as a separate statement, and requires a 'Result res' local to be
+-- declared
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersFromJS"}}
+{{- if $.Inherits}}{{template "DictionaryMembersFromJS" (Lookup $.Inherits)}}{{end}}
+{{- range $i, $m := $.Members}}
+ {{/* indent */}}
+{{- if $m.Init }}res = interop::FromJSOptional(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{- else }}res = interop::FromJS(env, object.Get("{{$m.Name}}"), out.{{$m.Name}});
+{{- end }}
+ if (!res) {
+ return res.Append("while converting member '{{$.Name}}'");
+ }
+{{- end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMembersToJS emits the C++ logic to convert each of the
+-- dictionary ast.Member fields to JavaScript from C++. Each call to ToJS() is
+-- emitted as a separate statement
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMembersToJS"}}
+{{- if $.Inherits}}{{template "DictionaryMembersToJS" (Lookup $.Inherits)}}{{end}}
+{{- range $m := $.Members}}
+ object.Set(Napi::String::New(env, "{{$m.Name}}"), interop::ToJS(env, value.{{$m.Name}}));
+{{- end}}
+{{- end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ method implementations that define the given
+-- ast.Interface.
+-- Note: Most of the actual binding logic lives in the interface wrapper class.
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+{{$.Name}}::{{$.Name}}() = default;
+
+{{$.Name}}* {{$.Name}}::Unwrap(Napi::Object object) {
+ auto* wrappers = Wrappers::For(object.Env());
+ if (!object.InstanceOf(wrappers->{{$.Name}}_ctor.Value())) {
+ return nullptr;
+ }
+ return Wrappers::W{{$.Name}}::Unwrap(object)->impl.get();
+}
+
+Interface<{{$.Name}}> {{$.Name}}::Bind(Napi::Env env, std::unique_ptr<{{$.Name}}>&& impl) {
+ auto* wrappers = Wrappers::For(env);
+ auto object = wrappers->{{$.Name}}_ctor.New({});
+ auto* wrapper = Wrappers::W{{$.Name}}::Unwrap(object);
+ wrapper->impl = std::move(impl);
+ return Interface<{{$.Name}}>(object);
+}
+
+{{$.Name}}::~{{$.Name}}() = default;
+{{ end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ associated functions of the interop type that defines the
+-- given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+bool Converter<{{$.Name}}>::FromString(std::string str, {{$.Name}}& out) {
+{{- range $e := $.Values}}
+ if (str == {{$e.Value}}) {
+ out = {{$.Name}}::{{EnumEntryName $e.Value}};
+ return true;
+ }
+{{- end}}
+ return false;
+}
+
+const char* Converter<{{$.Name}}>::ToString({{$.Name}} value) {
+ switch (value) {
+{{- range $e := $.Values}}
+ case {{$.Name}}::{{EnumEntryName $e.Value}}:
+ return {{$e.Value}};
+{{- end}}
+ }
+ return nullptr;
+}
+
+Result Converter<{{$.Name}}>::FromJS(Napi::Env env, Napi::Value value, {{$.Name}}& out) {
+ std::string str = value.ToString();
+ if (FromString(str, out)) {
+ return Success;
+ }
+ return Error(str + " is not a valid enum value of {{$.Name}}");
+}
+
+Napi::Value Converter<{{$.Name}}>::ToJS(Napi::Env env, {{$.Name}} value) {
+ switch (value) {
+{{- range $e := $.Values}}
+ case {{$.Name}}::{{EnumEntryName $e.Value}}:
+ return Napi::String::New(env, {{$e.Value}});
+{{- end}}
+ }
+ return env.Undefined();
+}
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}} value) {
+ if (auto* s = Converter<{{$.Name}}>::ToString(value)) {
+ return o << s;
+ }
+ return o << "undefined<{{$.Name}}>";
+}
+
+{{end}}
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl
new file mode 100644
index 00000000000..5fbb0ae55c4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/WebGPU.h.tmpl
@@ -0,0 +1,282 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn_node/tools/cmd/idlgen/main.go to generate
+the WebGPU.h header file.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+{{- Include "WebGPUCommon.tmpl" -}}
+
+#ifndef DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+#define DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+#include "src/dawn_node/interop/Core.h"
+
+namespace wgpu {
+namespace interop {
+
+// Initialize() registers the WebGPU types with the Napi environment.
+void Initialize(Napi::Env env);
+
+{{ range $ := .Declarations}}
+{{- if IsDictionary $}}{{template "Dictionary" $}}
+{{- else if IsNamespace $}}{{template "Namespace" $}}
+{{- else if IsInterface $}}{{template "Interface" $}}
+{{- else if IsEnum $}}{{template "Enum" $}}
+{{- else if IsTypedef $}}{{template "Typedef" $}}
+{{- end}}
+{{- end}}
+
+} // namespace interop
+} // namespace wgpu
+
+#endif // DAWN_NODE_GEN_INTEROP_WEBGPU_H_
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Dictionary emits the C++ header declaration that defines the interop type for
+-- the given ast.Dictionary
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Dictionary"}}
+// dictionary {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+{{ range $m := $.Members}}
+{{- if IsConstructor $m}} {{$.Name}}();
+{{ else if IsMember $m}} {{template "DictionaryMember" $m}}
+{{ end}}
+{{- end -}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+ static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+ static Napi::Value ToJS(Napi::Env, {{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, const {{$.Name}}& desc);
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Namespace emits the C++ header declaration that defines the interop type for
+-- the given ast.Namespace
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Namespace"}}
+// namespace {{$.Name}}
+class {{$.Name}} {
+public:
+ virtual ~{{$.Name}}();
+ {{$.Name}}();
+{{- range $c := ConstantsOf $}}
+{{- template "Constant" $c}}
+{{- end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Interface emits the C++ header declaration that defines the interop type for
+-- the given ast.Interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Interface"}}
+// interface {{$.Name}}
+class {{$.Name}} {{- if $.Inherits }} : public {{$.Inherits}}{{end}} {
+public:
+ static Interface<{{$.Name}}> Bind(Napi::Env, std::unique_ptr<{{$.Name}}>&&);
+ static {{$.Name}}* Unwrap(Napi::Object);
+
+ template<typename T, typename ... ARGS>
+ static inline Interface<{{$.Name}}> Create(Napi::Env env, ARGS&& ... args) {
+ return Bind(env, std::make_unique<T>(std::forward<ARGS>(args)...));
+ }
+
+ virtual ~{{$.Name}}();
+ {{$.Name}}();
+{{- if $s := SetlikeOf $}}
+{{- template "InterfaceSetlike" $s}}
+{{- end}}
+{{- range $m := MethodsOf $}}
+{{- template "InterfaceMethod" $m}}
+{{- end}}
+{{- range $a := AttributesOf $}}
+{{- template "InterfaceAttribute" $a}}
+{{- end}}
+{{- range $c := ConstantsOf $}}
+{{- template "Constant" $c}}
+{{- end}}
+};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Typedef emits the C++ header declaration that defines the interop type for
+-- the given ast.Interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Typedef"}}
+using {{$.Name}} = {{template "Type" $.Type}};
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Enum emits the C++ header declaration that defines the interop type for
+-- the given ast.Enum
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Enum"}}
+enum class {{$.Name}} {
+{{- range $ := $.Values}}
+ {{EnumEntryName $.Value}},
+{{- end}}
+};
+
+template<>
+class Converter<{{$.Name}}> {
+public:
+ static Result FromJS(Napi::Env, Napi::Value, {{$.Name}}&);
+ static Napi::Value ToJS(Napi::Env, {{$.Name}});
+ static bool FromString(std::string, {{$.Name}}&);
+ static const char* ToString({{$.Name}});
+};
+
+std::ostream& operator<<(std::ostream& o, {{$.Name}});
+{{end}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- DictionaryMember emits the C++ declaration for a single dictionary ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "DictionaryMember"}}
+{{- if $.Attribute}}{{template "AttributeType" $}} {{$.Name}}
+{{- if $.Init}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}}{{end}};
+{{- else }}{{template "Type" $.Type}} {{$.Name}}({{template "Parameters" $.Parameters}});
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceSetlike emits the C++ methods for a setlike interface
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceSetlike"}}
+ virtual bool has(Napi::Env, {{template "Type" $.Elem}}) = 0;
+ virtual std::vector<{{template "Type" $.Elem}}> keys(Napi::Env) = 0;
+{{- /* TODO(crbug.com/dawn/1143):
+ entries, forEach, size, values
+ read-write: add, clear, or delete
+*/}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceMethod emits the C++ declaration for a single interface ast.Member
+-- method
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceMethod"}}
+{{- range $o := $.Overloads}}
+ virtual {{template "Type" $o.Type}} {{$.Name}}(Napi::Env{{template "ParametersWithLeadingComma" $o.Parameters}}) = 0;
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- InterfaceAttribute emits the C++ declaration for a single interface
+-- ast.Member attribute
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "InterfaceAttribute"}}
+ virtual {{template "Type" $.Type}} get{{Title $.Name}}(Napi::Env) = 0;
+{{- if not $.Readonly}}
+ virtual void set{{Title $.Name}}(Napi::Env, {{template "Type" $.Type}} value) = 0;
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Constant emits the C++ declaration for a single ast.Member constant
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Constant"}}
+ static constexpr {{template "Type" $.Type}} {{$.Name}} = {{Eval "Literal" "Value" $.Init "Type" $.Type}};
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameters emits the C++ comma separated list of parameter declarations for
+-- the given []ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameters"}}
+{{- range $i, $param := $ }}
+{{- if $i }}, {{end}}
+{{- template "Parameter" $param}}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- ParametersWithLeadingComma emits the C++ comma separated list of parameter
+-- declarations for the given []ast.Parameter, starting with a leading comma
+-- for the first parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "ParametersWithLeadingComma"}}
+{{- range $i, $param := $ }}, {{/* */}}
+{{- template "Parameter" $param}}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Parameter emits the C++ parameter type and name for the given ast.Parameter
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Parameter" -}}
+{{- if $.Init }}{{template "Type" $.Type}} {{$.Name}}
+{{- else if $.Optional}}std::optional<{{template "Type" $.Type}}> {{$.Name}}
+{{- else }}{{template "Type" $.Type}} {{$.Name}}
+{{- end }}
+{{- end}}
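For orientation, a minimal sketch of the C++ these interface templates expand to, assuming a hypothetical read-write DOMString attribute 'label' and a parameterless method 'destroy' returning undefined (the names are illustrative, not taken from this change):

// Emitted by "InterfaceAttribute" for a read-write DOMString attribute 'label':
virtual std::string getLabel(Napi::Env) = 0;
virtual void setLabel(Napi::Env, std::string value) = 0;
// Emitted by "InterfaceMethod" for 'undefined destroy()' (single overload, no parameters):
virtual void destroy(Napi::Env) = 0;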
diff --git a/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl b/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl
new file mode 100644
index 00000000000..86307735634
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/interop/WebGPUCommon.tmpl
@@ -0,0 +1,126 @@
+{{/*
+ Copyright 2021 The Dawn Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/}}
+
+{{- /*
+--------------------------------------------------------------------------------
+Template file for use with src/dawn_node/tools/cmd/idlgen/main.go.
+This file provides common template definitions and is included by WebGPU.h.tmpl
+and WebGPU.cpp.tmpl.
+
+See:
+* https://github.com/ben-clayton/webidlparser/blob/main/ast/ast.go for the AST
+ types used by this template
+* src/dawn_node/tools/cmd/idlgen/main.go for additional structures and functions
+ used by this template
+* https://golang.org/pkg/text/template/ for documentation on the template syntax
+--------------------------------------------------------------------------------
+*/ -}}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Type generates the C++ type for the given ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Type" -}}
+{{- if IsUndefinedType $}}void
+{{- else if IsTypeName $}}
+{{- if eq $.Name "boolean" }}bool
+{{- else if eq $.Name "long" }}int32_t
+{{- else if eq $.Name "unsigned long" }}uint32_t
+{{- else if eq $.Name "long long" }}int64_t
+{{- else if eq $.Name "unsigned long long" }}uint64_t
+{{- else if eq $.Name "object" }}Object
+{{- else if eq $.Name "DOMString" }}std::string
+{{- else if eq $.Name "USVString" }}std::string
+{{- else if eq $.Name "ArrayBuffer" }}ArrayBuffer
+{{- else if IsInterface (Lookup $.Name) }}Interface<{{$.Name}}>
+{{- else }}{{$.Name}}
+{{- end }}
+{{- else if IsParametrizedType $}}{{$.Name}}<{{template "TypeList" $.Elems}}>
+{{- else if IsNullableType $}}std::optional<{{template "Type" $.Type}}>
+{{- else if IsUnionType $}}std::variant<{{template "VariantTypeList" $.Types}}>
+{{- else if IsSequenceType $}}std::vector<{{template "Type" $.Elem}}>
+{{- else if IsRecordType $}}std::unordered_map<{{template "Type" $.Key}}, {{template "Type" $.Elem}}>
+{{- else }} /* Unhandled Type {{printf "%T" $}} */
+{{- end -}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- AttributeType generates the C++ type for the given ast.Member
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "AttributeType" -}}
+{{- if $.Required }}{{template "Type" $.Type}}
+{{- else if $.Init }}{{template "Type" $.Type}}
+{{- else }}std::optional<{{template "Type" $.Type}}>
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- Literal generates a C++ literal value using the following arguments:
+-- Value - the ast.Literal
+-- Type - the ast.Type of the literal
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "Literal" -}}
+{{- if IsDefaultDictionaryLiteral $.Value}}{{template "Type" $.Type}}{}
+{{- else if IsTypeName $.Type }}
+{{- $ty := Lookup $.Type.Name}}
+{{- if IsEnum $ty }}{{$.Type.Name}}::{{EnumEntryName $.Value.Value}}
+{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{- else }}/* Unhandled Type {{printf "ty: %v $.Type.Name: %T $.Value: %T" $ty $.Type.Name $.Value}} */
+{{- end }}
+{{- else if IsSequenceType $.Type }}{{template "Type" $.Type}}{} {{- /* TODO: Assumes the initialiser is empty */}}
+{{- else if IsBasicLiteral $.Value }}{{$.Value.Value}}
+{{- else }} /* Unhandled Type {{printf "%T %T" $.Type $.Value}} */
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- TypeList generates a C++ comma separated list of types from the given
+-- []ast.Type
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "TypeList" -}}
+{{- range $i, $ty := $}}
+{{- if $i }}, {{end}}
+{{- template "Type" $ty}}
+{{- end}}
+{{- end }}
+
+
+{{- /*
+--------------------------------------------------------------------------------
+-- VariantTypeList generates a C++ comma separated list of types from the given
+-- []ast.Type, skipping any 'undefined' types
+--------------------------------------------------------------------------------
+*/ -}}
+{{- define "VariantTypeList" -}}
+{{- range $i, $ty := $}}
+{{- if not (IsUndefinedType $ty)}}
+{{- if $i }}, {{end}}
+{{- template "Type" $ty}}
+{{- end}}
+{{- end}}
+{{- end }}
+
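As a quick illustration of the "Type" template above, a few WebIDL-to-C++ mappings assembled from its rules (the declaration at the end is a hypothetical example, not taken from this change):

// boolean                      -> bool
// long / unsigned long         -> int32_t / uint32_t
// long long / unsigned long long -> int64_t / uint64_t
// DOMString / USVString        -> std::string
// sequence<DOMString>          -> std::vector<std::string>
// record<DOMString, long>      -> std::unordered_map<std::string, int32_t>
// DOMString? (nullable)        -> std::optional<std::string>
// (long or DOMString)          -> std::variant<int32_t, std::string>
// undefined                    -> void
std::optional<std::vector<std::string>> labels;  // WebIDL: sequence<DOMString>? labels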
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/go.mod b/chromium/third_party/dawn/src/dawn_node/tools/go.mod
new file mode 100644
index 00000000000..e39e222e92c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/tools/go.mod
@@ -0,0 +1,9 @@
+module dawn.googlesource.com/dawn/src/dawn_node/tools
+
+go 1.16
+
+require (
+ github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094
+ github.com/mattn/go-colorable v0.1.9
+ github.com/mattn/go-isatty v0.0.14 // indirect
+)
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/go.sum b/chromium/third_party/dawn/src/dawn_node/tools/go.sum
new file mode 100644
index 00000000000..42c01181c64
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/tools/go.sum
@@ -0,0 +1,33 @@
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094 h1:CTVJdI6oUCRNucMEmoh3c2U88DesoPtefsxKhoZ1WuQ=
+github.com/ben-clayton/webidlparser v0.0.0-20210923100217-8ba896ded094/go.mod h1:bV550SPlMos7UhMprxlm14XTBTpKHSUZ8Q4Id5qQuyw=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/run-cts b/chromium/third_party/dawn/src/dawn_node/tools/run-cts
new file mode 100755
index 00000000000..cf58452becc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/tools/run-cts
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Copyright 2021 The Tint Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e # Fail on any error.
+
+if [ ! -x "$(which go)" ] ; then
+ echo "error: go needs to be on \$PATH to use $0"
+ exit 1
+fi
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd )"
+ROOT_DIR="$( cd "${SCRIPT_DIR}/.." >/dev/null 2>&1 && pwd )"
+BINARY="${SCRIPT_DIR}/bin/run-cts"
+
+# Rebuild the binary.
+# Note: go caches build artifacts, so this is quick for repeat calls
+pushd "${SCRIPT_DIR}/src/cmd/run-cts" > /dev/null
+ go build -o "${BINARY}" main.go
+popd > /dev/null
+
+"${BINARY}" "$@"
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go
new file mode 100644
index 00000000000..5ea5499fcc7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/idlgen/main.go
@@ -0,0 +1,635 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// idlgen is a tool used to generate code from WebIDL files and a golang
+// template file
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+
+ "github.com/ben-clayton/webidlparser/ast"
+ "github.com/ben-clayton/webidlparser/parser"
+)
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+func showUsage() {
+ fmt.Println(`
+idlgen is a tool used to generate code from WebIDL files and a golang
+template file
+
+Usage:
+ idlgen --template=<template-path> --output=<output-path> <idl-file> [<idl-file>...]`)
+ os.Exit(1)
+}
+
+func run() error {
+ var templatePath string
+ var outputPath string
+ flag.StringVar(&templatePath, "template", "", "the template file run with the parsed WebIDL files")
+ flag.StringVar(&outputPath, "output", "", "the output file")
+ flag.Parse()
+
+ idlFiles := flag.Args()
+
+ // Check all required arguments are provided
+ if templatePath == "" || outputPath == "" || len(idlFiles) == 0 {
+ showUsage()
+ }
+
+ // Open up the output file
+ out := os.Stdout
+ if outputPath != "" {
+ file, err := os.Create(outputPath)
+ if err != nil {
+ return fmt.Errorf("failed to open output file '%v'", outputPath)
+ }
+ out = file
+ defer file.Close()
+ }
+
+ // Read the template file
+ tmpl, err := ioutil.ReadFile(templatePath)
+ if err != nil {
+ return fmt.Errorf("failed to open template file '%v'", templatePath)
+ }
+
+ // idl is the combination of the parsed idlFiles
+ idl := &ast.File{}
+
+ // Parse each of the WebIDL files and add the declarations to idl
+ for _, path := range idlFiles {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("failed to open file '%v'", path)
+ }
+ fileIDL := parser.Parse(string(content))
+ if numErrs := len(fileIDL.Errors); numErrs != 0 {
+ errs := make([]string, numErrs)
+ for i, e := range fileIDL.Errors {
+ errs[i] = e.Message
+ }
+ return fmt.Errorf("errors found while parsing %v:\n%v", path, strings.Join(errs, "\n"))
+ }
+ idl.Declarations = append(idl.Declarations, fileIDL.Declarations...)
+ }
+
+ // Initialize the generator
+ g := generator{t: template.New(templatePath)}
+ g.workingDir = filepath.Dir(templatePath)
+ g.funcs = map[string]interface{}{
+ // Functions exposed to the template
+ "AttributesOf": attributesOf,
+ "ConstantsOf": constantsOf,
+ "EnumEntryName": enumEntryName,
+ "Eval": g.eval,
+ "Include": g.include,
+ "IsBasicLiteral": is(ast.BasicLiteral{}),
+ "IsConstructor": isConstructor,
+ "IsDefaultDictionaryLiteral": is(ast.DefaultDictionaryLiteral{}),
+ "IsDictionary": is(ast.Dictionary{}),
+ "IsEnum": is(ast.Enum{}),
+ "IsInterface": is(ast.Interface{}),
+ "IsInterfaceOrNamespace": is(ast.Interface{}, ast.Namespace{}),
+ "IsMember": is(ast.Member{}),
+ "IsNamespace": is(ast.Namespace{}),
+ "IsNullableType": is(ast.NullableType{}),
+ "IsParametrizedType": is(ast.ParametrizedType{}),
+ "IsRecordType": is(ast.RecordType{}),
+ "IsSequenceType": is(ast.SequenceType{}),
+ "IsTypedef": is(ast.Typedef{}),
+ "IsTypeName": is(ast.TypeName{}),
+ "IsUndefinedType": isUndefinedType,
+ "IsUnionType": is(ast.UnionType{}),
+ "Lookup": g.lookup,
+ "MethodsOf": methodsOf,
+ "SetlikeOf": setlikeOf,
+ "Title": strings.Title,
+ }
+ t, err := g.t.
+ Option("missingkey=invalid").
+ Funcs(g.funcs).
+ Parse(string(tmpl))
+ if err != nil {
+ return fmt.Errorf("failed to parse template file '%v': %w", templatePath, err)
+ }
+
+ // simplify the definitions in the WebIDL before passing this to the template
+ idl, declarations := simplify(idl)
+ g.declarations = declarations
+
+ // Write the file header
+ fmt.Fprintf(out, header, strings.Join(os.Args[1:], "\n// "))
+
+ // Execute the template
+ return t.Execute(out, idl)
+}
+
+// declarations is a map of WebIDL declaration name to its AST node.
+type declarations map[string]ast.Decl
+
+// nameOf returns the name of the AST node n.
+// Returns an empty string if the node is not named.
+func nameOf(n ast.Node) string {
+ switch n := n.(type) {
+ case *ast.Namespace:
+ return n.Name
+ case *ast.Interface:
+ return n.Name
+ case *ast.Dictionary:
+ return n.Name
+ case *ast.Enum:
+ return n.Name
+ case *ast.Typedef:
+ return n.Name
+ case *ast.Mixin:
+ return n.Name
+ case *ast.Includes:
+ return ""
+ default:
+ panic(fmt.Errorf("unhandled AST declaration %T", n))
+ }
+}
+
+// simplify processes the AST 'in', returning a new AST that:
+// * Has all partial interfaces merged into a single interface.
+// * Has all mixins flattened into their place of use.
+// * Has all the declarations ordered in dependency order (leaf first)
+// simplify also returns the map of declarations in the AST.
+func simplify(in *ast.File) (*ast.File, declarations) {
+ s := simplifier{
+ declarations: declarations{},
+ registered: map[string]bool{},
+ out: &ast.File{},
+ }
+
+ // Walk the IDL declarations to merge together partial interfaces and embed
+ // mixins into their uses.
+ {
+ interfaces := map[string]*ast.Interface{}
+ mixins := map[string]*ast.Mixin{}
+ for _, d := range in.Declarations {
+ switch d := d.(type) {
+ case *ast.Interface:
+ if i, ok := interfaces[d.Name]; ok {
+ // Merge partial body into one interface
+ i.Members = append(i.Members, d.Members...)
+ } else {
+ clone := *d
+ d := &clone
+ interfaces[d.Name] = d
+ s.declarations[d.Name] = d
+ }
+ case *ast.Mixin:
+ mixins[d.Name] = d
+ s.declarations[d.Name] = d
+ case *ast.Includes:
+ // Merge mixin into interface
+ i, ok := interfaces[d.Name]
+ if !ok {
+ panic(fmt.Errorf("%v includes %v, but %v is not an interface", d.Name, d.Source, d.Name))
+ }
+ m, ok := mixins[d.Source]
+ if !ok {
+ panic(fmt.Errorf("%v includes %v, but %v is not an mixin", d.Name, d.Source, d.Source))
+ }
+ // Merge mixin into the interface
+ for _, member := range m.Members {
+ if member, ok := member.(*ast.Member); ok {
+ i.Members = append(i.Members, member)
+ }
+ }
+ default:
+ if name := nameOf(d); name != "" {
+ s.declarations[nameOf(d)] = d
+ }
+ }
+ }
+ }
+
+ // Now traverse the declarations in to produce the dependency-ordered
+ // output `s.out`.
+ for _, d := range in.Declarations {
+ if name := nameOf(d); name != "" {
+ s.visit(s.declarations[nameOf(d)])
+ }
+ }
+
+ return s.out, s.declarations
+}
+
+// simplifier holds internal state for simplify()
+type simplifier struct {
+ // all AST declarations
+ declarations declarations
+ // set of visited declarations
+ registered map[string]bool
+ // the dependency-ordered output
+ out *ast.File
+}
+
+// visit traverses the AST declaration 'd' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visit(d ast.Decl) {
+ register := func(name string) bool {
+ if s.registered[name] {
+ return true
+ }
+ s.registered[name] = true
+ return false
+ }
+ switch d := d.(type) {
+ case *ast.Namespace:
+ if register(d.Name) {
+ return
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Interface:
+ if register(d.Name) {
+ return
+ }
+ if d, ok := s.declarations[d.Inherits]; ok {
+ s.visit(d)
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Dictionary:
+ if register(d.Name) {
+ return
+ }
+ if d, ok := s.declarations[d.Inherits]; ok {
+ s.visit(d)
+ }
+ for _, m := range d.Members {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ case *ast.Typedef:
+ if register(d.Name) {
+ return
+ }
+ s.visitType(d.Type)
+ case *ast.Mixin:
+ if register(d.Name) {
+ return
+ }
+ for _, m := range d.Members {
+ if m, ok := m.(*ast.Member); ok {
+ s.visitType(m.Type)
+ for _, p := range m.Parameters {
+ s.visitType(p.Type)
+ }
+ }
+ }
+ case *ast.Enum:
+ if register(d.Name) {
+ return
+ }
+ case *ast.Includes:
+ if register(d.Name) {
+ return
+ }
+ default:
+ panic(fmt.Errorf("unhandled AST declaration %T", d))
+ }
+
+ s.out.Declarations = append(s.out.Declarations, d)
+}
+
+// visitType traverses the AST type 't' adding all dependent declarations to
+// s.out.
+func (s *simplifier) visitType(t ast.Type) {
+ switch t := t.(type) {
+ case *ast.TypeName:
+ if d, ok := s.declarations[t.Name]; ok {
+ s.visit(d)
+ }
+ case *ast.UnionType:
+ for _, t := range t.Types {
+ s.visitType(t)
+ }
+ case *ast.ParametrizedType:
+ for _, t := range t.Elems {
+ s.visitType(t)
+ }
+ case *ast.NullableType:
+ s.visitType(t.Type)
+ case *ast.SequenceType:
+ s.visitType(t.Elem)
+ case *ast.RecordType:
+ s.visitType(t.Elem)
+ default:
+ panic(fmt.Errorf("unhandled AST type %T", t))
+ }
+}
+
+// generator holds the template generator state
+type generator struct {
+ // the root template
+ t *template.Template
+ // the working directory
+ workingDir string
+ // map of function name to function exposed to the template executor
+ funcs map[string]interface{}
+ // dependency-sorted declarations
+ declarations declarations
+}
+
+// eval executes the sub-template with the given name and arguments, returning
+// the generated output
+// args can be a single argument:
+// arg[0]
+// or a list of name-value pairs:
+// (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
+func (g *generator) eval(template string, args ...interface{}) (string, error) {
+ target := g.t.Lookup(template)
+ if target == nil {
+ return "", fmt.Errorf("template '%v' not found", template)
+ }
+ sb := strings.Builder{}
+ var err error
+ if len(args) == 1 {
+ err = target.Execute(&sb, args[0])
+ } else {
+ m := newMap()
+ if len(args)%2 != 0 {
+ return "", fmt.Errorf("Eval expects a single argument or list name-value pairs")
+ }
+ for i := 0; i < len(args); i += 2 {
+ name, ok := args[i].(string)
+ if !ok {
+ return "", fmt.Errorf("Eval argument %v is not a string", i)
+ }
+ m.Put(name, args[i+1])
+ }
+ err = target.Execute(&sb, m)
+ }
+ if err != nil {
+ return "", fmt.Errorf("while evaluating '%v': %v", template, err)
+ }
+ return sb.String(), nil
+}
+
+// lookup returns the declaration with the given name, or nil if not found.
+func (g *generator) lookup(name string) ast.Decl {
+ return g.declarations[name]
+}
+
+// include loads the template with the given path, importing the declarations
+// into the scope of the current template.
+func (g *generator) include(path string) (string, error) {
+ t, err := g.t.
+ Option("missingkey=invalid").
+ Funcs(g.funcs).
+ ParseFiles(filepath.Join(g.workingDir, path))
+ if err != nil {
+ return "", err
+ }
+ g.t.AddParseTree(path, t.Tree)
+ return "", nil
+}
+
+// Map is a simple generic key-value map, which can be used in the template
+type Map map[interface{}]interface{}
+
+func newMap() Map { return Map{} }
+
+// Put adds the key-value pair into the map.
+// Put always returns an empty string so nothing is printed in the template.
+func (m Map) Put(key, value interface{}) string {
+ m[key] = value
+ return ""
+}
+
+// Get looks up and returns the value with the given key. If the map does not
+// contain the given key, then nil is returned.
+func (m Map) Get(key interface{}) interface{} {
+ return m[key]
+}
+
+// is returns a function that returns true if the value passed to the function
+// matches any of the types of the objects in 'prototypes'.
+func is(prototypes ...interface{}) func(interface{}) bool {
+ types := make([]reflect.Type, len(prototypes))
+ for i, p := range prototypes {
+ types[i] = reflect.TypeOf(p)
+ }
+ return func(v interface{}) bool {
+ ty := reflect.TypeOf(v)
+ for _, rty := range types {
+ if ty == rty || ty == reflect.PtrTo(rty) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// isConstructor returns true if the object is a constructor ast.Member.
+func isConstructor(v interface{}) bool {
+ if member, ok := v.(*ast.Member); ok {
+ if ty, ok := member.Type.(*ast.TypeName); ok {
+ return ty.Name == "constructor"
+ }
+ }
+ return false
+}
+
+// isUndefinedType returns true if the type is 'undefined'
+func isUndefinedType(ty ast.Type) bool {
+ if ty, ok := ty.(*ast.TypeName); ok {
+ return ty.Name == "undefined"
+ }
+ return false
+}
+
+// enumEntryName formats the enum entry name 's' for use in a C++ enum.
+func enumEntryName(s string) string {
+ return "k" + strings.ReplaceAll(pascalCase(strings.Trim(s, `"`)), "-", "")
+}
+
+// Method describes a WebIDL interface method
+type Method struct {
+ // Name of the method
+ Name string
+ // The list of overloads of the method
+ Overloads []*ast.Member
+}
+
+// methodsOf returns all the methods of the given WebIDL interface.
+func methodsOf(obj interface{}) []*Method {
+ iface, ok := obj.(*ast.Interface)
+ if !ok {
+ return nil
+ }
+ byName := map[string]*Method{}
+ out := []*Method{}
+ for _, member := range iface.Members {
+ member := member.(*ast.Member)
+ if !member.Const && !member.Attribute && !isConstructor(member) {
+ if method, ok := byName[member.Name]; ok {
+ method.Overloads = append(method.Overloads, member)
+ } else {
+ method = &Method{
+ Name: member.Name,
+ Overloads: []*ast.Member{member},
+ }
+ byName[member.Name] = method
+ out = append(out, method)
+ }
+ }
+ }
+ return out
+}
+
+// attributesOf returns all the attributes of the given WebIDL interface or
+// namespace.
+func attributesOf(obj interface{}) []*ast.Member {
+ out := []*ast.Member{}
+ add := func(m interface{}) {
+ if m := m.(*ast.Member); m.Attribute {
+ out = append(out, m)
+ }
+ }
+ switch obj := obj.(type) {
+ case *ast.Interface:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ case *ast.Namespace:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ default:
+ return nil
+ }
+ return out
+}
+
+// constantsOf returns all the constant values of the given WebIDL interface or
+// namespace.
+func constantsOf(obj interface{}) []*ast.Member {
+ out := []*ast.Member{}
+ add := func(m interface{}) {
+ if m := m.(*ast.Member); m.Const {
+ out = append(out, m)
+ }
+ }
+ switch obj := obj.(type) {
+ case *ast.Interface:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ case *ast.Namespace:
+ for _, m := range obj.Members {
+ add(m)
+ }
+ default:
+ return nil
+ }
+ return out
+}
+
+// setlikeOf returns the setlike ast.Pattern, if obj is a setlike interface.
+func setlikeOf(obj interface{}) *ast.Pattern {
+ iface, ok := obj.(*ast.Interface)
+ if !ok {
+ return nil
+ }
+ for _, pattern := range iface.Patterns {
+ if pattern.Type == ast.Setlike {
+ return pattern
+ }
+ }
+ return nil
+}
+
+// pascalCase returns the snake-case (or kebab-case) string s transformed
+// into 'PascalCase'. Rules:
+// * The first letter of the string is capitalized
+// * Characters following an underscore, hyphen or number are capitalized
+// * Underscores are removed from the returned string
+// See: https://en.wikipedia.org/wiki/Camel_case
+func pascalCase(s string) string {
+ b := strings.Builder{}
+ upper := true
+ for _, r := range s {
+ if r == '_' || r == '-' {
+ upper = true
+ continue
+ }
+ if upper {
+ b.WriteRune(unicode.ToUpper(r))
+ upper = false
+ } else {
+ b.WriteRune(r)
+ }
+ if unicode.IsNumber(r) {
+ upper = true
+ }
+ }
+ return b.String()
+}
+
+const header = `// Copyright 2021 The Dawn Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+////////////////////////////////////////////////////////////////////////////////
+// File generated by tools/cmd/idlgen.go, with the arguments:
+// %v
+//
+// Do not modify this file directly
+////////////////////////////////////////////////////////////////////////////////
+
+`
diff --git a/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go
new file mode 100644
index 00000000000..d0dd681f4ed
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/tools/src/cmd/run-cts/main.go
@@ -0,0 +1,499 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+const (
+ testTimeout = time.Minute
+)
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+func showUsage() {
+ fmt.Println(`
+run-cts is a tool used to run the WebGPU CTS using the Dawn module for NodeJS
+
+Usage:
+ run-cts --dawn-node=<path to dawn.node> --cts=<path to WebGPU CTS> [test-query]`)
+ os.Exit(1)
+}
+
+var colors bool
+var stdout io.Writer
+
+func run() error {
+ colors = os.Getenv("TERM") != "dumb" ||
+ isatty.IsTerminal(os.Stdout.Fd()) ||
+ isatty.IsCygwinTerminal(os.Stdout.Fd())
+ if colors {
+ if _, disable := os.LookupEnv("NO_COLOR"); disable {
+ colors = false
+ }
+ }
+
+ var dawnNode, cts, node, npx, logFilename string
+ var verbose, build bool
+ var numRunners int
+ flag.StringVar(&dawnNode, "dawn-node", "", "path to dawn.node module")
+ flag.StringVar(&cts, "cts", "", "root directory of WebGPU CTS")
+ flag.StringVar(&node, "node", "", "path to node executable")
+ flag.StringVar(&npx, "npx", "", "path to npx executable")
+ flag.BoolVar(&verbose, "verbose", false, "print extra information while testing")
+ flag.BoolVar(&build, "build", true, "attempt to build the CTS before running")
+ flag.BoolVar(&colors, "colors", colors, "enable / disable colors")
+ flag.IntVar(&numRunners, "j", runtime.NumCPU(), "number of concurrent runners")
+ flag.StringVar(&logFilename, "log", "", "path to log file of tests run and result")
+ flag.Parse()
+
+ if colors {
+ stdout = colorable.NewColorableStdout()
+ } else {
+ stdout = colorable.NewNonColorable(os.Stdout)
+ }
+
+ // Check mandatory arguments
+ if dawnNode == "" || cts == "" {
+ showUsage()
+ }
+ if !isFile(dawnNode) {
+ return fmt.Errorf("'%v' is not a file", dawnNode)
+ }
+ if !isDir(cts) {
+ return fmt.Errorf("'%v' is not a directory", cts)
+ }
+
+ // Make paths absolute
+ for _, path := range []*string{&dawnNode, &cts} {
+ abs, err := filepath.Abs(*path)
+ if err != nil {
+ return fmt.Errorf("unable to get absolute path for '%v'", *path)
+ }
+ *path = abs
+ }
+
+ // The test query is the optional unnamed argument
+ queries := []string{"webgpu:*"}
+ if args := flag.Args(); len(args) > 0 {
+ queries = args
+ }
+
+ // Find node
+ if node == "" {
+ var err error
+ node, err = exec.LookPath("node")
+ if err != nil {
+ return fmt.Errorf("add node to PATH or specify with --node")
+ }
+ }
+ // Find npx
+ if npx == "" {
+ var err error
+ npx, err = exec.LookPath("npx")
+ if err != nil {
+ npx = ""
+ }
+ }
+
+ r := runner{
+ numRunners: numRunners,
+ verbose: verbose,
+ node: node,
+ npx: npx,
+ dawnNode: dawnNode,
+ cts: cts,
+ evalScript: `require('./src/common/tools/setup-ts-in-node.js');
+ require('./src/common/runtime/cmdline.ts');`,
+ }
+
+ if logFilename != "" {
+ writer, err := os.Create(logFilename)
+ if err != nil {
+ return fmt.Errorf("failed to open log '%v': %w", logFilename, err)
+ }
+ defer writer.Close()
+ r.log = newLogger(writer)
+ }
+
+ if build {
+ // Attempt to build the CTS (instead of using the `setup-ts-in-node` transpiler)
+ if npx != "" {
+ if err := r.buildCTS(); err != nil {
+ return fmt.Errorf("failed to build CTS: %w", err)
+ } else {
+ r.evalScript = `require('./out-node/common/runtime/cmdline.js');`
+ }
+ } else {
+ fmt.Println("npx not found on PATH. Using runtime TypeScript transpilation (slow)")
+ }
+ }
+
+ // Find all the test cases that match the given queries.
+ if err := r.gatherTestCases(queries); err != nil {
+ return fmt.Errorf("failed to gather test cases: %w", err)
+ }
+
+ fmt.Printf("Testing %d test cases...\n", len(r.testcases))
+
+ return r.run()
+}
+
+type logger struct {
+ writer io.Writer
+ idx int
+ resultByIndex map[int]result
+}
+
+// newLogger creates a new logger instance.
+func newLogger(writer io.Writer) logger {
+ return logger{writer, 0, map[int]result{}}
+}
+
+// logResults writes the test results to the log file in sequential order.
+// logResults should be called whenever a new test result becomes available.
+func (l *logger) logResults(res result) {
+ if l.writer == nil {
+ return
+ }
+ l.resultByIndex[res.index] = res
+ for {
+ logRes, ok := l.resultByIndex[l.idx]
+ if !ok {
+ break
+ }
+ fmt.Fprintf(l.writer, "%v [%v]\n", logRes.testcase, logRes.status)
+ l.idx++
+ }
+}
+
+type runner struct {
+ numRunners int
+ verbose bool
+ node, npx, dawnNode, cts string
+ evalScript string
+ testcases []string
+ log logger
+}
+
+// buildCTS calls `npx grunt run:build-out-node` in the CTS directory to compile
+// the TypeScript files down to JavaScript. Doing this once ahead of time can be
+// much faster than dynamically transpiling when there are many tests to run.
+func (r *runner) buildCTS() error {
+ cmd := exec.Command(r.npx, "grunt", "run:build-out-node")
+ cmd.Dir = r.cts
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("%w: %v", err, string(out))
+ }
+ return nil
+}
+
+// gatherTestCases() queries the CTS for all test cases that match the given
+// queries. On success, gatherTestCases() populates r.testcases.
+func (r *runner) gatherTestCases(queries []string) error {
+ args := append([]string{
+ "-e", r.evalScript,
+ "--", // Start of arguments
+ // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+ // and slices away the first two arguments. When running with '-e', args
+ // start at 1, so just inject a dummy argument.
+ "dummy-arg",
+ "--list",
+ }, queries...)
+
+ cmd := exec.Command(r.node, args...)
+ cmd.Dir = r.cts
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("%w\n%v", err, string(out))
+ }
+
+ tests := filterTestcases(strings.Split(string(out), "\n"))
+ r.testcases = tests
+ return nil
+}
+
+// run() calls the CTS test runner to run each testcase in a separate process.
+// Up to r.numRunners tests will be run concurrently.
+func (r *runner) run() error {
+ // Create a chan of test indices.
+ // This will be read by the test runner goroutines.
+ caseIndices := make(chan int, len(r.testcases))
+ for i := range r.testcases {
+ caseIndices <- i
+ }
+ close(caseIndices)
+
+ // Create a chan for the test results.
+ // This will be written to by the test runner goroutines.
+ results := make(chan result, len(r.testcases))
+
+ // Spin up the test runner goroutines
+ start := time.Now()
+ wg := sync.WaitGroup{}
+ for i := 0; i < r.numRunners; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for idx := range caseIndices {
+ results <- r.runTestcase(idx)
+ }
+ }()
+ }
+
+ // Create another goroutine to close the results chan when all the runner
+ // goroutines have finished.
+ var timeTaken time.Duration
+ go func() {
+ wg.Wait()
+ timeTaken = time.Since(start)
+ close(results)
+ }()
+
+ // Total number of tests, test counts binned by status
+ numTests, numByStatus := len(r.testcases), map[status]int{}
+
+ // Helper function for printing a progress bar.
+ lastStatusUpdate, animFrame := time.Now(), 0
+ updateProgress := func() {
+ printANSIProgressBar(animFrame, numTests, numByStatus)
+ animFrame++
+ lastStatusUpdate = time.Now()
+ }
+
+ // Pull test results as they become available.
+ // Update the status counts, and print any failures (or all test results if --verbose)
+ progressUpdateRate := time.Millisecond * 10
+ if !colors {
+ // No colors == no cursor control. Reduce progress updates so that
+ // we're not printing endless progress bars.
+ progressUpdateRate = time.Second
+ }
+
+ for res := range results {
+ r.log.logResults(res)
+
+ numByStatus[res.status] = numByStatus[res.status] + 1
+ name := res.testcase
+ if r.verbose || (res.status != pass && res.status != skip) {
+ fmt.Printf("%v - %v: %v\n", name, res.status, res.message)
+ updateProgress()
+ }
+ if time.Since(lastStatusUpdate) > progressUpdateRate {
+ updateProgress()
+ }
+ }
+ printANSIProgressBar(animFrame, numTests, numByStatus)
+
+ // All done. Print final stats.
+ fmt.Printf(`
+Completed in %v
+
+pass: %v (%v)
+fail: %v (%v)
+skip: %v (%v)
+timeout: %v (%v)
+`,
+ timeTaken,
+ numByStatus[pass], percentage(numByStatus[pass], numTests),
+ numByStatus[fail], percentage(numByStatus[fail], numTests),
+ numByStatus[skip], percentage(numByStatus[skip], numTests),
+ numByStatus[timeout], percentage(numByStatus[timeout], numTests),
+ )
+ return nil
+}
+
+// status is an enumerator of test result status
+type status string
+
+const (
+ pass status = "pass"
+ fail status = "fail"
+ skip status = "skip"
+ timeout status = "timeout"
+)
+
+// result holds the information about a completed test
+type result struct {
+ index int
+ testcase string
+ status status
+ message string
+ error error
+}
+
+// runTestcase() runs the CTS testcase with the given index, returning the test
+// result.
+func (r *runner) runTestcase(idx int) result {
+ ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
+ defer cancel()
+
+ testcase := r.testcases[idx]
+
+ eval := r.evalScript
+ args := append([]string{
+ "-e", eval, // Evaluate 'eval'.
+ "--",
+ // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
+ // and slices away the first two arguments. When running with '-e', args
+ // start at 1, so just inject a dummy argument.
+ "dummy-arg",
+ // Actual arguments begin here
+ "--gpu-provider", r.dawnNode,
+ "--verbose",
+ }, testcase)
+
+ cmd := exec.CommandContext(ctx, r.node, args...)
+ cmd.Dir = r.cts
+ out, err := cmd.CombinedOutput()
+ msg := string(out)
+ switch {
+ case errors.Is(err, context.DeadlineExceeded):
+ return result{index: idx, testcase: testcase, status: timeout, message: msg}
+ case strings.Contains(msg, "[fail]"):
+ return result{index: idx, testcase: testcase, status: fail, message: msg}
+ case strings.Contains(msg, "[skip]"):
+ return result{index: idx, testcase: testcase, status: skip, message: msg}
+ case strings.Contains(msg, "[pass]"), err == nil:
+ return result{index: idx, testcase: testcase, status: pass, message: msg}
+ }
+ return result{index: idx, testcase: testcase, status: fail, message: fmt.Sprint(msg, err), error: err}
+}
+
+// filterTestcases returns the list 'in' with empty strings removed.
+func filterTestcases(in []string) []string {
+ out := make([]string, 0, len(in))
+ for _, c := range in {
+ if c != "" {
+ out = append(out, c)
+ }
+ }
+ return out
+}
+
+// percentage returns the percentage of n out of total as a string
+func percentage(n, total int) string {
+ if total == 0 {
+ return "-"
+ }
+ f := float64(n) / float64(total)
+ return fmt.Sprintf("%.1f%c", f*100.0, '%')
+}
+
+// isDir returns true if the path resolves to a directory
+func isDir(path string) bool {
+ s, err := os.Stat(path)
+ if err != nil {
+ return false
+ }
+ return s.IsDir()
+}
+
+// isFile returns true if the path resolves to a file
+func isFile(path string) bool {
+ s, err := os.Stat(path)
+ if err != nil {
+ return false
+ }
+ return !s.IsDir()
+}
+
+// printANSIProgressBar prints a colored progress bar, providing realtime
+// information about the status of the CTS run.
+// Note: We'll want to skip this if !isatty or if we're running on windows.
+func printANSIProgressBar(animFrame int, numTests int, numByStatus map[status]int) {
+ const (
+ barWidth = 50
+
+ escape = "\u001B["
+ positionLeft = escape + "0G"
+ red = escape + "31m"
+ green = escape + "32m"
+ yellow = escape + "33m"
+ blue = escape + "34m"
+ magenta = escape + "35m"
+ cyan = escape + "36m"
+ white = escape + "37m"
+ reset = escape + "0m"
+ )
+
+ animSymbols := []rune{'⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'}
+ blockSymbols := []rune{'▏', '▎', '▍', '▌', '▋', '▊', '▉'}
+
+ numBlocksPrinted := 0
+
+ fmt.Fprint(stdout, string(animSymbols[animFrame%len(animSymbols)]), " [")
+ animFrame++
+
+ numFinished := 0
+
+ for _, ty := range []struct {
+ status status
+ color string
+ }{{pass, green}, {skip, blue}, {timeout, yellow}, {fail, red}} {
+ num := numByStatus[ty.status]
+ numFinished += num
+ statusFrac := float64(num) / float64(numTests)
+ fNumBlocks := barWidth * statusFrac
+ fmt.Fprint(stdout, ty.color)
+ numBlocks := int(math.Ceil(fNumBlocks))
+ if numBlocks > 1 {
+ fmt.Print(strings.Repeat(string("▉"), numBlocks))
+ }
+ if numBlocks > 0 {
+ frac := fNumBlocks - math.Floor(fNumBlocks)
+ symbol := blockSymbols[int(math.Round(frac*float64(len(blockSymbols)-1)))]
+ fmt.Print(string(symbol))
+ }
+ numBlocksPrinted += numBlocks
+ }
+
+ if barWidth > numBlocksPrinted {
+ fmt.Print(strings.Repeat(string(" "), barWidth-numBlocksPrinted))
+ }
+ fmt.Fprint(stdout, reset)
+ fmt.Print("] ", percentage(numFinished, numTests))
+
+ if colors {
+ // move cursor to start of line so the bar is overridden
+ fmt.Fprint(stdout, positionLeft)
+ } else {
+ // cannot move cursor, so newline
+ fmt.Println()
+ }
+}
diff --git a/chromium/third_party/dawn/src/dawn_node/utils/Debug.h b/chromium/third_party/dawn/src/dawn_node/utils/Debug.h
new file mode 100644
index 00000000000..38735409817
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_node/utils/Debug.h
@@ -0,0 +1,146 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNODE_UTILS_DEBUG_H_
+#define DAWNNODE_UTILS_DEBUG_H_
+
+#include <iostream>
+#include <optional>
+#include <sstream>
+#include <unordered_map>
+#include <variant>
+#include <vector>
+
+#include "dawn/webgpu_cpp_print.h"
+
+namespace wgpu { namespace utils {
+
+ // Write() is a helper for printing container types to the std::ostream.
+ // Write() is used by the LOG() macro below.
+
+ // Forward declarations
+ inline std::ostream& Write(std::ostream& out) {
+ return out;
+ }
+ template <typename T>
+ inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
+ template <typename T>
+ inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
+ template <typename K, typename V>
+ inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
+ template <typename... TYS>
+ inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
+ template <typename VALUE>
+ std::ostream& Write(std::ostream& out, VALUE&& value);
+
+ // Write() implementations
+ template <typename T>
+ std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
+ if (value.has_value()) {
+ return Write(out, value.value());
+ }
+ return out << "<undefined>";
+ }
+
+ template <typename T>
+ std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
+ out << "[";
+ bool first = true;
+ for (const auto& el : value) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ Write(out, el);
+ }
+ return out << "]";
+ }
+
+ template <typename K, typename V>
+ std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
+ out << "{";
+ bool first = true;
+ for (auto it : value) {
+ if (!first) {
+ out << ", ";
+ }
+ first = false;
+ Write(out, it.first);
+ out << ": ";
+ Write(out, it.second);
+ }
+ return out << "}";
+ }
+
+ template <typename... TYS>
+ std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
+ std::visit([&](auto&& v) { Write(out, v); }, value);
+ return out;
+ }
+
+ template <typename VALUE>
+ std::ostream& Write(std::ostream& out, VALUE&& value) {
+ return out << std::forward<VALUE>(value);
+ }
+
+ template <typename FIRST, typename... REST>
+ inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
+ Write(out, std::forward<FIRST>(first));
+ Write(out, std::forward<REST>(rest)...);
+ return out;
+ }
+
+ // Fatal() prints a message to stdout with the given file, line, function and optional message,
+ // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
+    // UNIMPLEMENTED() macros below.
+ template <typename... MSG_ARGS>
+ [[noreturn]] inline void Fatal(const char* reason,
+ const char* file,
+ int line,
+ const char* function,
+ MSG_ARGS&&... msg_args) {
+ std::stringstream msg;
+ msg << file << ":" << line << ": " << reason << ": " << function << "()";
+ if constexpr (sizeof...(msg_args) > 0) {
+ msg << " ";
+ Write(msg, std::forward<MSG_ARGS>(msg_args)...);
+ }
+ std::cout << msg.str() << std::endl;
+ abort();
+ }
+
+// LOG() prints the current file, line and function to stdout, followed by a
+// string representation of all the variadic arguments.
+#define LOG(...) \
+ ::wgpu::utils::Write(std::cout << __FILE__ << ":" << __LINE__ << " " << __FUNCTION__ << ": ", \
+ ##__VA_ARGS__) \
+ << std::endl
+
+// UNIMPLEMENTED() prints 'UNIMPLEMENTED' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to stub code that has not yet been implemented.
+#define UNIMPLEMENTED(...) \
+ ::wgpu::utils::Fatal("UNIMPLEMENTED", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+// UNREACHABLE() prints 'UNREACHABLE' with the current file, line and
+// function to stdout, along with the optional message, then calls abort().
+// The macro calls Fatal(), which is annotated with [[noreturn]].
+// Used to mark code paths that should never be reached.
+#define UNREACHABLE(...) \
+ ::wgpu::utils::Fatal("UNREACHABLE", __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__)
+
+}} // namespace wgpu::utils
+
+#endif // DAWNNODE_UTILS_DEBUG_H_
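A minimal usage sketch for the helpers declared above; the function and values are hypothetical, and the include path is assumed from this file's location under src/:

#include "dawn_node/utils/Debug.h"

void Example(std::optional<int> size) {
    // LOG() prints "<file>:<line> Example: size: " followed by the value,
    // or "<undefined>" when the optional is empty (via Write()).
    LOG("size: ", size);
    if (!size.has_value()) {
        // Prints "UNIMPLEMENTED" with the current location, then abort()s.
        UNIMPLEMENTED("default sizes");
    }
}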
diff --git a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
index b0b0c5e021a..b678fa10b5b 100644
--- a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
@@ -83,6 +83,7 @@ dawn_component("dawn_wire") {
"client/ObjectAllocator.h",
"client/Queue.cpp",
"client/Queue.h",
+ "client/RequestTracker.h",
"client/ShaderModule.cpp",
"client/ShaderModule.h",
"server/ObjectStorage.h",
diff --git a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
index bbcb04494c6..e970367758c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
@@ -55,6 +55,7 @@ target_sources(dawn_wire PRIVATE
"client/ObjectAllocator.h"
"client/Queue.cpp"
"client/Queue.h"
+ "client/RequestTracker.h"
"client/ShaderModule.cpp"
"client/ShaderModule.h"
"server/ObjectStorage.h"
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
index 2233c8114e6..f27b99ea4e1 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
@@ -140,25 +140,20 @@ namespace dawn_wire { namespace client {
}
Buffer::~Buffer() {
- // Callbacks need to be fired in all cases, as they can handle freeing resources
- // so we call them with "DestroyedBeforeCallback" status.
- for (auto& it : mRequests) {
- if (it.second.callback) {
- it.second.callback(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, it.second.userdata);
- }
- }
- mRequests.clear();
-
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
FreeMappedData();
}
void Buffer::CancelCallbacksForDisconnect() {
- for (auto& it : mRequests) {
- if (it.second.callback) {
- it.second.callback(WGPUBufferMapAsyncStatus_DeviceLost, it.second.userdata);
+ ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+ }
+
+ void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
+ mRequests.CloseAll([status](MapRequestData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
}
- }
- mRequests.clear();
+ });
}
void Buffer::MapAsync(WGPUMapModeFlags mode,
@@ -177,10 +172,7 @@ namespace dawn_wire { namespace client {
// Create the request structure that will hold information while this mapping is
// in flight.
- uint64_t serial = mRequestSerial++;
- ASSERT(mRequests.find(serial) == mRequests.end());
-
- Buffer::MapRequestData request = {};
+ MapRequestData request = {};
request.callback = callback;
request.userdata = userdata;
request.offset = offset;
@@ -191,6 +183,8 @@ namespace dawn_wire { namespace client {
request.type = MapRequestType::Write;
}
+ uint64_t serial = mRequests.Add(std::move(request));
+
// Serialize the command to send to the server.
BufferMapAsyncCmd cmd;
cmd.bufferId = this->id;
@@ -200,26 +194,17 @@ namespace dawn_wire { namespace client {
cmd.size = size;
client->SerializeCommand(cmd);
-
- // Register this request so that we can retrieve it from its serial when the server
- // sends the callback.
- mRequests[serial] = std::move(request);
}
bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
uint32_t status,
uint64_t readDataUpdateInfoLength,
const uint8_t* readDataUpdateInfo) {
- auto requestIt = mRequests.find(requestSerial);
- if (requestIt == mRequests.end()) {
+ MapRequestData request;
+ if (!mRequests.Acquire(requestSerial, &request)) {
return false;
}
- auto request = std::move(requestIt->second);
- // Delete the request before calling the callback otherwise the callback could be fired a
- // second time. If, for example, buffer.Unmap() is called inside the callback.
- mRequests.erase(requestIt);
-
auto FailRequest = [&request]() -> bool {
if (request.callback != nullptr) {
request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
@@ -352,11 +337,11 @@ namespace dawn_wire { namespace client {
mMapSize = 0;
// Tag all mapping requests still in flight as unmapped before callback.
- for (auto& it : mRequests) {
- if (it.second.clientStatus == WGPUBufferMapAsyncStatus_Success) {
- it.second.clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
}
- }
+ });
BufferUnmapCmd cmd;
cmd.self = ToAPI(this);
@@ -368,11 +353,11 @@ namespace dawn_wire { namespace client {
FreeMappedData();
// Tag all mapping requests still in flight as destroyed before callback.
- for (auto& it : mRequests) {
- if (it.second.clientStatus == WGPUBufferMapAsyncStatus_Success) {
- it.second.clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
+ mRequests.ForAll([](MapRequestData* request) {
+ if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+ request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
}
- }
+ });
BufferDestroyCmd cmd;
cmd.self = ToAPI(this);
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
index a7d3fabff77..0a243843891 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
@@ -19,8 +19,7 @@
#include "dawn_wire/WireClient.h"
#include "dawn_wire/client/ObjectBase.h"
-
-#include <map>
+#include "dawn_wire/client/RequestTracker.h"
namespace dawn_wire { namespace client {
@@ -52,6 +51,7 @@ namespace dawn_wire { namespace client {
private:
void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
bool IsMappedForReading() const;
bool IsMappedForWriting() const;
@@ -86,8 +86,7 @@ namespace dawn_wire { namespace client {
MapRequestType type = MapRequestType::None;
};
- std::map<uint64_t, MapRequestData> mRequests;
- uint64_t mRequestSerial = 0;
+ RequestTracker<MapRequestData> mRequests;
uint64_t mSize = 0;
// Only one mapped pointer can be active at a time because Unmap clears all the in-flight
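The new client/RequestTracker.h is only referenced, not shown, in this excerpt. Based on how it is used here (Add, Acquire, CloseAll, ForAll), a rough sketch of its interface could look like the following; this is an assumption, not Dawn's actual implementation:

#include <cstdint>
#include <unordered_map>
#include <utility>

template <typename Request>
class RequestTracker {
  public:
    // Stores the request and returns the serial later passed to Acquire().
    uint64_t Add(Request&& request) {
        mSerial++;
        mRequests.emplace(mSerial, std::move(request));
        return mSerial;
    }

    // Moves the tracked request into *request and forgets it.
    // Returns false if the serial is unknown.
    bool Acquire(uint64_t serial, Request* request) {
        auto it = mRequests.find(serial);
        if (it == mRequests.end()) {
            return false;
        }
        *request = std::move(it->second);
        mRequests.erase(it);
        return true;
    }

    // Calls f on every outstanding request, then drops them all.
    template <typename F>
    void CloseAll(F&& f) {
        auto requests = std::move(mRequests);
        mRequests.clear();
        for (auto& it : requests) {
            f(&it.second);
        }
    }

    // Calls f on every outstanding request, keeping them tracked.
    template <typename F>
    void ForAll(F&& f) {
        for (auto& it : mRequests) {
            f(&it.second);
        }
    }

  private:
    uint64_t mSerial = 0;
    std::unordered_map<uint64_t, Request> mRequests;
};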
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
index a00bb5e90f7..2d4445e7940 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
@@ -138,7 +138,8 @@ namespace dawn_wire { namespace client {
{
for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
device = device->next()) {
- static_cast<Device*>(device->value())->HandleDeviceLost("GPU connection lost");
+ static_cast<Device*>(device->value())
+ ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
}
}
for (auto& objectList : mObjects) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
index 3616e372155..fc3758a0d88 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
@@ -19,6 +19,7 @@
#include <dawn_wire/Wire.h>
#include "common/LinkedList.h"
+#include "common/NonCopyable.h"
#include "dawn_wire/ChunkedCommandSerializer.h"
#include "dawn_wire/WireClient.h"
#include "dawn_wire/WireCmd_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index e3e34bde154..e6665abf010 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -52,12 +52,14 @@ namespace dawn_wire { namespace client {
return true;
}
- bool Client::DoDeviceLostCallback(Device* device, char const* message) {
+ bool Client::DoDeviceLostCallback(Device* device,
+ WGPUDeviceLostReason reason,
+ char const* message) {
if (device == nullptr) {
// The device might have been deleted or recreated so this isn't an error.
return true;
}
- device->HandleDeviceLost(message);
+ device->HandleDeviceLost(reason, message);
return true;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index 2c87b7c1fb7..8379d51b1c7 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -35,7 +35,7 @@ namespace dawn_wire { namespace client {
}
};
- mDeviceLostCallback = [](char const*, void*) {
+ mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
static bool calledOnce = false;
if (!calledOnce) {
calledOnce = true;
@@ -48,26 +48,23 @@ namespace dawn_wire { namespace client {
}
Device::~Device() {
- // Fire pending error scopes
- auto errorScopes = std::move(mErrorScopes);
- for (const auto& it : errorScopes) {
- it.second.callback(WGPUErrorType_Unknown, "Device destroyed before callback",
- it.second.userdata);
- }
-
- auto createPipelineAsyncRequests = std::move(mCreatePipelineAsyncRequests);
- for (const auto& it : createPipelineAsyncRequests) {
- if (it.second.createComputePipelineAsyncCallback != nullptr) {
- it.second.createComputePipelineAsyncCallback(
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
+ request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(
WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", it.second.userdata);
+ "Device destroyed before callback", request->userdata);
} else {
- ASSERT(it.second.createRenderPipelineAsyncCallback != nullptr);
- it.second.createRenderPipelineAsyncCallback(
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(
WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
- "Device destroyed before callback", it.second.userdata);
+ "Device destroyed before callback", request->userdata);
}
- }
+ });
}
void Device::HandleError(WGPUErrorType errorType, const char* message) {
@@ -83,33 +80,30 @@ namespace dawn_wire { namespace client {
}
}
- void Device::HandleDeviceLost(const char* message) {
+ void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
if (mDeviceLostCallback && !mDidRunLostCallback) {
mDidRunLostCallback = true;
- mDeviceLostCallback(message, mDeviceLostUserdata);
+ mDeviceLostCallback(reason, message, mDeviceLostUserdata);
}
}
void Device::CancelCallbacksForDisconnect() {
- for (auto& it : mCreatePipelineAsyncRequests) {
- ASSERT((it.second.createComputePipelineAsyncCallback != nullptr) ^
- (it.second.createRenderPipelineAsyncCallback != nullptr));
- if (it.second.createRenderPipelineAsyncCallback) {
- it.second.createRenderPipelineAsyncCallback(
+ mErrorScopes.CloseAll([](ErrorScopeData* request) {
+ request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
+ });
+
+ mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+ if (request->createComputePipelineAsyncCallback != nullptr) {
+ request->createComputePipelineAsyncCallback(
WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
- it.second.userdata);
+ request->userdata);
} else {
- it.second.createComputePipelineAsyncCallback(
- WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
- it.second.userdata);
+ ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+ request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+ nullptr, "Device lost",
+ request->userdata);
}
- }
- mCreatePipelineAsyncRequests.clear();
-
- for (auto& it : mErrorScopes) {
- it.second.callback(WGPUErrorType_DeviceLost, "Device lost", it.second.userdata);
- }
- mErrorScopes.clear();
+ });
}
std::weak_ptr<bool> Device::GetAliveWeakPtr() {
@@ -152,10 +146,7 @@ namespace dawn_wire { namespace client {
return true;
}
- uint64_t serial = mErrorScopeRequestSerial++;
- ASSERT(mErrorScopes.find(serial) == mErrorScopes.end());
-
- mErrorScopes[serial] = {callback, userdata};
+ uint64_t serial = mErrorScopes.Add({callback, userdata});
DevicePopErrorScopeCmd cmd;
cmd.deviceId = this->id;
@@ -180,14 +171,11 @@ namespace dawn_wire { namespace client {
return false;
}
- auto requestIt = mErrorScopes.find(requestSerial);
- if (requestIt == mErrorScopes.end()) {
+ ErrorScopeData request;
+ if (!mErrorScopes.Acquire(requestSerial, &request)) {
return false;
}
- ErrorScopeData request = std::move(requestIt->second);
-
- mErrorScopes.erase(requestIt);
request.callback(type, message, request.userdata);
return true;
}
@@ -208,6 +196,12 @@ namespace dawn_wire { namespace client {
return Buffer::CreateError(this);
}
+ bool Device::GetLimits(WGPUSupportedLimits* limits) {
+ // Not implemented in the wire.
+ UNREACHABLE();
+ return false;
+ }
+
WGPUQueue Device::GetQueue() {
// The queue is lazily created because if a Device is created by
// Reserve/Inject, we cannot send the GetQueue message until
@@ -229,38 +223,6 @@ namespace dawn_wire { namespace client {
return ToAPI(mQueue);
}
- WGPUQueue Device::GetDefaultQueue() {
- return GetQueue();
- }
-
- // TODO(dawn:800): Once the deprecated computeStage field is removed this method will no longer
- // be needed and DeviceCreateComputePipeline can be removed from client_handwritten_commands in
- // dawn_wire.json
- WGPUComputePipeline Device::CreateComputePipeline(
- WGPUComputePipelineDescriptor const* descriptor) {
- DeviceCreateComputePipelineCmd cmd;
- cmd.self = ToAPI(this);
-
- auto* allocation = client->ComputePipelineAllocator().New(client);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
-
- // Copy compute to the deprecated computeStage or visa-versa, depending on which one is
- // populated, so that serialization doesn't fail.
- WGPUComputePipelineDescriptor localDescriptor = *descriptor;
- if (localDescriptor.computeStage.module == nullptr) {
- localDescriptor.computeStage.module = localDescriptor.compute.module;
- localDescriptor.computeStage.entryPoint = localDescriptor.compute.entryPoint;
- } else if (localDescriptor.compute.module == nullptr) {
- localDescriptor.compute.module = localDescriptor.computeStage.module;
- localDescriptor.compute.entryPoint = localDescriptor.computeStage.entryPoint;
- }
-
- cmd.descriptor = &localDescriptor;
- client->SerializeCommand(cmd);
-
- return ToAPI(allocation->object.get());
- }
-
void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata) {
@@ -269,50 +231,32 @@ namespace dawn_wire { namespace client {
"GPU device disconnected", userdata);
}
- DeviceCreateComputePipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
-
- // Copy compute to the deprecated computeStage or visa-versa, depending on which one is
- // populated, so that serialization doesn't fail.
- // TODO(dawn:800): Remove once computeStage is removed.
- WGPUComputePipelineDescriptor localDescriptor = *descriptor;
- if (localDescriptor.computeStage.module == nullptr) {
- localDescriptor.computeStage.module = localDescriptor.compute.module;
- localDescriptor.computeStage.entryPoint = localDescriptor.compute.entryPoint;
- } else if (localDescriptor.compute.module == nullptr) {
- localDescriptor.compute.module = localDescriptor.computeStage.module;
- localDescriptor.compute.entryPoint = localDescriptor.computeStage.entryPoint;
- }
-
- cmd.descriptor = &localDescriptor;
-
- uint64_t serial = mCreatePipelineAsyncRequestSerial++;
- ASSERT(mCreatePipelineAsyncRequests.find(serial) == mCreatePipelineAsyncRequests.end());
- cmd.requestSerial = serial;
-
auto* allocation = client->ComputePipelineAllocator().New(client);
+
CreatePipelineAsyncRequest request = {};
request.createComputePipelineAsyncCallback = callback;
request.userdata = userdata;
request.pipelineObjectID = allocation->object->id;
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+ DeviceCreateComputePipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
- client->SerializeCommand(cmd);
- mCreatePipelineAsyncRequests[serial] = std::move(request);
+ client->SerializeCommand(cmd);
}
bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
WGPUCreatePipelineAsyncStatus status,
const char* message) {
- const auto& requestIt = mCreatePipelineAsyncRequests.find(requestSerial);
- if (requestIt == mCreatePipelineAsyncRequests.end()) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
return false;
}
- CreatePipelineAsyncRequest request = std::move(requestIt->second);
- mCreatePipelineAsyncRequests.erase(requestIt);
-
auto pipelineAllocation =
client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
@@ -337,37 +281,33 @@ namespace dawn_wire { namespace client {
return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
"GPU device disconnected", userdata);
}
- DeviceCreateRenderPipelineAsyncCmd cmd;
- cmd.deviceId = this->id;
- cmd.descriptor = descriptor;
-
- uint64_t serial = mCreatePipelineAsyncRequestSerial++;
- ASSERT(mCreatePipelineAsyncRequests.find(serial) == mCreatePipelineAsyncRequests.end());
- cmd.requestSerial = serial;
auto* allocation = client->RenderPipelineAllocator().New(client);
+
CreatePipelineAsyncRequest request = {};
request.createRenderPipelineAsyncCallback = callback;
request.userdata = userdata;
request.pipelineObjectID = allocation->object->id;
+ uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+ DeviceCreateRenderPipelineAsyncCmd cmd;
+ cmd.deviceId = this->id;
+ cmd.descriptor = descriptor;
+ cmd.requestSerial = serial;
cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
- client->SerializeCommand(cmd);
- mCreatePipelineAsyncRequests[serial] = std::move(request);
+ client->SerializeCommand(cmd);
}
bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
WGPUCreatePipelineAsyncStatus status,
const char* message) {
- const auto& requestIt = mCreatePipelineAsyncRequests.find(requestSerial);
- if (requestIt == mCreatePipelineAsyncRequests.end()) {
+ CreatePipelineAsyncRequest request;
+ if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
return false;
}
- CreatePipelineAsyncRequest request = std::move(requestIt->second);
- mCreatePipelineAsyncRequests.erase(requestIt);
-
auto pipelineAllocation =
client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
index 2c0d35cbb01..426799c1eb2 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
@@ -21,8 +21,8 @@
#include "dawn_wire/WireCmd_autogen.h"
#include "dawn_wire/client/ApiObjects_autogen.h"
#include "dawn_wire/client/ObjectBase.h"
+#include "dawn_wire/client/RequestTracker.h"
-#include <map>
#include <memory>
namespace dawn_wire { namespace client {
@@ -53,7 +53,7 @@ namespace dawn_wire { namespace client {
void HandleError(WGPUErrorType errorType, const char* message);
void HandleLogging(WGPULoggingType loggingType, const char* message);
- void HandleDeviceLost(const char* message);
+ void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
bool OnPopErrorScopeCallback(uint64_t requestSerial,
WGPUErrorType type,
const char* message);
@@ -64,8 +64,7 @@ namespace dawn_wire { namespace client {
WGPUCreatePipelineAsyncStatus status,
const char* message);
- // TODO(dawn:22): Remove once the deprecation period is finished.
- WGPUQueue GetDefaultQueue();
+ bool GetLimits(WGPUSupportedLimits* limits);
WGPUQueue GetQueue();
void CancelCallbacksForDisconnect() override;
@@ -77,8 +76,7 @@ namespace dawn_wire { namespace client {
WGPUErrorCallback callback = nullptr;
void* userdata = nullptr;
};
- std::map<uint64_t, ErrorScopeData> mErrorScopes;
- uint64_t mErrorScopeRequestSerial = 0;
+ RequestTracker<ErrorScopeData> mErrorScopes;
uint64_t mErrorScopeStackSize = 0;
struct CreatePipelineAsyncRequest {
@@ -87,8 +85,7 @@ namespace dawn_wire { namespace client {
void* userdata = nullptr;
ObjectId pipelineObjectID;
};
- std::map<uint64_t, CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
- uint64_t mCreatePipelineAsyncRequestSerial = 0;
+ RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
WGPUErrorCallback mErrorCallback = nullptr;
WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
index 1ac8c778190..098ddc5afca 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
@@ -24,17 +24,11 @@ namespace dawn_wire { namespace client {
}
bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
- auto requestIt = mOnWorkDoneRequests.find(requestSerial);
- if (requestIt == mOnWorkDoneRequests.end()) {
+ OnWorkDoneData request;
+ if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
return false;
}
- // Remove the request data so that the callback cannot be called again.
- // ex.) inside the callback: if the queue is deleted (when there are multiple queues),
- // all callbacks reject.
- OnWorkDoneData request = std::move(requestIt->second);
- mOnWorkDoneRequests.erase(requestIt);
-
request.callback(status, request.userdata);
return true;
}
@@ -47,16 +41,13 @@ namespace dawn_wire { namespace client {
return;
}
- uint32_t serial = mOnWorkDoneSerial++;
- ASSERT(mOnWorkDoneRequests.find(serial) == mOnWorkDoneRequests.end());
+ uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
QueueOnSubmittedWorkDoneCmd cmd;
cmd.queueId = this->id;
cmd.signalValue = signalValue;
cmd.requestSerial = serial;
- mOnWorkDoneRequests[serial] = {callback, userdata};
-
client->SerializeCommand(cmd);
}
@@ -97,12 +88,11 @@ namespace dawn_wire { namespace client {
}
void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
- for (auto& it : mOnWorkDoneRequests) {
- if (it.second.callback) {
- it.second.callback(status, it.second.userdata);
+ mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, request->userdata);
}
- }
- mOnWorkDoneRequests.clear();
+ });
}
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
index d8e93a31062..901acac2d43 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
@@ -19,8 +19,7 @@
#include "dawn_wire/WireClient.h"
#include "dawn_wire/client/ObjectBase.h"
-
-#include <map>
+#include "dawn_wire/client/RequestTracker.h"
namespace dawn_wire { namespace client {
@@ -44,15 +43,13 @@ namespace dawn_wire { namespace client {
private:
void CancelCallbacksForDisconnect() override;
-
void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
struct OnWorkDoneData {
WGPUQueueWorkDoneCallback callback = nullptr;
void* userdata = nullptr;
};
- uint64_t mOnWorkDoneSerial = 0;
- std::map<uint64_t, OnWorkDoneData> mOnWorkDoneRequests;
+ RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
};
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h b/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h
new file mode 100644
index 00000000000..7ce2d0004fe
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/client/RequestTracker.h
@@ -0,0 +1,82 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+#define DAWNWIRE_CLIENT_REQUESTTRACKER_H_
+
+#include "common/Assert.h"
+#include "common/NonCopyable.h"
+
+#include <cstdint>
+#include <map>
+
+namespace dawn_wire { namespace client {
+
+ class Device;
+ class MemoryTransferService;
+
+ template <typename Request>
+ class RequestTracker : NonCopyable {
+ public:
+ ~RequestTracker() {
+ ASSERT(mRequests.empty());
+ }
+
+ uint64_t Add(Request&& request) {
+ mSerial++;
+            mRequests.emplace(mSerial, std::move(request));
+ return mSerial;
+ }
+
+ bool Acquire(uint64_t serial, Request* request) {
+ auto it = mRequests.find(serial);
+ if (it == mRequests.end()) {
+ return false;
+ }
+ *request = std::move(it->second);
+ mRequests.erase(it);
+ return true;
+ }
+
+ template <typename CloseFunc>
+ void CloseAll(CloseFunc&& closeFunc) {
+ // Call closeFunc on all requests while handling reentrancy where the callback of some
+ // requests may add some additional requests. We guarantee all callbacks for requests
+            // are called exactly once, so keep closing new requests if the first batch added more.
+            // It is fine to loop infinitely here if that's what the application makes us do.
+ while (!mRequests.empty()) {
+ // Move mRequests to a local variable so that further reentrant modifications of
+ // mRequests don't invalidate the iterators.
+ auto allRequests = std::move(mRequests);
+ for (auto& it : allRequests) {
+ closeFunc(&it.second);
+ }
+ }
+ }
+
+ template <typename F>
+ void ForAll(F&& f) {
+ for (auto& it : mRequests) {
+ f(&it.second);
+ }
+ }
+
+ private:
+        uint64_t mSerial = 0;
+ std::map<uint64_t, Request> mRequests;
+ };
+
+}} // namespace dawn_wire::client
+
+#endif // DAWNWIRE_CLIENT_REQUESTTRACKER_H_
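The new RequestTracker<Request> centralizes the serial-to-request bookkeeping that Device, Queue, and ShaderModule previously each reimplemented with a std::map plus a hand-maintained serial counter. A minimal usage sketch follows, assuming Dawn's src/ directory is on the include path; ExampleRequest and its callback are hypothetical stand-ins for the real wire payloads, which hold WGPU* callbacks and userdata and are driven by deserialized Return* commands rather than called directly.

// Sketch only: exercises Add/Acquire/CloseAll the way the wire client does.
#include "dawn_wire/client/RequestTracker.h"

#include <cstdint>
#include <cstdio>

namespace {
    using ExampleCallback = void (*)(const char* message, void* userdata);

    struct ExampleRequest {
        ExampleCallback callback = nullptr;
        void* userdata = nullptr;
    };
}  // namespace

int main() {
    dawn_wire::client::RequestTracker<ExampleRequest> tracker;

    // 1. Register a request; the returned serial is what travels in the wire command.
    uint64_t serial = tracker.Add({[](const char* msg, void*) { printf("%s\n", msg); }, nullptr});

    // 2. When the matching reply arrives, Acquire() removes the entry so the callback can
    //    never fire twice, and the caller invokes it exactly once.
    ExampleRequest request;
    if (tracker.Acquire(serial, &request)) {
        request.callback("request completed", request.userdata);
    }

    // 3. On disconnect or destruction, CloseAll() flushes anything still pending and
    //    re-scans in case a callback enqueues further requests.
    tracker.Add({[](const char* msg, void*) { printf("%s\n", msg); }, nullptr});
    tracker.CloseAll([](ExampleRequest* pending) {
        pending->callback("request cancelled", pending->userdata);
    });
    return 0;
}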
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
index 97e0204c924..c28b978c3ab 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
@@ -19,15 +19,7 @@
namespace dawn_wire { namespace client {
ShaderModule::~ShaderModule() {
- // Callbacks need to be fired in all cases, as they can handle freeing resources. So we call
- // them with "Unknown" status.
- for (auto& it : mCompilationInfoRequests) {
- if (it.second.callback) {
- it.second.callback(WGPUCompilationInfoRequestStatus_Unknown, nullptr,
- it.second.userdata);
- }
- }
- mCompilationInfoRequests.clear();
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
}
void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
@@ -36,31 +28,37 @@ namespace dawn_wire { namespace client {
return;
}
- uint64_t serial = mCompilationInfoRequestSerial++;
+ uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
+
ShaderModuleGetCompilationInfoCmd cmd;
cmd.shaderModuleId = this->id;
cmd.requestSerial = serial;
- mCompilationInfoRequests[serial] = {callback, userdata};
-
client->SerializeCommand(cmd);
}
bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
WGPUCompilationInfoRequestStatus status,
const WGPUCompilationInfo* info) {
- auto requestIt = mCompilationInfoRequests.find(requestSerial);
- if (requestIt == mCompilationInfoRequests.end()) {
+ CompilationInfoRequest request;
+ if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
return false;
}
- // Remove the request data so that the callback cannot be called again.
- // ex.) inside the callback: if the shader module is deleted, all callbacks reject.
- CompilationInfoRequest request = std::move(requestIt->second);
- mCompilationInfoRequests.erase(requestIt);
-
request.callback(status, info, request.userdata);
return true;
}
+ void ShaderModule::CancelCallbacksForDisconnect() {
+ ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
+ }
+
+ void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
+ mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
+ if (request->callback != nullptr) {
+ request->callback(status, nullptr, request->userdata);
+ }
+ });
+ }
+
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
index add5b975ffd..f12a4d0f1b6 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
@@ -17,8 +17,8 @@
#include <dawn/webgpu.h>
-#include "common/SerialMap.h"
#include "dawn_wire/client/ObjectBase.h"
+#include "dawn_wire/client/RequestTracker.h"
namespace dawn_wire { namespace client {
@@ -33,12 +33,14 @@ namespace dawn_wire { namespace client {
const WGPUCompilationInfo* info);
private:
+ void CancelCallbacksForDisconnect() override;
+ void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
+
struct CompilationInfoRequest {
WGPUCompilationInfoCallback callback = nullptr;
void* userdata = nullptr;
};
- uint64_t mCompilationInfoRequestSerial = 0;
- std::map<uint64_t, CompilationInfoRequest> mCompilationInfoRequests;
+ RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
};
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
index 7a504ef4de6..8297cbdcf47 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
@@ -144,9 +144,9 @@ namespace dawn_wire { namespace server {
data->info.get());
mProcs.deviceSetDeviceLostCallback(
device,
- [](const char* message, void* userdata) {
+ [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
- info->server->OnDeviceLost(info->self, message);
+ info->server->OnDeviceLost(info->self, reason, message);
},
data->info.get());
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 711813537d5..b4429871f7e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -196,7 +196,7 @@ namespace dawn_wire { namespace server {
// Error callbacks
void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
- void OnDeviceLost(ObjectHandle device, const char* message);
+ void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
void OnDevicePopErrorScope(WGPUErrorType type,
const char* message,
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 939e632e561..c8cddf4e500 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -59,9 +59,12 @@ namespace dawn_wire { namespace server {
SerializeCommand(cmd);
}
- void Server::OnDeviceLost(ObjectHandle device, const char* message) {
+ void Server::OnDeviceLost(ObjectHandle device,
+ WGPUDeviceLostReason reason,
+ const char* message) {
ReturnDeviceLostCallbackCmd cmd;
cmd.device = device;
+ cmd.reason = reason;
cmd.message = message;
SerializeCommand(cmd);
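With this change a WGPUDeviceLostReason travels end to end: dawn_native hands it to the server, the server forwards it in ReturnDeviceLostCallbackCmd, and the wire client passes it through HandleDeviceLost to the embedder's callback. A hedged sketch of an embedder-side callback with the new signature; obtaining the WGPUDevice and the exact registration call (the proc-table setter shown in Server.cpp above, or its generated C entry point) are assumptions and not shown here.

// Sketch: device-lost handler matching the updated WGPUDeviceLostCallback signature.
#include <dawn/webgpu.h>

#include <cstdio>

static void OnDeviceLost(WGPUDeviceLostReason reason, const char* message, void* /*userdata*/) {
    // Wire disconnects report WGPUDeviceLostReason_Undefined with a "GPU connection lost"
    // message, as in the Client.cpp hunk above; other reasons come from dawn_native.
    if (reason == WGPUDeviceLostReason_Undefined) {
        fprintf(stderr, "Device lost (unspecified reason): %s\n", message);
    } else {
        fprintf(stderr, "Device lost (reason %d): %s\n", static_cast<int>(reason), message);
    }
}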
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index cfddcc2a67d..f9588e4d4fc 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -29,6 +29,9 @@ struct ID3D12Device;
struct ID3D12Resource;
namespace dawn_native { namespace d3d12 {
+
+ class D3D11on12ResourceCache;
+
DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
HWND window);
@@ -62,6 +65,8 @@ namespace dawn_native { namespace d3d12 {
class DAWN_NATIVE_EXPORT ExternalImageDXGI {
public:
+ ~ExternalImageDXGI();
+
// Note: SharedHandle must be a handle to a texture object.
static std::unique_ptr<ExternalImageDXGI> Create(
WGPUDevice device,
@@ -79,11 +84,14 @@ namespace dawn_native { namespace d3d12 {
// Contents of WGPUTextureDescriptor are stored individually since the descriptor
// could outlive this image.
WGPUTextureUsageFlags mUsage;
+ WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
WGPUTextureDimension mDimension;
WGPUExtent3D mSize;
WGPUTextureFormat mFormat;
uint32_t mMipLevelCount;
uint32_t mSampleCount;
+
+ std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
};
struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index 9baf128b087..501512af701 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -63,9 +63,13 @@ namespace dawn_native {
// An optional parameter of Adapter::CreateDevice() to send additional information when creating
// a Device. For example, we can use it to enable a workaround, optimization or feature.
struct DAWN_NATIVE_EXPORT DeviceDescriptor {
+ // TODO(dawn:1149): remove once requiredExtensions is no longer used.
std::vector<const char*> requiredExtensions;
+ std::vector<const char*> requiredFeatures;
std::vector<const char*> forceEnabledToggles;
std::vector<const char*> forceDisabledToggles;
+
+ const WGPURequiredLimits* requiredLimits = nullptr;
};
// A struct to record the information of a toggle. A toggle is a code path in Dawn device that
@@ -77,10 +81,10 @@ namespace dawn_native {
const char* url;
};
- // A struct to record the information of an extension. An extension is a GPU feature that is not
+    // A struct to record the information of a feature. A feature is a GPU capability that is not
// required to be supported by all Dawn backends and can only be used when it is enabled on the
// creation of device.
- using ExtensionInfo = ToggleInfo;
+ using FeatureInfo = ToggleInfo;
    // An adapter is an object that represents one possibility of creating devices in the system.
    // Most of the time it will represent a combination of a physical GPU and an API. Note that the
@@ -107,7 +111,11 @@ namespace dawn_native {
void GetProperties(wgpu::AdapterProperties* properties) const;
std::vector<const char*> GetSupportedExtensions() const;
+ std::vector<const char*> GetSupportedFeatures() const;
WGPUDeviceProperties GetAdapterProperties() const;
+ bool GetLimits(WGPUSupportedLimits* limits) const;
+
+ void SetUseTieredLimits(bool useTieredLimits);
// Check that the Adapter is able to support importing external images. This is necessary
// to implement the swapchain and interop APIs in Chromium.
@@ -120,6 +128,10 @@ namespace dawn_native {
// On an error, nullptr is returned.
WGPUDevice CreateDevice(const DeviceDescriptor* deviceDescriptor = nullptr);
+ void RequestDevice(const DeviceDescriptor* descriptor,
+ WGPURequestDeviceCallback callback,
+ void* userdata);
+
// Reset the backend device object for testing purposes.
void ResetInternalDeviceForTesting();
@@ -248,6 +260,13 @@ namespace dawn_native {
ExternalImageExportInfo(ExternalImageType type);
};
+ DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);
+
+ DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);
+
+ DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
+ WGPUBindGroupLayout b);
+
} // namespace dawn_native
#endif // DAWNNATIVE_DAWNNATIVE_H_
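DeviceDescriptor now carries requiredFeatures and requiredLimits next to the deprecated requiredExtensions, and Adapter gains GetLimits(). A hedged sketch of how an embedder might combine them; acquiring the dawn_native::Adapter (via instance adapter discovery) is assumed and elided, and the feature-name string is purely illustrative.

// Sketch: query adapter limits, then request a device with explicit features and limits.
#include <dawn/webgpu.h>
#include <dawn_native/DawnNative.h>

WGPUDevice CreateDeviceWithLimits(dawn_native::Adapter& adapter) {
    WGPUSupportedLimits supported = {};
    if (!adapter.GetLimits(&supported)) {
        return nullptr;  // The backend could not report limits.
    }

    // Request exactly what the adapter supports; a real embedder would only ask for what it
    // needs. WGPURequiredLimits::limits uses the same WGPULimits layout as the supported set.
    WGPURequiredLimits required = {};
    required.limits = supported.limits;

    dawn_native::DeviceDescriptor descriptor;
    descriptor.requiredFeatures = {"texture-compression-bc"};  // Illustrative feature name.
    descriptor.requiredLimits = &required;

    return adapter.CreateDevice(&descriptor);
}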
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index 88cf03daf93..ce354b27b18 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -114,12 +114,6 @@ namespace dawn_native { namespace vulkan {
#endif // __linux__
- // Exports a signal semaphore from a wrapped texture. This must be called on wrapped
- // textures before they are destroyed. On failure, returns -1
- // TODO(enga): Remove after updating Chromium to use ExportVulkanImage.
- DAWN_NATIVE_EXPORT int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice,
- WGPUTexture cTexture);
-
// Imports external memory into a Vulkan image. Internally, this uses external memory /
    // semaphore extensions to import the image and wait on the provided synchronization
// primitives before the texture can be used.
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
index 4d69c95e42a..0c11d91d0c8 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
@@ -61,6 +61,16 @@ namespace dawn_wire {
const volatile char* deserializeBuffer,
size_t deserializeBufferSize);
+ DAWN_WIRE_EXPORT size_t
+ SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);
+
+ DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
+ char* serializeBuffer);
+
+ DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
+ const volatile char* deserializeBuffer,
+ size_t deserializeBufferSize);
+
} // namespace dawn_wire
#endif // DAWNWIRE_WIRE_H_
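The three new helpers give WGPUSupportedLimits the same size/serialize/deserialize treatment as the existing struct helpers in this header, so limits can cross the wire boundary. A minimal round-trip sketch; the heap buffer stands in for the real command stream.

// Sketch: serialize a WGPUSupportedLimits into a buffer and read it back.
#include <dawn/webgpu.h>
#include <dawn_wire/Wire.h>

#include <vector>

bool RoundTripLimits(const WGPUSupportedLimits& in, WGPUSupportedLimits* out) {
    size_t size = dawn_wire::SerializedWGPUSupportedLimitsSize(&in);
    std::vector<char> buffer(size);
    dawn_wire::SerializeWGPUSupportedLimits(&in, buffer.data());

    // Deserialization checks the buffer against its declared size and returns false on failure.
    return dawn_wire::DeserializeWGPUSupportedLimits(out, buffer.data(), buffer.size());
}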
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
index 3ff1698fd00..84a74f56a87 100644
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/tests/BUILD.gn
@@ -135,8 +135,8 @@ test("dawn_unittests") {
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawn_proc",
"${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native",
"${dawn_root}/src/dawn_native:dawn_native_sources",
+ "${dawn_root}/src/dawn_native:dawn_native_static",
"${dawn_root}/src/dawn_wire",
"${dawn_root}/src/utils:dawn_utils",
]
@@ -162,13 +162,14 @@ test("dawn_unittests") {
"unittests/EnumClassBitmasksTests.cpp",
"unittests/EnumMaskIteratorTests.cpp",
"unittests/ErrorTests.cpp",
- "unittests/ExtensionTests.cpp",
+ "unittests/FeatureTests.cpp",
"unittests/GPUInfoTests.cpp",
"unittests/GetProcAddressTests.cpp",
"unittests/ITypArrayTests.cpp",
"unittests/ITypBitsetTests.cpp",
"unittests/ITypSpanTests.cpp",
"unittests/ITypVectorTests.cpp",
+ "unittests/LimitsTests.cpp",
"unittests/LinkedListTests.cpp",
"unittests/MathTests.cpp",
"unittests/ObjectBaseTests.cpp",
@@ -203,8 +204,10 @@ test("dawn_unittests") {
"unittests/validation/GetBindGroupLayoutValidationTests.cpp",
"unittests/validation/IndexBufferValidationTests.cpp",
"unittests/validation/InternalUsageValidationTests.cpp",
+ "unittests/validation/LabelTests.cpp",
"unittests/validation/MinimumBufferSizeValidationTests.cpp",
"unittests/validation/MultipleDeviceTests.cpp",
+ "unittests/validation/OverridableConstantsValidationTests.cpp",
"unittests/validation/QueryValidationTests.cpp",
"unittests/validation/QueueOnSubmittedWorkDoneValidationTests.cpp",
"unittests/validation/QueueSubmitValidationTests.cpp",
@@ -213,6 +216,7 @@ test("dawn_unittests") {
"unittests/validation/RenderBundleValidationTests.cpp",
"unittests/validation/RenderPassDescriptorValidationTests.cpp",
"unittests/validation/RenderPipelineValidationTests.cpp",
+ "unittests/validation/RequestDeviceValidationTests.cpp",
"unittests/validation/ResourceUsageTrackingTests.cpp",
"unittests/validation/SamplerValidationTests.cpp",
"unittests/validation/ShaderModuleValidationTests.cpp",
@@ -227,6 +231,7 @@ test("dawn_unittests") {
"unittests/validation/VertexBufferValidationTests.cpp",
"unittests/validation/VertexStateValidationTests.cpp",
"unittests/validation/VideoViewsValidationTests.cpp",
+ "unittests/validation/WriteBufferTests.cpp",
"unittests/wire/WireArgumentTests.cpp",
"unittests/wire/WireBasicTests.cpp",
"unittests/wire/WireBufferMappingTests.cpp",
@@ -296,6 +301,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/BufferZeroInitTests.cpp",
"end2end/ClipSpaceTests.cpp",
"end2end/ColorStateTests.cpp",
+ "end2end/CommandEncoderTests.cpp",
"end2end/CompressedTextureFormatTests.cpp",
"end2end/ComputeCopyStorageBufferTests.cpp",
"end2end/ComputeDispatchTests.cpp",
@@ -356,6 +362,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/TextureViewTests.cpp",
"end2end/TextureZeroInitTests.cpp",
"end2end/VertexFormatTests.cpp",
+ "end2end/VertexOnlyRenderPipelineTests.cpp",
"end2end/VertexStateTests.cpp",
"end2end/ViewportOrientationTests.cpp",
"end2end/ViewportTests.cpp",
@@ -438,6 +445,7 @@ source_set("dawn_white_box_tests_sources") {
}
sources += [
+ "white_box/BufferAllocatedSizeTests.cpp",
"white_box/InternalResourceUsageTests.cpp",
"white_box/InternalStorageBufferBindingTests.cpp",
"white_box/QueryInternalShaderTests.cpp",
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
index 4bb69481579..39068300560 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
@@ -18,13 +18,11 @@
namespace utils {
- // For creating deprecated render pipeline descriptors
-
- ComboVertexStateDescriptor::ComboVertexStateDescriptor() {
+ ComboVertexState::ComboVertexState() {
vertexBufferCount = 0;
// Fill the default values for vertexBuffers and vertexAttributes in buffers.
- wgpu::VertexAttributeDescriptor vertexAttribute;
+ wgpu::VertexAttribute vertexAttribute;
vertexAttribute.shaderLocation = 0;
vertexAttribute.offset = 0;
vertexAttribute.format = wgpu::VertexFormat::Float32;
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
index 036ce1c7115..451c0036b58 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
@@ -24,18 +24,18 @@
namespace utils {
// Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
- class ComboVertexStateDescriptor {
+ class ComboVertexState {
public:
- ComboVertexStateDescriptor();
+ ComboVertexState();
- ComboVertexStateDescriptor(const ComboVertexStateDescriptor&) = delete;
- ComboVertexStateDescriptor& operator=(const ComboVertexStateDescriptor&) = delete;
- ComboVertexStateDescriptor(ComboVertexStateDescriptor&&) = delete;
- ComboVertexStateDescriptor& operator=(ComboVertexStateDescriptor&&) = delete;
+ ComboVertexState(const ComboVertexState&) = delete;
+ ComboVertexState& operator=(const ComboVertexState&) = delete;
+ ComboVertexState(ComboVertexState&&) = delete;
+ ComboVertexState& operator=(ComboVertexState&&) = delete;
uint32_t vertexBufferCount;
- std::array<wgpu::VertexBufferLayoutDescriptor, kMaxVertexBuffers> cVertexBuffers;
- std::array<wgpu::VertexAttributeDescriptor, kMaxVertexAttributes> cAttributes;
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
};
class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.cpp b/chromium/third_party/dawn/src/utils/TestUtils.cpp
index 8f359abe575..d55a8c2f601 100644
--- a/chromium/third_party/dawn/src/utils/TestUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TestUtils.cpp
@@ -173,8 +173,9 @@ namespace utils {
case wgpu::VertexFormat::Sint32x4:
return 16;
case wgpu::VertexFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
} // namespace utils
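The format switches in the utils are restructured so that undefined or not-yet-implemented enumerators break out of the switch and a single UNREACHABLE() sits after it. The switch stays exhaustive over the enum, so newly added formats still surface as compiler diagnostics, while all of the unsupported cases share one runtime assertion. A minimal sketch of the pattern, using a hypothetical enum and a stand-in UNREACHABLE macro (Dawn's real macro lives in common/Assert.h):

// Sketch of the break-then-UNREACHABLE pattern.
#include <cstdint>
#include <cstdlib>

#define UNREACHABLE() std::abort()

enum class BlockKind : uint32_t { FourByte, EightByte, Undefined };

uint32_t BlockSize(BlockKind kind) {
    switch (kind) {
        case BlockKind::FourByte:
            return 4u;
        case BlockKind::EightByte:
            return 8u;
        // Undefined (and any future unimplemented values listed here) break out of the
        // switch and hit the shared UNREACHABLE() below.
        case BlockKind::Undefined:
            break;
    }
    UNREACHABLE();
}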
diff --git a/chromium/third_party/dawn/src/utils/TextureUtils.cpp b/chromium/third_party/dawn/src/utils/TextureUtils.cpp
index ab9e6c8f2df..aaa808e5b76 100644
--- a/chromium/third_party/dawn/src/utils/TextureUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TextureUtils.cpp
@@ -40,6 +40,85 @@ namespace utils {
}
}
+ bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBFloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
switch (textureFormat) {
case wgpu::TextureFormat::R8Unorm:
@@ -107,16 +186,64 @@ namespace utils {
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return 16u;
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ return 8u;
+
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
+ return 16u;
+
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 16u;
+
case wgpu::TextureFormat::Depth24Plus:
case wgpu::TextureFormat::Depth24PlusStencil8:
// Block size of a multi-planar format depends on aspect.
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
@@ -176,15 +303,64 @@ namespace utils {
case wgpu::TextureFormat::BC6HRGBFloat:
case wgpu::TextureFormat::BC7RGBAUnorm:
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
return 4u;
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
// Block size of a multi-planar format depends on aspect.
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
@@ -244,15 +420,64 @@ namespace utils {
case wgpu::TextureFormat::BC6HRGBFloat:
case wgpu::TextureFormat::BC7RGBAUnorm:
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8Unorm:
+ case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+ case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+ case wgpu::TextureFormat::ETC2RGBA8Unorm:
+ case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+ case wgpu::TextureFormat::EACR11Unorm:
+ case wgpu::TextureFormat::EACR11Snorm:
+ case wgpu::TextureFormat::EACRG11Unorm:
+ case wgpu::TextureFormat::EACRG11Snorm:
return 4u;
+ case wgpu::TextureFormat::ASTC4x4Unorm:
+ case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+ case wgpu::TextureFormat::ASTC5x4Unorm:
+ case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+ return 4u;
+ case wgpu::TextureFormat::ASTC5x5Unorm:
+ case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC6x5Unorm:
+ case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x5Unorm:
+ case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x5Unorm:
+ case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+ return 5u;
+ case wgpu::TextureFormat::ASTC6x6Unorm:
+ case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC8x6Unorm:
+ case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x6Unorm:
+ case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+ return 6u;
+ case wgpu::TextureFormat::ASTC8x8Unorm:
+ case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+ case wgpu::TextureFormat::ASTC10x8Unorm:
+ case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+ return 8u;
+ case wgpu::TextureFormat::ASTC10x10Unorm:
+ case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+ case wgpu::TextureFormat::ASTC12x10Unorm:
+ case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+ return 10u;
+ case wgpu::TextureFormat::ASTC12x12Unorm:
+ case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+ return 12u;
+
// Block size of a multi-planar format depends on aspect.
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+ // TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
+ // TODO(dawn:570): implement depth16unorm
+ case wgpu::TextureFormat::Depth16Unorm:
case wgpu::TextureFormat::Undefined:
- UNREACHABLE();
+ break;
}
+ UNREACHABLE();
}
const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
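With the ETC2/EAC and ASTC entries added, GetTexelBlockSizeInBytes, GetTextureFormatBlockWidth, and GetTextureFormatBlockHeight now cover every compressed format in kAllTextureFormats. A hedged sketch of the usual row-pitch arithmetic built on these helpers; the 256x200 extent is an arbitrary example and the include assumes Dawn's src/ directory is on the include path.

// Sketch: tightly packed bytes-per-row and block-row count for a compressed mip level.
#include <dawn/webgpu_cpp.h>

#include <cstdint>
#include <cstdio>

#include "utils/TextureUtils.h"

int main() {
    constexpr wgpu::TextureFormat format = wgpu::TextureFormat::ASTC8x6UnormSrgb;
    constexpr uint32_t width = 256;
    constexpr uint32_t height = 200;

    const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);    // 8
    const uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);  // 6
    const uint32_t blockSize = utils::GetTexelBlockSizeInBytes(format);       // 16 bytes

    // Round the extent up to whole blocks: 256x200 becomes 32x34 blocks for ASTC 8x6.
    const uint32_t blocksPerRow = (width + blockWidth - 1) / blockWidth;
    const uint32_t blockRows = (height + blockHeight - 1) / blockHeight;

    const uint32_t bytesPerRow = blocksPerRow * blockSize;          // 32 * 16 = 512
    const uint64_t totalBytes = uint64_t(bytesPerRow) * blockRows;  // 512 * 34 = 17408

    printf("bytesPerRow=%u blockRows=%u total=%llu\n", bytesPerRow, blockRows,
           static_cast<unsigned long long>(totalBytes));
    return 0;
}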
diff --git a/chromium/third_party/dawn/src/utils/TextureUtils.h b/chromium/third_party/dawn/src/utils/TextureUtils.h
index 97d24f03a14..9c570c203d8 100644
--- a/chromium/third_party/dawn/src/utils/TextureUtils.h
+++ b/chromium/third_party/dawn/src/utils/TextureUtils.h
@@ -22,7 +22,7 @@
#include "common/Assert.h"
namespace utils {
- static constexpr std::array<wgpu::TextureFormat, 53> kAllTextureFormats = {
+ static constexpr std::array<wgpu::TextureFormat, 91> kAllTextureFormats = {
wgpu::TextureFormat::R8Unorm,
wgpu::TextureFormat::R8Snorm,
wgpu::TextureFormat::R8Uint,
@@ -76,7 +76,44 @@ namespace utils {
wgpu::TextureFormat::BC6HRGBFloat,
wgpu::TextureFormat::BC7RGBAUnorm,
wgpu::TextureFormat::BC7RGBAUnormSrgb,
- };
+ wgpu::TextureFormat::ETC2RGB8Unorm,
+ wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm,
+ wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm,
+ wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm,
+ wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm,
+ wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm,
+ wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm,
+ wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm,
+ wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm,
+ wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm,
+ wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm,
+ wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm,
+ wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm,
+ wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm,
+ wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm,
+ wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm,
+ wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm,
+ wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm,
+ wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm,
+ wgpu::TextureFormat::ASTC12x12UnormSrgb};
static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
@@ -87,8 +124,67 @@ namespace utils {
wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
+ static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm};
+
+ static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
+ };
+
+ static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8Unorm, wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+ wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+ wgpu::TextureFormat::ETC2RGBA8Unorm, wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+ wgpu::TextureFormat::EACR11Unorm, wgpu::TextureFormat::EACR11Snorm,
+ wgpu::TextureFormat::EACRG11Unorm, wgpu::TextureFormat::EACRG11Snorm,
+ wgpu::TextureFormat::ASTC4x4Unorm, wgpu::TextureFormat::ASTC4x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x4Unorm, wgpu::TextureFormat::ASTC5x4UnormSrgb,
+ wgpu::TextureFormat::ASTC5x5Unorm, wgpu::TextureFormat::ASTC5x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x5Unorm, wgpu::TextureFormat::ASTC6x5UnormSrgb,
+ wgpu::TextureFormat::ASTC6x6Unorm, wgpu::TextureFormat::ASTC6x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x5Unorm, wgpu::TextureFormat::ASTC8x5UnormSrgb,
+ wgpu::TextureFormat::ASTC8x6Unorm, wgpu::TextureFormat::ASTC8x6UnormSrgb,
+ wgpu::TextureFormat::ASTC8x8Unorm, wgpu::TextureFormat::ASTC8x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x5Unorm, wgpu::TextureFormat::ASTC10x5UnormSrgb,
+ wgpu::TextureFormat::ASTC10x6Unorm, wgpu::TextureFormat::ASTC10x6UnormSrgb,
+ wgpu::TextureFormat::ASTC10x8Unorm, wgpu::TextureFormat::ASTC10x8UnormSrgb,
+ wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+ wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb};
+ static_assert(kCompressedFormats.size() ==
+ kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
+                  "Number of compressed formats must equal number of BC, ETC2, and ASTC formats.");
+
bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+ bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
+ bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
+ bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
+
uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index b06421a2a8e..0dff1781360 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -66,9 +66,8 @@ namespace utils {
const ComboRenderPassDescriptor& operator=(
const ComboRenderPassDescriptor& otherRenderPass);
- std::array<wgpu::RenderPassColorAttachmentDescriptor, kMaxColorAttachments>
- cColorAttachments;
- wgpu::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo = {};
+ std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
+ wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
};
struct BasicRenderPass {