author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-09-01 11:08:40 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2021-10-01 12:16:21 +0000
commit     03c549e0392f92c02536d3f86d5e1d8dfa3435ac (patch)
tree       fe49d170a929b34ba82cd10db1a0bd8e3760fa4b /chromium/third_party/dawn
parent     5d013f5804a0d91fcf6c626b2d6fb6eca5c845b0 (diff)
download   qtwebengine-chromium-03c549e0392f92c02536d3f86d5e1d8dfa3435ac.tar.gz
BASELINE: Update Chromium to 91.0.4472.160
Change-Id: I0def1f08a2412aeed79a9ab95dd50eb5c3f65f31
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn')
-rw-r--r--  chromium/third_party/dawn/.gn | 8
-rw-r--r--  chromium/third_party/dawn/.vscode/tasks.json | 159
-rw-r--r--  chromium/third_party/dawn/CMakeLists.txt | 6
-rw-r--r--  chromium/third_party/dawn/DEPS | 92
-rw-r--r--  chromium/third_party/dawn/build_overrides/dawn.gni | 1
-rw-r--r--  chromium/third_party/dawn/build_overrides/vulkan_common.gni (renamed from chromium/third_party/dawn/build_overrides/shaderc.gni) | 10
-rw-r--r--  chromium/third_party/dawn/build_overrides/vulkan_loader.gni | 3
-rw-r--r--  chromium/third_party/dawn/build_overrides/vulkan_tools.gni | 21
-rw-r--r--  chromium/third_party/dawn/build_overrides/vulkan_validation_layers.gni | 7
-rw-r--r--  chromium/third_party/dawn/codereview.settings | 2
-rw-r--r--  chromium/third_party/dawn/dawn.json | 375
-rw-r--r--  chromium/third_party/dawn/dawn_wire.json | 21
-rw-r--r--  chromium/third_party/dawn/docs/codegen.md | 88
-rw-r--r--  chromium/third_party/dawn/docs/fuzzing.md | 16
-rw-r--r--  chromium/third_party/dawn/docs/overview.md | 4
-rw-r--r--  chromium/third_party/dawn/examples/Animometer.cpp | 97
-rw-r--r--  chromium/third_party/dawn/examples/CHelloTriangle.cpp | 97
-rw-r--r--  chromium/third_party/dawn/examples/ComputeBoids.cpp | 84
-rw-r--r--  chromium/third_party/dawn/examples/CppHelloTriangle.cpp | 40
-rw-r--r--  chromium/third_party/dawn/examples/CubeReflection.cpp | 136
-rw-r--r--  chromium/third_party/dawn/examples/ManualSwapChainTest.cpp | 11
-rw-r--r--  chromium/third_party/dawn/examples/SampleUtils.cpp | 4
-rw-r--r--  chromium/third_party/dawn/generator/dawn_json_generator.py | 2
-rw-r--r--  chromium/third_party/dawn/generator/extract_json.py | 9
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp | 2
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.cpp | 172
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.h | 71
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/client/ClientHandlers.cpp | 4
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerBase.h | 10
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerHandlers.cpp | 4
-rw-r--r--  chromium/third_party/dawn/scripts/dawn_features.gni | 4
-rw-r--r--  chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni | 8
-rwxr-xr-x  chromium/third_party/dawn/scripts/roll-shader-deps.sh | 6
-rwxr-xr-x  chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh | 98
-rw-r--r--  chromium/third_party/dawn/src/common/BUILD.gn | 27
-rw-r--r--  chromium/third_party/dawn/src/common/CMakeLists.txt | 12
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.cpp | 38
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.h | 5
-rw-r--r--  chromium/third_party/dawn/src/common/RefCounted.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/common/RefCounted.h | 4
-rw-r--r--  chromium/third_party/dawn/src/common/VertexFormatUtils.cpp | 289
-rw-r--r--  chromium/third_party/dawn/src/common/VertexFormatUtils.h | 65
-rw-r--r--  chromium/third_party/dawn/src/common/WindowsUtils.cpp | 32
-rw-r--r--  chromium/third_party/dawn/src/common/WindowsUtils.h | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/AttachmentState.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BUILD.gn | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.cpp | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.h | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CMakeLists.txt | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp | 20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp | 184
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.h | 68
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp | 115
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.h | 25
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp | 102
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CompilationMessages.h | 55
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp | 195
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp | 102
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DawnNative.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.cpp | 712
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.h | 183
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Extensions.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Extensions.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp | 111
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ExternalTexture.h | 51
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Fence.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Fence.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.cpp | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.h | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp | 40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PipelineLayout.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp | 54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.cpp | 104
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.h | 55
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp | 50
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h | 40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp | 55
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h | 38
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp | 665
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.h | 37
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp | 722
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.h | 53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Subresource.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.cpp | 40
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/SwapChain.h | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.cpp | 61
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/TintUtils.cpp | 55
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/TintUtils.h | 37
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp | 183
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp | 94
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp | 90
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h | 41
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp | 112
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp | 122
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp | 71
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp | 86
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp | 181
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h | 35
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm | 50
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm | 135
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h | 15
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm | 98
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp | 96
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h | 42
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp | 135
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp | 52
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp | 84
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp | 58
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp | 89
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp | 39
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp | 118
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp | 27
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp | 31
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/BUILD.gn | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h | 85
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h | 73
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/WireResult.h | 38
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp | 44
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp | 66
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h | 46
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp | 51
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h | 38
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/DawnNative.h | 6
-rw-r--r--  chromium/third_party/dawn/src/tests/BUILD.gn | 25
-rw-r--r--  chromium/third_party/dawn/src/utils/BUILD.gn | 4
-rw-r--r--  chromium/third_party/dawn/src/utils/CMakeLists.txt | 2
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp | 103
-rw-r--r--  chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h | 23
-rw-r--r--  chromium/third_party/dawn/src/utils/TestUtils.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.cpp | 186
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.h | 17
-rw-r--r--  chromium/third_party/dawn/third_party/CMakeLists.txt | 33
249 files changed, 6562 insertions, 3679 deletions
diff --git a/chromium/third_party/dawn/.gn b/chromium/third_party/dawn/.gn
index b5067637ad3..2b8936eb458 100644
--- a/chromium/third_party/dawn/.gn
+++ b/chromium/third_party/dawn/.gn
@@ -25,19 +25,15 @@ default_args = {
mac_min_system_version = "10.11.0"
angle_enable_abseil = false
-
angle_standalone = false
-
angle_build_all = false
-
angle_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
-
angle_vulkan_loader_dir = "//third_party/vulkan-deps/vulkan-loader/src"
-
angle_vulkan_tools_dir = "//third_party/vulkan-deps/vulkan-tools/src"
-
angle_vulkan_validation_layers_dir =
"//third_party/vulkan-deps/vulkan-validation-layers/src"
+
+ vma_vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
}
check_targets = [
diff --git a/chromium/third_party/dawn/.vscode/tasks.json b/chromium/third_party/dawn/.vscode/tasks.json
new file mode 100644
index 00000000000..03599a8c3c9
--- /dev/null
+++ b/chromium/third_party/dawn/.vscode/tasks.json
@@ -0,0 +1,159 @@
+{
+ // See https://go.microsoft.com/fwlink/?LinkId=733558
+ // for the documentation about the tasks.json format
+ // Available variables which can be used inside of strings.
+ // ${workspaceRoot}: the root folder of the team
+ // ${file}: the current opened file
+ // ${fileBasename}: the current opened file's basename
+ // ${fileDirname}: the current opened file's dirname
+ // ${fileExtname}: the current opened file's extension
+ // ${cwd}: the current working directory of the spawned process
+ "version": "2.0.0",
+ "tasks": [
+ // Invokes ninja in the 'out/active' directory, which is created with
+ // the 'gn gen' task (see below).
+ {
+ "label": "build",
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
+ "type": "shell",
+ "linux": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "ninja && echo Done"
+ ],
+ },
+ "osx": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "ninja && echo Done"
+ ],
+ },
+ "windows": {
+ "command": "cmd",
+ "args": [
+ "/C",
+ "ninja && echo Done"
+ ],
+ },
+ "options": {
+ "cwd": "${workspaceRoot}/out/active",
+ },
+ "presentation": {
+ "echo": false,
+ "reveal": "always",
+ "focus": false,
+ "panel": "shared",
+ "showReuseMessage": false,
+ "clear": true,
+ },
+ "problemMatcher": {
+ "owner": "cpp",
+ "fileLocation": "absolute",
+ "pattern": {
+ "regexp": "^(.*):(\\d+):(\\d+):\\s+(warning|error):\\s+(.*)$",
+ "file": 1,
+ "line": 2,
+ "column": 3,
+ "severity": 4,
+ "message": 5
+ }
+ }
+ },
+ // Generates a GN build directory at 'out/<build-type>' with the
+ // is_debug argument set to true iff the build-type is Debug.
+ // A symbolic link to this build directory is created at 'out/active'
+ // which is used to track the active build directory.
+ {
+ "label": "gn gen",
+ "type": "shell",
+ "linux": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "gn gen 'out/${input:buildType}' --args=is_debug=$(if [ '${input:buildType}' = 'Debug' ]; then echo 'true'; else echo 'false'; fi) && (rm -fr out/active || true) && ln -s ${input:buildType} out/active",
+ ],
+ },
+ "osx": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "gn gen 'out/${input:buildType}' --args=is_debug=$(if [ '${input:buildType}' = 'Debug' ]; then echo 'true'; else echo 'false'; fi) && (rm -fr out/active || true) && ln -s ${input:buildType} out/active",
+ ],
+ },
+ "windows": {
+ "command": "cmd",
+ "args": [
+ "/C",
+ "(IF \"${input:buildType}\" == \"Debug\" ( gn gen 'out\\${input:buildType}' --args=is_debug=true ) ELSE ( gn gen 'out\\${input:buildType}' --args=is_debug=false )) && (IF EXIST 'out\\active' rmdir 'out\\active' /q /s) && (mklink /j 'out\\active' 'out\\${input:buildType}')",
+ ],
+ },
+ "options": {
+ "cwd": "${workspaceRoot}"
+ },
+ "problemMatcher": [],
+ },
+ // Rebases the current branch on to origin/main and then calls
+ // `gclient sync`.
+ {
+ "label": "sync",
+ "type": "shell",
+ "linux": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "git fetch origin && git rebase origin/main && gclient sync && echo Done"
+ ],
+ },
+ "osx": {
+ "command": "sh",
+ "args": [
+ "-c",
+ "git fetch origin && git rebase origin/main && gclient sync && echo Done"
+ ],
+ },
+ "windows": {
+ "command": "cmd",
+ "args": [
+ "/C",
+ "git fetch origin && git rebase origin/main && gclient sync && echo Done"
+ ],
+ },
+ "options": {
+ "cwd": "${workspaceRoot}"
+ },
+ "problemMatcher": [],
+ },
+ // Pushes the changes at HEAD to gerrit for review
+ {
+ "label": "push",
+ "type": "shell",
+ "command": "git",
+ "args": [
+ "push",
+ "origin",
+ "HEAD:refs/for/main"
+ ],
+ "options": {
+ "cwd": "${workspaceRoot}"
+ },
+ "problemMatcher": [],
+ }
+ ],
+ "inputs": [
+ {
+ "id": "buildType",
+ "type": "pickString",
+ "options": [
+ "Debug",
+ "Release",
+ ],
+ "default": "Debug",
+ "description": "The type of build",
+ },
+ ]
+}
\ No newline at end of file
diff --git a/chromium/third_party/dawn/CMakeLists.txt b/chromium/third_party/dawn/CMakeLists.txt
index f069281af6d..161709463fc 100644
--- a/chromium/third_party/dawn/CMakeLists.txt
+++ b/chromium/third_party/dawn/CMakeLists.txt
@@ -69,7 +69,6 @@ option(DAWN_ENABLE_METAL "Enable compilation of the Metal backend" ${ENABLE_META
option(DAWN_ENABLE_NULL "Enable compilation of the Null backend" ON)
option(DAWN_ENABLE_OPENGL "Enable compilation of the OpenGL backend" ${ENABLE_OPENGL})
option(DAWN_ENABLE_VULKAN "Enable compilation of the Vulkan backend" ${ENABLE_VULKAN})
-option(DAWN_ENABLE_WGSL "Enable WGSL support" ON)
option(DAWN_ALWAYS_ASSERT "Enable assertions on all build types" OFF)
option(DAWN_USE_X11 "Enable support for X11 surface" ${USE_X11})
@@ -79,9 +78,7 @@ set(DAWN_THIRD_PARTY_DIR "${Dawn_SOURCE_DIR}/third_party" CACHE STRING "Director
set(DAWN_GLFW_DIR "${DAWN_THIRD_PARTY_DIR}/glfw" CACHE STRING "Directory in which to find GLFW")
set(DAWN_GLM_DIR "${DAWN_THIRD_PARTY_DIR}/glm" CACHE STRING "Directory in which to find GLM")
-set(DAWN_GLSLANG_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/glslang/src" CACHE STRING "Directory in which to find GLSLang")
set(DAWN_JINJA2_DIR "${DAWN_THIRD_PARTY_DIR}/jinja2" CACHE STRING "Directory in which to find Jinja2")
-set(DAWN_SHADERC_DIR "${DAWN_THIRD_PARTY_DIR}/shaderc" CACHE STRING "Directory in which to find shaderc")
set(DAWN_SPIRV_CROSS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-cross/src" CACHE STRING "Directory in which to find SPIRV-Cross")
set(DAWN_SPIRV_HEADERS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-headers/src" CACHE STRING "Directory in which to find SPIRV-Headers")
set(DAWN_SPIRV_TOOLS_DIR "${DAWN_THIRD_PARTY_DIR}/vulkan-deps/spirv-tools/src" CACHE STRING "Directory in which to find SPIRV-Tools")
@@ -125,9 +122,6 @@ endif()
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_VULKAN")
endif()
-if (DAWN_ENABLE_WGSL)
- target_compile_definitions(dawn_internal_config INTERFACE "-DDAWN_ENABLE_WGSL=1")
-endif()
if (DAWN_USE_X11)
target_compile_definitions(dawn_internal_config INTERFACE "DAWN_USE_X11")
endif()
diff --git a/chromium/third_party/dawn/DEPS b/chromium/third_party/dawn/DEPS
index b7ac3ee3cc7..c60e4106382 100644
--- a/chromium/third_party/dawn/DEPS
+++ b/chromium/third_party/dawn/DEPS
@@ -14,25 +14,63 @@ vars = {
deps = {
# Dependencies required to use GN/Clang in standalone
'build': {
- 'url': '{chromium_git}/chromium/src/build@6fa63e6b1506d6dce6c8e86c4dc2b70e4e6f1e6b',
+ 'url': '{chromium_git}/chromium/src/build@ea192b29d26f84e94910791c1eeb89d24c063dd0',
'condition': 'dawn_standalone',
},
'buildtools': {
- 'url': '{chromium_git}/chromium/src/buildtools@fc5af1ac75d8a249d692fb3cbf707dd9f791ec3c',
+ 'url': '{chromium_git}/chromium/src/buildtools@69cc9b8a3ae010e0721c4bea12de7a352d9a93f9',
'condition': 'dawn_standalone',
},
+ 'buildtools/clang_format/script': {
+ 'url': '{chromium_git}/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@99803d74e35962f63a775f29477882afd4d57d94',
+ 'condition': 'dawn_standalone',
+ },
+
+ 'buildtools/linux64': {
+ 'packages': [{
+ 'package': 'gn/gn/linux-amd64',
+ 'version': 'git_revision:dfcbc6fed0a8352696f92d67ccad54048ad182b3',
+ }],
+ 'dep_type': 'cipd',
+ 'condition': 'dawn_standalone and host_os == "linux"',
+ },
+ 'buildtools/mac': {
+ 'packages': [{
+ 'package': 'gn/gn/mac-${{arch}}',
+ 'version': 'git_revision:dfcbc6fed0a8352696f92d67ccad54048ad182b3',
+ }],
+ 'dep_type': 'cipd',
+ 'condition': 'dawn_standalone and host_os == "mac"',
+ },
+ 'buildtools/win': {
+ 'packages': [{
+ 'package': 'gn/gn/windows-amd64',
+ 'version': 'git_revision:dfcbc6fed0a8352696f92d67ccad54048ad182b3',
+ }],
+ 'dep_type': 'cipd',
+ 'condition': 'dawn_standalone and host_os == "win"',
+ },
+
+ 'buildtools/third_party/libc++/trunk': {
+ 'url': '{chromium_git}/external/github.com/llvm/llvm-project/libcxx.git@8fa87946779682841e21e2da977eccfb6cb3bded',
+ 'condition': 'dawn_standalone',
+ },
+
+ 'buildtools/third_party/libc++abi/trunk': {
+ 'url': '{chromium_git}/external/github.com/llvm/llvm-project/libcxxabi.git@6918862bfc2bff22b45058fac22b1596c49982fb',
+ 'condition': 'dawn_standalone',
+ },
+
'tools/clang': {
- 'url': '{chromium_git}/chromium/src/tools/clang@aecd85e062e006f47665ed6347e0cc1e9a116421',
+ 'url': '{chromium_git}/chromium/src/tools/clang@24cecabb89512de31a1083308b237420651a835c',
'condition': 'dawn_standalone',
},
'tools/clang/dsymutil': {
- 'packages': [
- {
- 'package': 'chromium/llvm-build-tools/dsymutil',
- 'version': 'M56jPzDv1620Rnm__jTMYS62Zi8rxHVq7yw0qeBFEgkC',
- }
- ],
- 'condition': 'checkout_mac or checkout_ios',
+ 'packages': [{
+ 'package': 'chromium/llvm-build-tools/dsymutil',
+ 'version': 'M56jPzDv1620Rnm__jTMYS62Zi8rxHVq7yw0qeBFEgkC',
+ }],
+ 'condition': 'dawn_standalone and (checkout_mac or checkout_ios)',
'dep_type': 'cipd',
},
@@ -56,16 +94,9 @@ deps = {
'condition': 'dawn_standalone',
},
- # SPIRV compiler dependencies: shaderc
- 'third_party/shaderc': {
- 'url': '{chromium_git}/external/github.com/google/shaderc@8d081127ee28ff5df8123c994c00bc66a57e9e9c',
- 'condition': 'dawn_standalone',
- },
-
# WGSL support
'third_party/tint': {
- 'url': '{dawn_git}/tint@c1f7e904176d02369405b64e251391f9e9758428',
- 'condition': 'dawn_standalone',
+ 'url': '{dawn_git}/tint@933d44a2c8b9148c7a052b7555765f4a115f80aa',
},
# GLFW for tests and samples
@@ -81,12 +112,12 @@ deps = {
},
'third_party/vulkan_memory_allocator': {
- 'url': '{chromium_git}/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator@065e739079d9d58bef28ccd793cbf512261f09ed',
+ 'url': '{chromium_git}/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator@1ecb35c39875c77219c75ecb5c5b3316020f66b8',
'condition': 'dawn_standalone',
},
'third_party/angle': {
- 'url': '{chromium_git}/angle/angle@f4cd17472aca1939277b145ef0f72b0946b83716',
+ 'url': '{chromium_git}/angle/angle@9a025fd44822a9d185eed1ec071e9f04706d0b34',
'condition': 'dawn_standalone',
},
@@ -96,7 +127,8 @@ deps = {
},
'third_party/vulkan-deps': {
- 'url': '{chromium_git}/vulkan-deps@23a4efc36ed9e649fc650ce9dd26df5cac2c26ab',
+ 'url': '{chromium_git}/vulkan-deps@105af117f0532953577198cc9bd8ee6f76c29009',
+ 'condition': 'dawn_standalone',
},
'third_party/zlib': {
@@ -110,14 +142,14 @@ hooks = [
{
'name': 'sysroot_x86',
'pattern': '.',
- 'condition': 'checkout_linux and ((checkout_x86 or checkout_x64) and dawn_standalone)',
+ 'condition': 'dawn_standalone and checkout_linux and (checkout_x86 or checkout_x64)',
'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x86'],
},
{
'name': 'sysroot_x64',
'pattern': '.',
- 'condition': 'checkout_linux and (checkout_x64 and dawn_standalone)',
+ 'condition': 'dawn_standalone and checkout_linux and checkout_x64',
'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x64'],
},
@@ -126,14 +158,14 @@ hooks = [
# is more consistent (only changes when rolling build/) and is cached.
'name': 'mac_toolchain',
'pattern': '.',
- 'condition': 'checkout_mac',
+ 'condition': 'dawn_standalone and checkout_mac',
'action': ['python', 'build/mac_toolchain.py'],
},
{
# Update the Windows toolchain if necessary. Must run before 'clang' below.
'name': 'win_toolchain',
'pattern': '.',
- 'condition': 'checkout_win and dawn_standalone',
+ 'condition': 'dawn_standalone and checkout_win',
'action': ['python', 'build/vs_toolchain.py', 'update', '--force'],
},
{
@@ -147,7 +179,7 @@ hooks = [
# Pull rc binaries using checked-in hashes.
'name': 'rc_win',
'pattern': '.',
- 'condition': 'checkout_win and (host_os == "win" and dawn_standalone)',
+ 'condition': 'dawn_standalone and checkout_win and host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -159,7 +191,7 @@ hooks = [
{
'name': 'clang_format_win',
'pattern': '.',
- 'condition': 'host_os == "win"',
+ 'condition': 'dawn_standalone and host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -170,7 +202,7 @@ hooks = [
{
'name': 'clang_format_mac',
'pattern': '.',
- 'condition': 'host_os == "mac"',
+ 'condition': 'dawn_standalone and host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -181,7 +213,7 @@ hooks = [
{
'name': 'clang_format_linux',
'pattern': '.',
- 'condition': 'host_os == "linux"',
+ 'condition': 'dawn_standalone and host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -200,7 +232,5 @@ hooks = [
]
recursedeps = [
- # buildtools provides clang_format, libc++, and libc++abi
- 'buildtools',
'third_party/vulkan-deps',
]
diff --git a/chromium/third_party/dawn/build_overrides/dawn.gni b/chromium/third_party/dawn/build_overrides/dawn.gni
index 6fac7d4e366..d0190b1024a 100644
--- a/chromium/third_party/dawn/build_overrides/dawn.gni
+++ b/chromium/third_party/dawn/build_overrides/dawn.gni
@@ -32,7 +32,6 @@ dawn_jinja2_dir = "//third_party/jinja2"
dawn_glfw_dir = "//third_party/glfw"
dawn_glm_dir = "//third_party/glm"
dawn_googletest_dir = "//third_party/googletest"
-dawn_shaderc_dir = "//third_party/shaderc"
dawn_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
dawn_spirv_cross_dir = "//third_party/vulkan-deps/spirv-cross/src"
dawn_swiftshader_dir = "//third_party/swiftshader"
diff --git a/chromium/third_party/dawn/build_overrides/shaderc.gni b/chromium/third_party/dawn/build_overrides/vulkan_common.gni
index fb0f62bd73d..9a883e7a30a 100644
--- a/chromium/third_party/dawn/build_overrides/shaderc.gni
+++ b/chromium/third_party/dawn/build_overrides/vulkan_common.gni
@@ -1,4 +1,4 @@
-# Copyright 2018 The Dawn Authors
+# Copyright 2021 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-shaderc_glslang_dir = "//third_party/vulkan-deps/glslang/src"
-shaderc_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
-shaderc_spirv_headers_dir = "//third_party/vulkan-deps/spirv-headers/src"
+vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+
+# Subdirectories for generated files
+vulkan_data_subdir = "vulkandata"
+vulkan_gen_subdir = ""
diff --git a/chromium/third_party/dawn/build_overrides/vulkan_loader.gni b/chromium/third_party/dawn/build_overrides/vulkan_loader.gni
index 7185deaf9fd..ac6a61d3141 100644
--- a/chromium/third_party/dawn/build_overrides/vulkan_loader.gni
+++ b/chromium/third_party/dawn/build_overrides/vulkan_loader.gni
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
+import("//build_overrides/vulkan_common.gni")
-vulkan_gen_subdir = "vulkan_loader"
vulkan_loader_shared = true
diff --git a/chromium/third_party/dawn/build_overrides/vulkan_tools.gni b/chromium/third_party/dawn/build_overrides/vulkan_tools.gni
index cd89fc9b209..7bd4d9916b5 100644
--- a/chromium/third_party/dawn/build_overrides/vulkan_tools.gni
+++ b/chromium/third_party/dawn/build_overrides/vulkan_tools.gni
@@ -1,8 +1,15 @@
-# Copyright 2019 The ANGLE Project Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright 2021 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
-
-vulkan_data_subdir = "vulkandata"
-vulkan_gen_subdir = "angle/vulkan"
+import("//build_overrides/vulkan_common.gni")
diff --git a/chromium/third_party/dawn/build_overrides/vulkan_validation_layers.gni b/chromium/third_party/dawn/build_overrides/vulkan_validation_layers.gni
index 37193a879a4..91f95b3d6c7 100644
--- a/chromium/third_party/dawn/build_overrides/vulkan_validation_layers.gni
+++ b/chromium/third_party/dawn/build_overrides/vulkan_validation_layers.gni
@@ -12,17 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import("//build_overrides/vulkan_common.gni")
+
# These are variables that are overridable by projects that include Dawn.
# The values in this file are the defaults for when we are building from
# Dawn's repository.
-vulkan_headers_dir = "//third_party/vulkan-deps/vulkan-headers/src"
vvl_spirv_tools_dir = "//third_party/vulkan-deps/spirv-tools/src"
vvl_glslang_dir = "//third_party/vulkan-deps/glslang/src"
-# Subdirectories for generated files
-vulkan_data_subdir = "vulkandata"
-vulkan_gen_subdir = ""
-
# Fake the use_x11 when inside Dawn's repository
import("../scripts/dawn_features.gni")
use_x11 = dawn_use_x11
diff --git a/chromium/third_party/dawn/codereview.settings b/chromium/third_party/dawn/codereview.settings
index 3894052c3ab..10cc2bf828c 100644
--- a/chromium/third_party/dawn/codereview.settings
+++ b/chromium/third_party/dawn/codereview.settings
@@ -1,3 +1,5 @@
# This file is used by git cl to get repository specific information.
GERRIT_HOST: True
CODE_REVIEW_SERVER: https://dawn-review.googlesource.com
+GERRIT_SQUASH_UPLOADS: False
+TRYSERVER_GERRIT_URL: https://dawn-review.googlesource.com
diff --git a/chromium/third_party/dawn/dawn.json b/chromium/third_party/dawn/dawn.json
index 7162078c9ef..9eecdf63be9 100644
--- a/chromium/third_party/dawn/dawn.json
+++ b/chromium/third_party/dawn/dawn.json
@@ -14,6 +14,9 @@
"See the License for the specific language governing permissions and",
"limitations under the License."
],
+
+ "_doc": "See docs/codegen.md",
+
"adapter properties": {
"category": "structure",
"extensible": true,
@@ -205,7 +208,7 @@
{"value": 9, "name": "writeonly storage texture"}
]
},
- "blend descriptor": {
+ "blend component": {
"category": "structure",
"extensible": false,
"members": [
@@ -214,6 +217,10 @@
{"name": "dst factor", "type": "blend factor", "default": "zero"}
]
},
+ "blend descriptor": {
+ "category": "typedef",
+ "type": "blend component"
+ },
"blend factor": {
"category": "enum",
"values": [
@@ -247,8 +254,8 @@
"extensible": true,
"members": [
{"name": "format", "type": "texture format"},
- {"name": "alpha blend", "type": "blend descriptor"},
- {"name": "color blend", "type": "blend descriptor"},
+ {"name": "alpha blend", "type": "blend component"},
+ {"name": "color blend", "type": "blend component"},
{"name": "write mask", "type": "color write mask", "default": "all"}
]
},
@@ -293,12 +300,8 @@
]
},
"buffer copy view": {
- "category": "structure",
- "extensible": true,
- "members": [
- {"name": "layout", "type": "texture data layout"},
- {"name": "buffer", "type": "buffer"}
- ]
+ "category": "typedef",
+ "type": "image copy buffer"
},
"buffer descriptor": {
"category": "structure",
@@ -409,32 +412,29 @@
{"name": "destination", "type": "buffer"},
{"name": "destination offset", "type": "uint64_t"},
{"name": "size", "type": "uint64_t"}
- ],
- "TODO": [
- "Restrictions on the alignment of the copy? Cf Metal on OSX"
]
},
{
"name": "copy buffer to texture",
"args": [
- {"name": "source", "type": "buffer copy view", "annotation": "const*"},
- {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "source", "type": "image copy buffer", "annotation": "const*"},
+ {"name": "destination", "type": "image copy texture", "annotation": "const*"},
{"name": "copy size", "type": "extent 3D", "annotation": "const*"}
]
},
{
"name": "copy texture to buffer",
"args": [
- {"name": "source", "type": "texture copy view", "annotation": "const*"},
- {"name": "destination", "type": "buffer copy view", "annotation": "const*"},
+ {"name": "source", "type": "image copy texture", "annotation": "const*"},
+ {"name": "destination", "type": "image copy buffer", "annotation": "const*"},
{"name": "copy size", "type": "extent 3D", "annotation": "const*"}
]
},
{
"name": "copy texture to texture",
"args": [
- {"name": "source", "type": "texture copy view", "annotation": "const*"},
- {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "source", "type": "image copy texture", "annotation": "const*"},
+ {"name": "destination", "type": "image copy texture", "annotation": "const*"},
{"name": "copy size", "type": "extent 3D", "annotation": "const*"}
]
},
@@ -501,6 +501,49 @@
{"value": 8, "name": "always"}
]
},
+ "compilation info": {
+ "category": "structure",
+ "extensible": false,
+ "members": [
+ {"name": "message count", "type": "uint32_t"},
+ {"name": "messages", "type": "compilation message", "annotation": "const*", "length": "message count"}
+ ]
+ },
+ "compilation info callback": {
+ "category": "callback",
+ "args": [
+ {"name": "status", "type": "compilation info request status"},
+ {"name": "compilation info", "type": "compilation info", "annotation": "const*"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
+ ]
+ },
+ "compilation info request status": {
+ "category": "enum",
+ "values": [
+ {"value": 0, "name": "success"},
+ {"value": 1, "name": "error"},
+ {"value": 2, "name": "device lost"},
+ {"value": 3, "name": "unknown"}
+ ]
+ },
+ "compilation message": {
+ "category": "structure",
+ "extensible": false,
+ "members": [
+ {"name": "message", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+ {"name": "type", "type": "compilation message type"},
+ {"name": "line num", "type": "uint64_t"},
+ {"name": "line pos", "type": "uint64_t"}
+ ]
+ },
+ "compilation message type": {
+ "category": "enum",
+ "values": [
+ {"value": 0, "name": "error"},
+ {"value": 1, "name": "warning"},
+ {"value": 2, "name": "info"}
+ ]
+ },
"compute pass descriptor": {
"category": "structure",
"extensible": true,
@@ -703,7 +746,7 @@
"name": "create render pipeline async",
"returns": "void",
"args": [
- {"name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"},
+ {"name": "descriptor", "type": "render pipeline descriptor 2", "annotation": "const*"},
{"name": "callback", "type": "create render pipeline async callback"},
{"name": "userdata", "type": "void", "annotation": "*"}
]
@@ -723,6 +766,13 @@
]
},
{
+ "name": "create render pipeline 2",
+ "returns": "render pipeline",
+ "args": [
+ {"name": "descriptor", "type": "render pipeline descriptor 2", "annotation": "const*"}
+ ]
+ },
+ {
"name": "create sampler",
"returns": "sampler",
"args": [
@@ -760,6 +810,13 @@
"returns": "queue"
},
{
+ "name": "create external texture",
+ "returns": "external texture",
+ "args": [
+ {"name": "external texture descriptor", "type": "external texture descriptor", "annotation": "const*"}
+ ]
+ },
+ {
"name": "inject error",
"args": [
{"name": "type", "type": "error type"},
@@ -818,7 +875,8 @@
{"name": "shader float16", "type": "bool", "default": "false"},
{"name": "pipeline statistics query", "type": "bool", "default": "false"},
{"name": "timestamp query", "type": "bool", "default": "false"},
- {"name": "multi planar formats", "type": "bool", "default": "false"}
+ {"name": "multi planar formats", "type": "bool", "default": "false"},
+ {"name": "depth clamping", "type": "bool", "default": "false"}
]
},
"depth stencil state descriptor": {
@@ -828,8 +886,8 @@
{"name": "format", "type": "texture format"},
{"name": "depth write enabled", "type": "bool", "default": "false"},
{"name": "depth compare", "type": "compare function", "default": "always"},
- {"name": "stencil front", "type": "stencil state face descriptor"},
- {"name": "stencil back", "type": "stencil state face descriptor"},
+ {"name": "stencil front", "type": "stencil face state"},
+ {"name": "stencil back", "type": "stencil face state"},
{"name": "stencil read mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
{"name": "stencil write mask", "type": "uint32_t", "default": "0xFFFFFFFF"}
]
@@ -866,11 +924,29 @@
"extent 3D": {
"category": "structure",
"members": [
- {"name": "width", "type": "uint32_t", "default": 1},
+ {"name": "width", "type": "uint32_t"},
{"name": "height", "type": "uint32_t", "default": 1},
+ {"name": "depth or array layers", "type": "uint32_t", "default": 1},
{"name": "depth", "type": "uint32_t", "default": 1}
]
},
+ "external texture": {
+ "category": "object",
+ "methods": [
+ {
+ "name": "destroy",
+ "returns": "void"
+ }
+ ]
+ },
+ "external texture descriptor": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "plane 0", "type": "texture view"},
+ {"name": "format", "type": "texture format"}
+ ]
+ },
"fence": {
"category": "object",
"methods": [
@@ -929,6 +1005,24 @@
{"value": 1, "name": "CW"}
]
},
+ "image copy buffer": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "layout", "type": "texture data layout"},
+ {"name": "buffer", "type": "buffer"}
+ ]
+ },
+ "image copy texture": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "texture", "type": "texture"},
+ {"name": "mip level", "type": "uint32_t", "default": "0"},
+ {"name": "origin", "type": "origin 3D"},
+ {"name": "aspect", "type": "texture aspect", "default": "all"}
+ ]
+ },
"index format": {
"category": "enum",
"values": [
@@ -1146,7 +1240,7 @@
{
"name": "write texture",
"args": [
- {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "destination", "type": "image copy texture", "annotation": "const*"},
{"name": "data", "type": "void", "annotation": "const*", "length": "data size"},
{"name": "data size", "type": "size_t"},
{"name": "data layout", "type": "texture data layout", "annotation": "const*"},
@@ -1157,8 +1251,8 @@
"name": "copy texture for browser",
"extensible": true,
"args": [
- {"name": "source", "type": "texture copy view", "annotation": "const*"},
- {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "source", "type": "image copy texture", "annotation": "const*"},
+ {"name": "destination", "type": "image copy texture", "annotation": "const*"},
{"name": "copy size", "type": "extent 3D", "annotation": "const*"},
{"name": "options", "type": "copy texture for browser options", "annotation": "const*"}
]
@@ -1525,6 +1619,92 @@
}
]
},
+
+ "vertex state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "module", "type": "shader module"},
+ {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+ {"name": "buffer count", "type": "uint32_t"},
+ {"name": "buffers", "type": "vertex buffer layout", "annotation": "const*", "length": "buffer count"}
+ ]
+ },
+
+ "primitive state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "topology", "type": "primitive topology", "default": "triangle list"},
+ {"name": "strip index format", "type": "index format", "default": "undefined"},
+ {"name": "front face", "type": "front face", "default": "CCW"},
+ {"name": "cull mode", "type": "cull mode", "default": "none"}
+ ]
+ },
+
+ "primitive depth clamping state": {
+ "category": "structure",
+ "chained": true,
+ "members": [
+ {"name": "clamp depth", "type": "bool", "default": "false"}
+ ]
+ },
+
+ "depth stencil state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "format", "type": "texture format"},
+ {"name": "depth write enabled", "type": "bool", "default": "false"},
+ {"name": "depth compare", "type": "compare function", "default": "always"},
+ {"name": "stencil front", "type": "stencil face state"},
+ {"name": "stencil back", "type": "stencil face state"},
+ {"name": "stencil read mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+ {"name": "stencil write mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+ {"name": "depth bias", "type": "int32_t", "default": "0"},
+ {"name": "depth bias slope scale", "type": "float", "default": "0.0f"},
+ {"name": "depth bias clamp", "type": "float", "default": "0.0f"}
+ ]
+ },
+
+ "multisample state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "count", "type": "uint32_t", "default": "1"},
+ {"name": "mask", "type": "uint32_t", "default": "0xFFFFFFFF"},
+ {"name": "alpha to coverage enabled", "type": "bool", "default": "false"}
+ ]
+ },
+
+ "fragment state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "module", "type": "shader module"},
+ {"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"},
+ {"name": "target count", "type": "uint32_t"},
+ {"name": "targets", "type": "color target state", "annotation": "const*", "length": "target count"}
+ ]
+ },
+ "color target state": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "format", "type": "texture format"},
+ {"name": "blend", "type": "blend state", "annotation": "const*", "optional": true},
+ {"name": "write mask", "type": "color write mask", "default": "all"}
+ ]
+ },
+ "blend state": {
+ "category": "structure",
+ "extensible": false,
+ "members": [
+ {"name": "color", "type": "blend component"},
+ {"name": "alpha", "type": "blend component"}
+ ]
+ },
+
"render pipeline descriptor": {
"category": "structure",
"extensible": true,
@@ -1544,6 +1724,21 @@
{"name": "alpha to coverage enabled", "type": "bool", "default": "false"}
]
},
+
+ "render pipeline descriptor 2": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
+ {"name": "layout", "type": "pipeline layout", "optional": true},
+ {"name": "vertex", "type": "vertex state"},
+ {"name": "primitive", "type": "primitive state"},
+ {"name": "depth stencil", "type": "depth stencil state", "annotation": "const*", "optional": true},
+ {"name": "multisample", "type": "multisample state"},
+ {"name": "fragment", "type": "fragment state", "annotation": "const*", "optional": true}
+ ]
+ },
+
"render pipeline descriptor dummy extension": {
"category": "structure",
"chained": true,
@@ -1579,7 +1774,16 @@
]
},
"shader module": {
- "category": "object"
+ "category": "object",
+ "methods": [
+ {
+ "name": "get compilation info",
+ "args": [
+ {"name": "callback", "type": "compilation info callback"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
+ ]
+ }
+ ]
},
"shader module descriptor": {
"category": "structure",
@@ -1625,7 +1829,7 @@
{"value": 7, "name": "decrement wrap"}
]
},
- "stencil state face descriptor": {
+ "stencil face state": {
"category": "structure",
"extensible": false,
"members": [
@@ -1635,6 +1839,10 @@
{"name": "pass op", "type": "stencil operation", "default": "keep"}
]
},
+ "stencil state face descriptor": {
+ "category": "typedef",
+ "type": "stencil face state"
+ },
"surface": {
"category": "object"
},
@@ -1719,7 +1927,8 @@
{"value": 5, "name": "shader module SPIRV descriptor"},
{"value": 6, "name": "shader module WGSL descriptor"},
{"value": 7, "name": "sampler descriptor dummy anisotropic filtering"},
- {"value": 8, "name": "render pipeline descriptor dummy extension"}
+ {"value": 8, "name": "render pipeline descriptor dummy extension"},
+ {"value": 9, "name": "primitive depth clamping state"}
]
},
"texture": {
@@ -1757,14 +1966,8 @@
]
},
"texture copy view": {
- "category": "structure",
- "extensible": true,
- "members": [
- {"name": "texture", "type": "texture"},
- {"name": "mip level", "type": "uint32_t", "default": "0"},
- {"name": "origin", "type": "origin 3D"},
- {"name": "aspect", "type": "texture aspect", "default": "all"}
- ]
+ "category": "typedef",
+ "type": "image copy texture"
},
"texture data layout": {
"category": "structure",
@@ -1888,9 +2091,6 @@
{"name": "base array layer", "type": "uint32_t", "default": "0"},
{"name": "array layer count", "type": "uint32_t", "default": "0"},
{"name": "aspect", "type": "texture aspect", "default": "all"}
- ],
- "TODO": [
- "jiawei.shao@intel.com: Allow choosing the aspect (depth vs. stencil)"
]
},
"texture view": {
@@ -1906,44 +2106,73 @@
{"value": 4, "name": "cube"},
{"value": 5, "name": "cube array"},
{"value": 6, "name": "3D"}
- ],
- "TODO": [
- "jiawei.shao@intel.com: support 1D and 3D texture views"
]
},
"vertex format": {
"category": "enum",
"values": [
- {"value": 0, "name": "uChar2"},
- {"value": 1, "name": "uChar4"},
- {"value": 2, "name": "char2"},
- {"value": 3, "name": "char4"},
- {"value": 4, "name": "uChar2 norm"},
- {"value": 5, "name": "uChar4 norm"},
- {"value": 6, "name": "char2 norm"},
- {"value": 7, "name": "char4 norm"},
- {"value": 8, "name": "uShort2"},
- {"value": 9, "name": "uShort4"},
- {"value": 10, "name": "short2"},
- {"value": 11, "name": "short4"},
- {"value": 12, "name": "uShort2 norm"},
- {"value": 13, "name": "uShort4 norm"},
- {"value": 14, "name": "short2 norm"},
- {"value": 15, "name": "short4 norm"},
- {"value": 16, "name": "half2"},
- {"value": 17, "name": "half4"},
- {"value": 18, "name": "float"},
- {"value": 19, "name": "float2"},
- {"value": 20, "name": "float3"},
- {"value": 21, "name": "float4"},
- {"value": 22, "name": "uInt"},
- {"value": 23, "name": "uInt2"},
- {"value": 24, "name": "uInt3"},
- {"value": 25, "name": "uInt4"},
- {"value": 26, "name": "int"},
- {"value": 27, "name": "int2"},
- {"value": 28, "name": "int3"},
- {"value": 29, "name": "int4"}
+ {"value": 0, "name": "undefined", "valid": false, "jsrepr": "undefined"},
+ {"value": 1, "name": "uint8x2"},
+ {"value": 2, "name": "uint8x4"},
+ {"value": 3, "name": "sint8x2"},
+ {"value": 4, "name": "sint8x4"},
+ {"value": 5, "name": "unorm8x2"},
+ {"value": 6, "name": "unorm8x4"},
+ {"value": 7, "name": "snorm8x2"},
+ {"value": 8, "name": "snorm8x4"},
+ {"value": 9, "name": "uint16x2"},
+ {"value": 10, "name": "uint16x4"},
+ {"value": 11, "name": "sint16x2"},
+ {"value": 12, "name": "sint16x4"},
+ {"value": 13, "name": "unorm16x2"},
+ {"value": 14, "name": "unorm16x4"},
+ {"value": 15, "name": "snorm16x2"},
+ {"value": 16, "name": "snorm16x4"},
+ {"value": 17, "name": "float16x2"},
+ {"value": 18, "name": "float16x4"},
+ {"value": 19, "name": "float32"},
+ {"value": 20, "name": "float32x2"},
+ {"value": 21, "name": "float32x3"},
+ {"value": 22, "name": "float32x4"},
+ {"value": 23, "name": "uint32"},
+ {"value": 24, "name": "uint32x2"},
+ {"value": 25, "name": "uint32x3"},
+ {"value": 26, "name": "uint32x4"},
+ {"value": 27, "name": "sint32"},
+ {"value": 28, "name": "sint32x2"},
+ {"value": 29, "name": "sint32x3"},
+ {"value": 30, "name": "sint32x4"},
+
+ {"value": 101, "name": "uChar2"},
+ {"value": 102, "name": "uChar4"},
+ {"value": 103, "name": "char2"},
+ {"value": 104, "name": "char4"},
+ {"value": 105, "name": "uChar2 norm"},
+ {"value": 106, "name": "uChar4 norm"},
+ {"value": 107, "name": "char2 norm"},
+ {"value": 108, "name": "char4 norm"},
+ {"value": 109, "name": "uShort2"},
+ {"value": 110, "name": "uShort4"},
+ {"value": 111, "name": "short2"},
+ {"value": 112, "name": "short4"},
+ {"value": 113, "name": "uShort2 norm"},
+ {"value": 114, "name": "uShort4 norm"},
+ {"value": 115, "name": "short2 norm"},
+ {"value": 116, "name": "short4 norm"},
+ {"value": 117, "name": "half2"},
+ {"value": 118, "name": "half4"},
+ {"value": 119, "name": "float"},
+ {"value": 120, "name": "float2"},
+ {"value": 121, "name": "float3"},
+ {"value": 122, "name": "float4"},
+ {"value": 123, "name": "uInt"},
+ {"value": 124, "name": "uInt2"},
+ {"value": 125, "name": "uInt3"},
+ {"value": 126, "name": "uInt4"},
+ {"value": 127, "name": "int"},
+ {"value": 128, "name": "int2"},
+ {"value": 129, "name": "int3"},
+ {"value": 130, "name": "int4"}
]
},
"ObjectType": {
diff --git a/chromium/third_party/dawn/dawn_wire.json b/chromium/third_party/dawn/dawn_wire.json
index 88d26e0c402..a6e39ed9ea7 100644
--- a/chromium/third_party/dawn/dawn_wire.json
+++ b/chromium/third_party/dawn/dawn_wire.json
@@ -14,6 +14,9 @@
"See the License for the specific language governing permissions and",
"limitations under the License."
],
+
+ "_doc": "See docs/codegen.md",
+
"commands": {
"buffer map async": [
{ "name": "buffer id", "type": "ObjectId" },
@@ -46,7 +49,7 @@
{ "name": "device id", "type": "ObjectId" },
{ "name": "request serial", "type": "uint64_t" },
{ "name": "pipeline object handle", "type": "ObjectHandle", "handle_type": "render pipeline"},
- { "name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"}
+ { "name": "descriptor", "type": "render pipeline descriptor 2", "annotation": "const*"}
],
"device pop error scope": [
{ "name": "device id", "type": "ObjectId" },
@@ -75,11 +78,15 @@
],
"queue write texture internal": [
{"name": "queue id", "type": "ObjectId" },
- {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "destination", "type": "image copy texture", "annotation": "const*"},
{"name": "data", "type": "uint8_t", "annotation": "const*", "length": "data size"},
{"name": "data size", "type": "uint64_t"},
{"name": "data layout", "type": "texture data layout", "annotation": "const*"},
{"name": "writeSize", "type": "extent 3D", "annotation": "const*"}
+ ],
+ "shader module get compilation info": [
+ { "name": "shader module id", "type": "ObjectId" },
+ { "name": "request serial", "type": "uint64_t" }
]
},
"return commands": {
@@ -130,6 +137,12 @@
{ "name": "queue", "type": "ObjectHandle", "handle_type": "queue" },
{ "name": "request serial", "type": "uint64_t" },
{ "name": "status", "type": "queue work done status" }
+ ],
+ "shader module get compilation info callback": [
+ { "name": "shader module", "type": "ObjectHandle", "handle_type": "shader module" },
+ { "name": "request serial", "type": "uint64_t" },
+ { "name": "status", "type": "compilation info request status" },
+ { "name": "info", "type": "compilation info", "annotation": "const*", "optional": true }
]
},
"special items": {
@@ -150,6 +163,7 @@
"DeviceSetUncapturedErrorCallback",
"FenceGetCompletedValue",
"FenceOnCompletion",
+ "ShaderModuleGetCompilationInfo",
"QueueOnSubmittedWorkDone",
"QueueWriteBuffer",
"QueueWriteTexture"
@@ -168,7 +182,8 @@
"Buffer",
"Device",
"Fence",
- "Queue"
+ "Queue",
+ "ShaderModule"
],
"server_custom_pre_handler_commands": [
"BufferDestroy",
diff --git a/chromium/third_party/dawn/docs/codegen.md b/chromium/third_party/dawn/docs/codegen.md
new file mode 100644
index 00000000000..9cdf40b064d
--- /dev/null
+++ b/chromium/third_party/dawn/docs/codegen.md
@@ -0,0 +1,88 @@
+# Dawn's code generators.
+
+Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. The generators start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation, and then feed that representation to [Jinja2](https://jinja.palletsprojects.com/) templates that generate the code. This is similar to the model/view separation in Web development.
+
+Generators are based on [generator_lib.py](../generator/generator_lib.py), which provides facilities for integrating with build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/master:out/Debug/gen/third_party/dawn/src/).
+
+## Dawn "JSON API" generators
+
+Most of the code generation is done from [`dawn.json`](../dawn.json) which is a JSON description of the WebGPU API with extra annotation used by some of the generators. The code for all the "Dawn JSON" generators is in [`dawn_json_generator.py`](../generator/dawn_json_generator.py) (with templates in the regular template dir).
+
+At this time it is used to generate:
+
+ - the `webgpu.h` C header
+ - the `webgpu_cpp.cpp/h` C++ wrapper over the C header
+ - libraries that implement `webgpu.h` by calling into a static or `thread_local` proc table
+ - parts of the [Emscripten](https://emscripten.org/) WebGPU implementation
+ - a GMock version of the API with its proc table for testing
+ - validation helper functions for dawn_native
+ - the definition of dawn_native's proc table
+ - dawn_native's internal version of the webgpu.h types
+ - a lot of dawn_wire parts, see below
+
+Internally `dawn.json` is a dictionary from the "canonical name" of things to their definition. The "canonical name" is a space-separated, (mostly) lower-case version of the name that's parsed into a `Name` Python object. That name can then be turned into various casings with `.CamelCase()`, `.SNAKE_CASE()`, etc. (for example, the canonical name `"bind group layout"` becomes `BindGroupLayout` or `BIND_GROUP_LAYOUT`). When `dawn.json` things reference each other, it is always via these "canonical names".
+
+The basic schema is that every entry is a thing with a `"category"` key that determines the sub-schema to apply to that thing. Categories and their sub-schemas are defined below. Several parts of the schema use the concept of a "record", which is a list of "record members": a combination of a type, a name and other metadata. For example, the list of arguments of a function is a record, and so is the list of members of a structure. This shared concept lets the dawn_wire generator produce code for structures and function calls in a very similar way.
+
+A **record** is a list of **record members**, each of which is a dictionary with the following schema (a short hypothetical example follows the list):
+ - `"name"` a string
+ - `"type"` a string, the name of the base type for this member
+ - `"annotation"` a string, default to "value". Define the C annotation to apply to the base type. Allowed annotations are `"value"` (the default), `"*"`, `"const*"`, `"const*const*"`
+ - `"length"` (default to 1 if not set), a string. Defines length of the array pointed to for pointer arguments. If not set the length is implicitly 1 (so not an array), but otherwise it can be set to the name of another member in the same record that will contain the length of the array (this is heavily used in the `fooCount` `foos` pattern in the API). As a special case `"strlen"` can be used for `const char*` record members to denote that the length should be determined with `strlen`.
+ - `"optional"` (default to false) a boolean that says whether this member is optional. Member records can be optional if they are pointers (otherwise dawn_wire will always try to dereference them), objects (otherwise dawn_wire will always try to encode their ID and crash), or if they have a `"default"` key. Optional pointers and objects will always default to `nullptr`.
+ - `"default"` (optional) a number or string. If set the record member will use that value as default value. Depending on the member's category it can be a number, a string containing a number, or the name of an enum/bitmask value.
+
+**`"native"`**, doesn't have any other key. This is used to define native types that can be referenced by name in other things.
+
+**`"typedef"`** (usually only used for gradual deprecations):
+ - `"name"`: the name of the things this is a typedef for.
+
+**`"enum"`** an `uint32_t`-based enum value.
+ - `"values"` an array of enum values. Each value is a dictionary containing:
+ - `"name"` a string
+ - `"value"` a number that can be decimal or hexadecimal
+ - `"jsrepr"` (optional) a string to allow overriding how this value map to Javascript for the Emscripten bits
+ - `"valid"` (defaults to true) a boolean that controls whether the dawn_native validation utilities will consider this enum value valid.
+
+**`"bitmask"`** an `uint32_t`-based bitmask. It is similar to **`"enum"`** but can be output differently.
+
+**`"callback"`** defines a function pointer type that can be used by other things (usually callbacks passed to method calls)
+ - `"args"` a **record**, so an array of **record members**
+
+**`"structure"`**
+ - `"members"` a **record**, so an array of **record members**
+ - `"extensible"` (defaults to false) a boolean defining if this is an "extensible" WebGPU structure (i.e. has `nextInChain`). "descriptor" structures should usually have this set to true.
+ - `"chained"` (defaults to false) a boolean defining if this is a structure that can be "chained" in a WebGPU structure (i.e. has `nextInChain` and `sType`)
+
+**`"object"`**
+ - `"methods"` an array of methods for this object. Note that "release" and "reference" don't need to be specified. Each method is a dictionary containing:
+ - `"name"` a string
+ - `"return_type"` (default to no return type) a string that's the name of the return type.
+ - `"arguments"` a **record**, so an array of **record members**
+
+## Dawn "wire" generators
+
+The generator for the pieces of dawn_wire needs additional data, which is found in [`dawn_wire.json`](../dawn_wire.json). Examples of pieces that are generated are:
+
+ - `WireCmd.cpp/.h` the most important piece: the meat of the serialization / deserialization code for WebGPU structures and commands
+ - `ServerHandlers/Doers.cpp` that does the complete handling of all regular WebGPU methods in the server
+ - `ApiProcs.cpp` that implements the complete handling of all regular WebGPU methods in the client
+
+Most of the WebGPU methods can be handled automatically by the wire client/server but some of them need custom handling (for example because they handle callbacks or need client-side state tracking). `dawn_wire.json` defines which methods need special handling, and extra wire commands that can be used by that special handling (and will get `WireCmd` support).
+
+The schema of `dawn_wire.json` is a dictionary with the following keys (a minimal sketch follows the list):
+ - `"commands"` an array of **records** defining extra client->server commands that can be used in special-cased code path.
+ - Each **record member** can have an extra `"skip_serialize"` key, a boolean that defaults to false and makes `WireCmd` skip that member in the on-wire format.
+ - `"return commands"` like `"commands"` but in revers, an array of **records** defining extra server->client commands
+ - `"special items"` a dictionary containing various lists of methods or object that require special handling in places in the dawn_wire autogenerated files
+ - `"client_side_structures"`: a list of structure that we shouldn't generate serialization/deserialization code for because they are client-side only
+ - `"client_handwritten_commands"`: a list of methods that are written manually and won't be automatically generated in the client
+ - `"client_side_commands"`: a list of methods that won't be automatically generated in the server. Gets added to `"client_handwritten_commands"`
+ - `"client_special_objects"`: a list of objects that need special manual state-tracking in the client and won't be autogenerated
+ - `"server_custom_pre_handler_commands"`: a list of methods that will run custom "pre-handlers" before calling the autogenerated handlers in the server
+ - `"server_handwrittten_commands"`: a list of methods that are written manually and won't be automatically generated in the server.
+ - `"server_reverse_object_lookup_objects"`: a list of objects for which the server will maintain an object -> ID mapping.
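+
+Putting these keys together, a minimal `dawn_wire.json` sketch (the command and object names here are hypothetical) could look like:
+
+```json
+{
+    "_comment": "Hypothetical sketch; see the real dawn_wire.json for the actual commands.",
+    "commands": {
+        "widget do thing": [
+            { "name": "widget id", "type": "ObjectId" },
+            { "name": "request serial", "type": "uint64_t" }
+        ]
+    },
+    "return commands": {
+        "widget do thing callback": [
+            { "name": "widget", "type": "ObjectHandle", "handle_type": "widget" },
+            { "name": "request serial", "type": "uint64_t" }
+        ]
+    },
+    "special items": {
+        "client_handwritten_commands": ["WidgetDoThing"],
+        "client_special_objects": ["Widget"]
+    }
+}
+```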
+
+## OpenGL loader generator
+
+The code to load OpenGL entrypoints from a `GetProcAddress` function is generated from [`gl.xml`](../third_party/khronos/gl.xml) and the [list of extensions](../src/dawn_native/opengl/supported_extensions.json) it supports.
diff --git a/chromium/third_party/dawn/docs/fuzzing.md b/chromium/third_party/dawn/docs/fuzzing.md
index 8c7b7baffa7..85219015a30 100644
--- a/chromium/third_party/dawn/docs/fuzzing.md
+++ b/chromium/third_party/dawn/docs/fuzzing.md
@@ -8,19 +8,11 @@ The `dawn_wire_server_and_frontend_fuzzer` sets up Dawn using the Null backend,
The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_frontend_fuzzer` but it runs using a Vulkan CPU backend such as Swiftshader. This fuzzer supports error injection by using the first bytes of the fuzzing input as a Vulkan call index for which to mock a failure.
-## Updating the Seed Corpus
+## Automatic Seed Corpus Generation
Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as a example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.
-The script [update_fuzzer_seed_corpus.sh](../scripts/update_fuzzer_seed_corpus.sh) can be used to capture a trace while running Dawn tests, and upload it to the existing fuzzer seed corpus. It does the following steps:
-1. Builds the provided test and fuzzer targets.
-2. Runs the provided test target with `--use-wire --wire-trace-dir=tmp_dir1 [additional_test_args]` to dump traces of the tests.
-3. Generates one variant of each trace for every possible error index, by running the fuzzer target with `--injected-error-testcase-dir=tmp_dir2 ...`.
-4. Minimizes all testcases by running the fuzzer target with `-merge=1 tmp_dir3 tmp_dir1 tmp_dir2`.
+Dawn has a CI builder, [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64), which runs on a periodic schedule. This bot runs `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can be done manually by running `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace per test, each prepended with the header `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error; if a fuzzer doesn't support error injection it skips the header. The bot then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories.
+Please see the [`dawn.py`](https://source.chromium.org/chromium/chromium/tools/build/+/master:recipes/recipes/dawn.py) recipe for specific details.
-To run the script:
-1. You must be in a Chromium checkout using the GN arg `use_libfuzzer=true`
-2. Run `./third_party/dawn/scripts/update_fuzzer_seed_corpus.sh <out_dir> <fuzzer> <test> [additional_test_args]`.
-
- Example: `./third_party/dawn/scripts/update_fuzzer_seed_corpus.sh out/fuzz dawn_wire_server_and_vulkan_backend_fuzzer dawn_end2end_tests --gtest_filter=*Vulkan`
-3. The script will print instructions for testing, and then uploading new inputs. Please, only upload inputs after testing the fuzzer with new inputs, and verifying there is a meaningful change in coverage. Uploading requires [gcloud](https://g3doc.corp.google.com/cloud/sdk/g3doc/index.md?cl=head) to be logged in with @google.com credentials: `gcloud auth login`.
+Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes. \ No newline at end of file
diff --git a/chromium/third_party/dawn/docs/overview.md b/chromium/third_party/dawn/docs/overview.md
index cf0c162fe8c..65dd003ebbf 100644
--- a/chromium/third_party/dawn/docs/overview.md
+++ b/chromium/third_party/dawn/docs/overview.md
@@ -50,6 +50,4 @@ Normally libraries implementing `webgpu.h` should implement function like `wgpuD
## Code generation
-When the WebGPU API evolves a lot of places in Dawn have to be updated, so to reduce efforts, Dawn relies heavily on code generation. The code generators are based on [Jinja2](https://jinja.palletsprojects.com/) and separate the model from the view like in Web development. The model is some JSON file, usually [`dawn.json`](../dawn.json) and the views are the Jinja2 templates in [`generator/templates`](../generator/templates). The generated files are not checked into the repository but instead are generated during the build.
-
-Most of the code generation is done in [`dawn_json_generator.py`](../generator/dawn_json_generator.py) but other generators exist so common functionality to build code generators has been extracted into[`generator_lib.py`](../generator/generator_lib.py).
+When the WebGPU API evolves, a lot of places in Dawn have to be updated, so to reduce effort, Dawn relies heavily on code generation for things like headers, proc tables and de/serialization. For more information, see [codegen.md](codegen.md).
diff --git a/chromium/third_party/dawn/examples/Animometer.cpp b/chromium/third_party/dawn/examples/Animometer.cpp
index 35b0d9277c8..67d34057a8c 100644
--- a/chromium/third_party/dawn/examples/Animometer.cpp
+++ b/chromium/third_party/dawn/examples/Animometer.cpp
@@ -55,73 +55,76 @@ void init() {
swapchain.Configure(GetPreferredSwapChainTextureFormat(), wgpu::TextureUsage::RenderAttachment,
640, 480);
- wgpu::ShaderModule vsModule =
- utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
- #version 450
-
- layout(std140, set = 0, binding = 0) uniform Constants {
- float scale;
- float time;
- float offsetX;
- float offsetY;
- float scalar;
- float scalarOffset;
- } c;
-
- layout(location = 0) out vec4 v_color;
-
- const vec4 positions[3] = vec4[3](
- vec4( 0.0f, 0.1f, 0.0f, 1.0f),
- vec4(-0.1f, -0.1f, 0.0f, 1.0f),
- vec4( 0.1f, -0.1f, 0.0f, 1.0f)
- );
-
- const vec4 colors[3] = vec4[3](
- vec4(1.0f, 0.0f, 0.0f, 1.0f),
- vec4(0.0f, 1.0f, 0.0f, 1.0f),
- vec4(0.0f, 0.0f, 1.0f, 1.0f)
- );
-
- void main() {
- vec4 position = positions[gl_VertexIndex];
- vec4 color = colors[gl_VertexIndex];
-
- float fade = mod(c.scalarOffset + c.time * c.scalar / 10.0, 1.0);
+ wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+ [[block]] struct Constants {
+ scale : f32;
+ time : f32;
+ offsetX : f32;
+ offsetY : f32;
+ scalar : f32;
+ scalarOffset : f32;
+ };
+ [[set(0), binding(0)]] var<uniform> c : Constants;
+
+ [[location(0)]] var<out> v_color : vec4<f32>;
+ [[builtin(vertex_idx)]] var<in> VertexIndex : u32;
+ [[builtin(position)]] var<out> Position : vec4<f32>;
+
+ [[stage(vertex)]] fn main() -> void {
+ var positions : array<vec4<f32>, 3> = array<vec4<f32>, 3>(
+ vec4<f32>( 0.0, 0.1, 0.0, 1.0),
+ vec4<f32>(-0.1, -0.1, 0.0, 1.0),
+ vec4<f32>( 0.1, -0.1, 0.0, 1.0)
+ );
+
+ var colors : array<vec4<f32>, 3> = array<vec4<f32>, 3>(
+ vec4<f32>(1.0, 0.0, 0.0, 1.0),
+ vec4<f32>(0.0, 1.0, 0.0, 1.0),
+ vec4<f32>(0.0, 0.0, 1.0, 1.0)
+ );
+
+ var position : vec4<f32> = positions[VertexIndex];
+ var color : vec4<f32> = colors[VertexIndex];
+
+ // TODO(dawn:572): Revisit once modf has been reworked in WGSL.
+ var fade : f32 = c.scalarOffset + c.time * c.scalar / 10.0;
+ fade = fade - floor(fade);
if (fade < 0.5) {
fade = fade * 2.0;
} else {
fade = (1.0 - fade) * 2.0;
}
- float xpos = position.x * c.scale;
- float ypos = position.y * c.scale;
- float angle = 3.14159 * 2.0 * fade;
- float xrot = xpos * cos(angle) - ypos * sin(angle);
- float yrot = xpos * sin(angle) + ypos * cos(angle);
+
+ var xpos : f32 = position.x * c.scale;
+ var ypos : f32 = position.y * c.scale;
+ const angle : f32 = 3.14159 * 2.0 * fade;
+ const xrot : f32 = xpos * cos(angle) - ypos * sin(angle);
+ const yrot : f32 = xpos * sin(angle) + ypos * cos(angle);
xpos = xrot + c.offsetX;
ypos = yrot + c.offsetY;
- v_color = vec4(fade, 1.0 - fade, 0.0, 1.0) + color;
- gl_Position = vec4(xpos, ypos, 0.0, 1.0);
+
+ v_color = vec4<f32>(fade, 1.0 - fade, 0.0, 1.0) + color;
+ Position = vec4<f32>(xpos, ypos, 0.0, 1.0);
})");
- wgpu::ShaderModule fsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
[[location(0)]] var<out> FragColor : vec4<f32>;
[[location(0)]] var<in> v_color : vec4<f32>;
[[stage(fragment)]] fn main() -> void {
FragColor = v_color;
- return;
})");
wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
- device, {{0, wgpu::ShaderStage::Vertex, wgpu::BindingType::UniformBuffer, true}});
+ device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform, true}});
- utils::ComboRenderPipelineDescriptor descriptor(device);
+ utils::ComboRenderPipelineDescriptor2 descriptor;
descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
- descriptor.vertexStage.module = vsModule;
- descriptor.cFragmentStage.module = fsModule;
- descriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
+ descriptor.vertex.module = vsModule;
+ descriptor.cFragment.module = fsModule;
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
- pipeline = device.CreateRenderPipeline(&descriptor);
+ pipeline = device.CreateRenderPipeline2(&descriptor);
shaderData.resize(kNumTriangles);
for (auto& data : shaderData) {
diff --git a/chromium/third_party/dawn/examples/CHelloTriangle.cpp b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
index 107ba59cc06..2de76572edb 100644
--- a/chromium/third_party/dawn/examples/CHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
@@ -48,7 +48,7 @@ void init() {
" Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);\n"
" return;\n"
"}\n";
- WGPUShaderModule vsModule = utils::CreateShaderModuleFromWGSL(device, vs).Release();
+ WGPUShaderModule vsModule = utils::CreateShaderModule(device, vs).Release();
const char* fs =
"[[location(0)]] var<out> fragColor : vec4<f32>;\n"
@@ -56,60 +56,51 @@ void init() {
" fragColor = vec4<f32>(1.0, 0.0, 0.0, 1.0);\n"
" return;\n"
"}\n";
- WGPUShaderModule fsModule = utils::CreateShaderModuleFromWGSL(device, fs).Release();
+ WGPUShaderModule fsModule = utils::CreateShaderModule(device, fs).Release();
{
- WGPURenderPipelineDescriptor descriptor = {};
-
- descriptor.vertexStage.module = vsModule;
- descriptor.vertexStage.entryPoint = "main";
-
- WGPUProgrammableStageDescriptor fragmentStage = {};
- fragmentStage.module = fsModule;
- fragmentStage.entryPoint = "main";
- descriptor.fragmentStage = &fragmentStage;
-
- descriptor.sampleCount = 1;
-
- WGPUBlendDescriptor blendDescriptor = {};
- blendDescriptor.operation = WGPUBlendOperation_Add;
- blendDescriptor.srcFactor = WGPUBlendFactor_One;
- blendDescriptor.dstFactor = WGPUBlendFactor_One;
- WGPUColorStateDescriptor colorStateDescriptor = {};
- colorStateDescriptor.format = swapChainFormat;
- colorStateDescriptor.alphaBlend = blendDescriptor;
- colorStateDescriptor.colorBlend = blendDescriptor;
- colorStateDescriptor.writeMask = WGPUColorWriteMask_All;
-
- descriptor.colorStateCount = 1;
- descriptor.colorStates = &colorStateDescriptor;
-
- WGPUPipelineLayoutDescriptor pl = {};
- pl.bindGroupLayoutCount = 0;
- pl.bindGroupLayouts = nullptr;
- descriptor.layout = wgpuDeviceCreatePipelineLayout(device, &pl);
-
- WGPUVertexStateDescriptor vertexState = {};
- vertexState.indexFormat = WGPUIndexFormat_Undefined;
- vertexState.vertexBufferCount = 0;
- vertexState.vertexBuffers = nullptr;
- descriptor.vertexState = &vertexState;
-
- WGPURasterizationStateDescriptor rasterizationState = {};
- rasterizationState.frontFace = WGPUFrontFace_CCW;
- rasterizationState.cullMode = WGPUCullMode_None;
- rasterizationState.depthBias = 0;
- rasterizationState.depthBiasSlopeScale = 0.0;
- rasterizationState.depthBiasClamp = 0.0;
- descriptor.rasterizationState = &rasterizationState;
-
- descriptor.primitiveTopology = WGPUPrimitiveTopology_TriangleList;
- descriptor.sampleMask = 0xFFFFFFFF;
- descriptor.alphaToCoverageEnabled = false;
-
- descriptor.depthStencilState = nullptr;
-
- pipeline = wgpuDeviceCreateRenderPipeline(device, &descriptor);
+ WGPURenderPipelineDescriptor2 descriptor = {};
+
+ // Fragment state
+ WGPUBlendState blend = {};
+ blend.color.operation = WGPUBlendOperation_Add;
+ blend.color.srcFactor = WGPUBlendFactor_One;
+ blend.color.dstFactor = WGPUBlendFactor_One;
+ blend.alpha.operation = WGPUBlendOperation_Add;
+ blend.alpha.srcFactor = WGPUBlendFactor_One;
+ blend.alpha.dstFactor = WGPUBlendFactor_One;
+
+ WGPUColorTargetState colorTarget = {};
+ colorTarget.format = swapChainFormat;
+ colorTarget.blend = &blend;
+ colorTarget.writeMask = WGPUColorWriteMask_All;
+
+ WGPUFragmentState fragment = {};
+ fragment.module = fsModule;
+ fragment.entryPoint = "main";
+ fragment.targetCount = 1;
+ fragment.targets = &colorTarget;
+ descriptor.fragment = &fragment;
+
+ // Other state
+ descriptor.layout = nullptr;
+ descriptor.depthStencil = nullptr;
+
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.entryPoint = "main";
+ descriptor.vertex.bufferCount = 0;
+ descriptor.vertex.buffers = nullptr;
+
+ descriptor.multisample.count = 1;
+ descriptor.multisample.mask = 0xFFFFFFFF;
+ descriptor.multisample.alphaToCoverageEnabled = false;
+
+ descriptor.primitive.frontFace = WGPUFrontFace_CCW;
+ descriptor.primitive.cullMode = WGPUCullMode_None;
+ descriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+ descriptor.primitive.stripIndexFormat = WGPUIndexFormat_Undefined;
+
+ pipeline = wgpuDeviceCreateRenderPipeline2(device, &descriptor);
}
wgpuShaderModuleRelease(vsModule);
diff --git a/chromium/third_party/dawn/examples/ComputeBoids.cpp b/chromium/third_party/dawn/examples/ComputeBoids.cpp
index cb437d99117..9cf7245b245 100644
--- a/chromium/third_party/dawn/examples/ComputeBoids.cpp
+++ b/chromium/third_party/dawn/examples/ComputeBoids.cpp
@@ -95,7 +95,7 @@ void initBuffers() {
}
void initRender() {
- wgpu::ShaderModule vsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
[[location(0)]] var<in> a_particlePos : vec2<f32>;
[[location(1)]] var<in> a_particleVel : vec2<f32>;
[[location(2)]] var<in> a_pos : vec2<f32>;
@@ -112,7 +112,7 @@ void initRender() {
}
)");
- wgpu::ShaderModule fsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
[[location(0)]] var<out> FragColor : vec4<f32>;
[[stage(fragment)]]
fn main() -> void {
@@ -123,53 +123,53 @@ void initRender() {
depthStencilView = CreateDefaultDepthStencilView(device);
- utils::ComboRenderPipelineDescriptor descriptor(device);
- descriptor.vertexStage.module = vsModule;
- descriptor.cFragmentStage.module = fsModule;
-
- descriptor.cVertexState.vertexBufferCount = 2;
- descriptor.cVertexState.cVertexBuffers[0].arrayStride = sizeof(Particle);
- descriptor.cVertexState.cVertexBuffers[0].stepMode = wgpu::InputStepMode::Instance;
- descriptor.cVertexState.cVertexBuffers[0].attributeCount = 2;
- descriptor.cVertexState.cAttributes[0].offset = offsetof(Particle, pos);
- descriptor.cVertexState.cAttributes[0].format = wgpu::VertexFormat::Float2;
- descriptor.cVertexState.cAttributes[1].shaderLocation = 1;
- descriptor.cVertexState.cAttributes[1].offset = offsetof(Particle, vel);
- descriptor.cVertexState.cAttributes[1].format = wgpu::VertexFormat::Float2;
- descriptor.cVertexState.cVertexBuffers[1].arrayStride = sizeof(glm::vec2);
- descriptor.cVertexState.cVertexBuffers[1].attributeCount = 1;
- descriptor.cVertexState.cVertexBuffers[1].attributes = &descriptor.cVertexState.cAttributes[2];
- descriptor.cVertexState.cAttributes[2].shaderLocation = 2;
- descriptor.cVertexState.cAttributes[2].format = wgpu::VertexFormat::Float2;
- descriptor.depthStencilState = &descriptor.cDepthStencilState;
- descriptor.cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
- descriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
-
- renderPipeline = device.CreateRenderPipeline(&descriptor);
+ utils::ComboRenderPipelineDescriptor2 descriptor;
+
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.bufferCount = 2;
+ descriptor.cBuffers[0].arrayStride = sizeof(Particle);
+ descriptor.cBuffers[0].stepMode = wgpu::InputStepMode::Instance;
+ descriptor.cBuffers[0].attributeCount = 2;
+ descriptor.cAttributes[0].offset = offsetof(Particle, pos);
+ descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+ descriptor.cAttributes[1].shaderLocation = 1;
+ descriptor.cAttributes[1].offset = offsetof(Particle, vel);
+ descriptor.cAttributes[1].format = wgpu::VertexFormat::Float32x2;
+ descriptor.cBuffers[1].arrayStride = sizeof(glm::vec2);
+ descriptor.cBuffers[1].attributeCount = 1;
+ descriptor.cBuffers[1].attributes = &descriptor.cAttributes[2];
+ descriptor.cAttributes[2].shaderLocation = 2;
+ descriptor.cAttributes[2].format = wgpu::VertexFormat::Float32x2;
+
+ descriptor.cFragment.module = fsModule;
+ descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+ renderPipeline = device.CreateRenderPipeline2(&descriptor);
}
void initSim() {
- wgpu::ShaderModule module = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
struct Particle {
- [[offset(0)]] pos : vec2<f32>;
- [[offset(8)]] vel : vec2<f32>;
+ pos : vec2<f32>;
+ vel : vec2<f32>;
};
[[block]] struct SimParams {
- [[offset(0)]] deltaT : f32;
- [[offset(4)]] rule1Distance : f32;
- [[offset(8)]] rule2Distance : f32;
- [[offset(12)]] rule3Distance : f32;
- [[offset(16)]] rule1Scale : f32;
- [[offset(20)]] rule2Scale : f32;
- [[offset(24)]] rule3Scale : f32;
- [[offset(28)]] particleCount : u32;
+ deltaT : f32;
+ rule1Distance : f32;
+ rule2Distance : f32;
+ rule3Distance : f32;
+ rule1Scale : f32;
+ rule2Scale : f32;
+ rule3Scale : f32;
+ particleCount : u32;
};
[[block]] struct Particles {
- [[offset(0)]] particles : [[stride(16)]] array<Particle>;
+ particles : array<Particle>;
};
[[binding(0), group(0)]] var<uniform> params : SimParams;
- [[binding(1), group(0)]] var<storage_buffer> particlesA : [[access(read)]] Particles;
- [[binding(2), group(0)]] var<storage_buffer> particlesB : [[access(read_write)]] Particles;
+ [[binding(1), group(0)]] var<storage> particlesA : [[access(read)]] Particles;
+ [[binding(2), group(0)]] var<storage> particlesB : [[access(read_write)]] Particles;
[[builtin(global_invocation_id)]] var<in> GlobalInvocationID : vec3<u32>;
// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
@@ -247,9 +247,9 @@ void initSim() {
auto bgl = utils::MakeBindGroupLayout(
device, {
- {0, wgpu::ShaderStage::Compute, wgpu::BindingType::UniformBuffer},
- {1, wgpu::ShaderStage::Compute, wgpu::BindingType::StorageBuffer},
- {2, wgpu::ShaderStage::Compute, wgpu::BindingType::StorageBuffer},
+ {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+ {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+ {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
});
wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
diff --git a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
index 510f181ef70..b65839cb23b 100644
--- a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
@@ -55,7 +55,7 @@ void initTextures() {
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = 1024;
descriptor.size.height = 1024;
- descriptor.size.depth = 1;
+ descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
descriptor.mipLevelCount = 1;
@@ -72,12 +72,13 @@ void initTextures() {
wgpu::Buffer stagingBuffer = utils::CreateBufferFromData(
device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);
- wgpu::BufferCopyView bufferCopyView = utils::CreateBufferCopyView(stagingBuffer, 0, 4 * 1024);
- wgpu::TextureCopyView textureCopyView = utils::CreateTextureCopyView(texture, 0, {0, 0, 0});
+ wgpu::ImageCopyBuffer imageCopyBuffer =
+ utils::CreateImageCopyBuffer(stagingBuffer, 0, 4 * 1024);
+ wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
wgpu::Extent3D copySize = {1024, 1024, 1};
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
- encoder.CopyBufferToTexture(&bufferCopyView, &textureCopyView, &copySize);
+ encoder.CopyBufferToTexture(&imageCopyBuffer, &imageCopyTexture, &copySize);
wgpu::CommandBuffer copy = encoder.Finish();
queue.Submit(1, &copy);
@@ -94,7 +95,7 @@ void init() {
initBuffers();
initTextures();
- wgpu::ShaderModule vsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
[[builtin(position)]] var<out> Position : vec4<f32>;
[[location(0)]] var<in> pos : vec4<f32>;
[[stage(vertex)]] fn main() -> void {
@@ -102,7 +103,7 @@ void init() {
return;
})");
- wgpu::ShaderModule fsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
[[builtin(frag_coord)]] var<in> FragCoord : vec4<f32>;
[[group(0), binding(0)]] var mySampler: sampler;
[[group(0), binding(1)]] var myTexture : texture_2d<f32>;
@@ -115,27 +116,26 @@ void init() {
auto bgl = utils::MakeBindGroupLayout(
device, {
- {0, wgpu::ShaderStage::Fragment, wgpu::BindingType::Sampler},
- {1, wgpu::ShaderStage::Fragment, wgpu::BindingType::SampledTexture},
+ {0, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+ {1, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
});
wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
depthStencilView = CreateDefaultDepthStencilView(device);
- utils::ComboRenderPipelineDescriptor descriptor(device);
+ utils::ComboRenderPipelineDescriptor2 descriptor;
descriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl);
- descriptor.vertexStage.module = vsModule;
- descriptor.cFragmentStage.module = fsModule;
- descriptor.cVertexState.vertexBufferCount = 1;
- descriptor.cVertexState.cVertexBuffers[0].arrayStride = 4 * sizeof(float);
- descriptor.cVertexState.cVertexBuffers[0].attributeCount = 1;
- descriptor.cVertexState.cAttributes[0].format = wgpu::VertexFormat::Float4;
- descriptor.depthStencilState = &descriptor.cDepthStencilState;
- descriptor.cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
- descriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
-
- pipeline = device.CreateRenderPipeline(&descriptor);
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.bufferCount = 1;
+ descriptor.cBuffers[0].arrayStride = 4 * sizeof(float);
+ descriptor.cBuffers[0].attributeCount = 1;
+ descriptor.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
+ descriptor.cFragment.module = fsModule;
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+ descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+
+ pipeline = device.CreateRenderPipeline2(&descriptor);
wgpu::TextureView view = texture.CreateView();
diff --git a/chromium/third_party/dawn/examples/CubeReflection.cpp b/chromium/third_party/dawn/examples/CubeReflection.cpp
index 34691bff9ed..e50bc337d7f 100644
--- a/chromium/third_party/dawn/examples/CubeReflection.cpp
+++ b/chromium/third_party/dawn/examples/CubeReflection.cpp
@@ -101,15 +101,15 @@ void init() {
initBuffers();
- wgpu::ShaderModule vsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
[[block]] struct Camera {
- [[offset(0)]] view : mat4x4<f32>;
- [[offset(64)]] proj : mat4x4<f32>;
+ view : mat4x4<f32>;
+ proj : mat4x4<f32>;
};
[[group(0), binding(0)]] var<uniform> camera : Camera;
[[block]] struct Model {
- [[offset(0)]] matrix : mat4x4<f32>;
+ matrix : mat4x4<f32>;
};
[[group(0), binding(1)]] var<uniform> model : Model;
@@ -125,7 +125,7 @@ void init() {
return;
})");
- wgpu::ShaderModule fsModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
[[location(0)]] var<out> FragColor : vec4<f32>;
[[location(2)]] var<in> f_col : vec3<f32>;
@@ -134,7 +134,7 @@ void init() {
return;
})");
- wgpu::ShaderModule fsReflectionModule = utils::CreateShaderModuleFromWGSL(device, R"(
+ wgpu::ShaderModule fsReflectionModule = utils::CreateShaderModule(device, R"(
[[location(0)]] var<out> FragColor : vec4<f32>;
[[location(2)]] var<in> f_col : vec3<f32>;
@@ -143,20 +143,23 @@ void init() {
return;
})");
- utils::ComboVertexStateDescriptor vertexState;
- vertexState.cVertexBuffers[0].attributeCount = 2;
- vertexState.cAttributes[0].format = wgpu::VertexFormat::Float3;
- vertexState.cAttributes[1].shaderLocation = 1;
- vertexState.cAttributes[1].offset = 3 * sizeof(float);
- vertexState.cAttributes[1].format = wgpu::VertexFormat::Float3;
+ wgpu::VertexAttribute attributes[2];
+ attributes[0].shaderLocation = 0;
+ attributes[0].offset = 0;
+ attributes[0].format = wgpu::VertexFormat::Float32x3;
+ attributes[1].shaderLocation = 1;
+ attributes[1].offset = 3 * sizeof(float);
+ attributes[1].format = wgpu::VertexFormat::Float32x3;
- vertexState.vertexBufferCount = 1;
- vertexState.cVertexBuffers[0].arrayStride = 6 * sizeof(float);
+ wgpu::VertexBufferLayout vertexBufferLayout;
+ vertexBufferLayout.attributeCount = 2;
+ vertexBufferLayout.attributes = attributes;
+ vertexBufferLayout.arrayStride = 6 * sizeof(float);
auto bgl = utils::MakeBindGroupLayout(
device, {
- {0, wgpu::ShaderStage::Vertex, wgpu::BindingType::UniformBuffer},
- {1, wgpu::ShaderStage::Vertex, wgpu::BindingType::UniformBuffer},
+ {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+ {1, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
});
wgpu::PipelineLayout pl = utils::MakeBasicPipelineLayout(device, &bgl);
@@ -184,49 +187,64 @@ void init() {
depthStencilView = CreateDefaultDepthStencilView(device);
- utils::ComboRenderPipelineDescriptor descriptor(device);
- descriptor.layout = pl;
- descriptor.vertexStage.module = vsModule;
- descriptor.cFragmentStage.module = fsModule;
- descriptor.vertexState = &vertexState;
- descriptor.depthStencilState = &descriptor.cDepthStencilState;
- descriptor.cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
- descriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
- descriptor.cDepthStencilState.depthWriteEnabled = true;
- descriptor.cDepthStencilState.depthCompare = wgpu::CompareFunction::Less;
-
- pipeline = device.CreateRenderPipeline(&descriptor);
-
- utils::ComboRenderPipelineDescriptor pDescriptor(device);
- pDescriptor.layout = pl;
- pDescriptor.vertexStage.module = vsModule;
- pDescriptor.cFragmentStage.module = fsModule;
- pDescriptor.vertexState = &vertexState;
- pDescriptor.depthStencilState = &pDescriptor.cDepthStencilState;
- pDescriptor.cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
- pDescriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
- pDescriptor.cDepthStencilState.stencilFront.passOp = wgpu::StencilOperation::Replace;
- pDescriptor.cDepthStencilState.stencilBack.passOp = wgpu::StencilOperation::Replace;
- pDescriptor.cDepthStencilState.depthCompare = wgpu::CompareFunction::Less;
-
- planePipeline = device.CreateRenderPipeline(&pDescriptor);
-
- utils::ComboRenderPipelineDescriptor rfDescriptor(device);
- rfDescriptor.layout = pl;
- rfDescriptor.vertexStage.module = vsModule;
- rfDescriptor.cFragmentStage.module = fsReflectionModule;
- rfDescriptor.vertexState = &vertexState;
- rfDescriptor.depthStencilState = &rfDescriptor.cDepthStencilState;
- rfDescriptor.cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
- rfDescriptor.cColorStates[0].format = GetPreferredSwapChainTextureFormat();
- rfDescriptor.cDepthStencilState.stencilFront.compare = wgpu::CompareFunction::Equal;
- rfDescriptor.cDepthStencilState.stencilBack.compare = wgpu::CompareFunction::Equal;
- rfDescriptor.cDepthStencilState.stencilFront.passOp = wgpu::StencilOperation::Replace;
- rfDescriptor.cDepthStencilState.stencilBack.passOp = wgpu::StencilOperation::Replace;
- rfDescriptor.cDepthStencilState.depthWriteEnabled = true;
- rfDescriptor.cDepthStencilState.depthCompare = wgpu::CompareFunction::Less;
-
- reflectionPipeline = device.CreateRenderPipeline(&rfDescriptor);
+ {
+ utils::ComboRenderPipelineDescriptor2 descriptor;
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.bufferCount = 1;
+ descriptor.vertex.buffers = &vertexBufferLayout;
+
+ descriptor.layout = pl;
+ descriptor.cFragment.module = fsModule;
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+ wgpu::DepthStencilState* depthStencil =
+ descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+ depthStencil->depthWriteEnabled = true;
+ depthStencil->depthCompare = wgpu::CompareFunction::Less;
+
+ pipeline = device.CreateRenderPipeline2(&descriptor);
+ }
+
+ {
+ utils::ComboRenderPipelineDescriptor2 descriptor;
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.bufferCount = 1;
+ descriptor.vertex.buffers = &vertexBufferLayout;
+
+ descriptor.layout = pl;
+ descriptor.cFragment.module = fsModule;
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+ wgpu::DepthStencilState* depthStencil =
+ descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+ depthStencil->stencilFront.passOp = wgpu::StencilOperation::Replace;
+ depthStencil->stencilBack.passOp = wgpu::StencilOperation::Replace;
+ depthStencil->depthCompare = wgpu::CompareFunction::Less;
+
+ planePipeline = device.CreateRenderPipeline2(&descriptor);
+ }
+
+ {
+ utils::ComboRenderPipelineDescriptor2 descriptor;
+ descriptor.vertex.module = vsModule;
+ descriptor.vertex.bufferCount = 1;
+ descriptor.vertex.buffers = &vertexBufferLayout;
+
+ descriptor.layout = pl;
+ descriptor.cFragment.module = fsReflectionModule;
+ descriptor.cTargets[0].format = GetPreferredSwapChainTextureFormat();
+
+ wgpu::DepthStencilState* depthStencil =
+ descriptor.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
+ depthStencil->stencilFront.compare = wgpu::CompareFunction::Equal;
+ depthStencil->stencilBack.compare = wgpu::CompareFunction::Equal;
+ depthStencil->stencilFront.passOp = wgpu::StencilOperation::Replace;
+ depthStencil->stencilBack.passOp = wgpu::StencilOperation::Replace;
+ depthStencil->depthWriteEnabled = true;
+ depthStencil->depthCompare = wgpu::CompareFunction::Less;
+
+ reflectionPipeline = device.CreateRenderPipeline2(&descriptor);
+ }
cameraData.proj = glm::perspective(glm::radians(45.0f), 1.f, 1.0f, 100.0f);
}
diff --git a/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
index 771bb95f769..c99bf047567 100644
--- a/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
+++ b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
@@ -311,8 +311,8 @@ int main(int argc, const char* argv[]) {
queue = device.GetQueue();
// The hacky pipeline to render a triangle.
- utils::ComboRenderPipelineDescriptor pipelineDesc(device);
- pipelineDesc.vertexStage.module = utils::CreateShaderModuleFromWGSL(device, R"(
+ utils::ComboRenderPipelineDescriptor2 pipelineDesc;
+ pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
[[builtin(vertex_index)]] var<in> VertexIndex : u32;
[[builtin(position)]] var<out> Position : vec4<f32>;
const pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
@@ -324,16 +324,15 @@ int main(int argc, const char* argv[]) {
Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
return;
})");
- pipelineDesc.cFragmentStage.module = utils::CreateShaderModuleFromWGSL(device, R"(
+ pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
[[location(0)]] var<out> fragColor : vec4<f32>;
[[stage(fragment)]] fn main() -> void {
fragColor = vec4<f32>(1.0, 0.0, 0.0, 1.0);
return;
})");
- pipelineDesc.colorStateCount = 1;
// BGRA shouldn't be hardcoded. Consider having a map[format -> pipeline].
- pipelineDesc.cColorStates[0].format = wgpu::TextureFormat::BGRA8Unorm;
- trianglePipeline = device.CreateRenderPipeline(&pipelineDesc);
+ pipelineDesc.cTargets[0].format = wgpu::TextureFormat::BGRA8Unorm;
+ trianglePipeline = device.CreateRenderPipeline2(&pipelineDesc);
// Craete the first window, since the example exits when there are no windows.
AddWindow();
diff --git a/chromium/third_party/dawn/examples/SampleUtils.cpp b/chromium/third_party/dawn/examples/SampleUtils.cpp
index 59ab852c7f2..3a0372818f7 100644
--- a/chromium/third_party/dawn/examples/SampleUtils.cpp
+++ b/chromium/third_party/dawn/examples/SampleUtils.cpp
@@ -193,7 +193,7 @@ wgpu::TextureView CreateDefaultDepthStencilView(const wgpu::Device& device) {
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = 640;
descriptor.size.height = 480;
- descriptor.size.depth = 1;
+ descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
descriptor.mipLevelCount = 1;
@@ -250,7 +250,7 @@ bool InitSample(int argc, const char** argv) {
}
if (std::string("-h") == argv[i] || std::string("--help") == argv[i]) {
printf("Usage: %s [-b BACKEND] [-c COMMAND_BUFFER]\n", argv[0]);
- printf(" BACKEND is one of: d3d12, metal, null, opengl, vulkan\n");
+ printf(" BACKEND is one of: d3d12, metal, null, opengl, opengles, vulkan\n");
printf(" COMMAND_BUFFER is one of: none, terrible\n");
return false;
}
diff --git a/chromium/third_party/dawn/generator/dawn_json_generator.py b/chromium/third_party/dawn/generator/dawn_json_generator.py
index 8af22721f65..84a2b99c1a6 100644
--- a/chromium/third_party/dawn/generator/dawn_json_generator.py
+++ b/chromium/third_party/dawn/generator/dawn_json_generator.py
@@ -593,7 +593,7 @@ def as_frontendType(typ):
def as_wireType(typ):
if typ.category == 'object':
return typ.name.CamelCase() + '*'
- elif typ.category in ['bitmask', 'enum']:
+ elif typ.category in ['bitmask', 'enum', 'structure']:
return 'WGPU' + typ.name.CamelCase()
else:
return as_cppType(typ.name)
diff --git a/chromium/third_party/dawn/generator/extract_json.py b/chromium/third_party/dawn/generator/extract_json.py
index 4afefae946a..67114bf4fb6 100644
--- a/chromium/third_party/dawn/generator/extract_json.py
+++ b/chromium/third_party/dawn/generator/extract_json.py
@@ -27,9 +27,18 @@ if __name__ == "__main__":
for (name, content) in files.items():
output_file = output_dir + os.path.sep + name
+ # Create the output directory if needed.
directory = os.path.dirname(output_file)
if not os.path.exists(directory):
os.makedirs(directory)
+ # Skip writing to the file if it already has the correct content.
+ try:
+ with open(output_file, 'r') as outfile:
+ if outfile.read() == content:
+ continue
+ except (OSError, EnvironmentError):
+ pass
+
with open(output_file, 'w') as outfile:
outfile.write(content)
diff --git a/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp b/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
index defae2df3fa..3b3bd74f9cd 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
@@ -64,7 +64,7 @@ namespace dawn_native {
{% if method.return_type.name.canonical_case() != "void" %}
auto result =
{%- endif %}
- self->{{method.name.CamelCase()}}(
+ self->API{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{{as_varName(arg.name)}}_
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.cpp b/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.cpp
index 07c5e224841..4816ee122c7 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.cpp
@@ -16,6 +16,7 @@
#include "common/Assert.h"
#include "common/Log.h"
+#include "dawn_wire/BufferConsumer_impl.h"
#include "dawn_wire/Wire.h"
#include <algorithm>
@@ -60,9 +61,9 @@
{% elif member.type.category == "structure"%}
{%- set Provider = ", provider" if member.type.may_have_dawn_object else "" -%}
{% if member.annotation == "const*const*" %}
- {{as_cType(member.type.name)}}Serialize(*{{in}}, &{{out}}, buffer{{Provider}});
+ WIRE_TRY({{as_cType(member.type.name)}}Serialize(*{{in}}, &{{out}}, buffer{{Provider}}));
{% else %}
- {{as_cType(member.type.name)}}Serialize({{in}}, &{{out}}, buffer{{Provider}});
+ WIRE_TRY({{as_cType(member.type.name)}}Serialize({{in}}, &{{out}}, buffer{{Provider}}));
{% endif %}
{%- else -%}
{{out}} = {{in}};
@@ -73,9 +74,9 @@
{% macro deserialize_member(member, in, out) %}
{%- if member.type.category == "object" -%}
{%- set Optional = "Optional" if member.optional else "" -%}
- DESERIALIZE_TRY(resolver.Get{{Optional}}FromId({{in}}, &{{out}}));
+ WIRE_TRY(resolver.Get{{Optional}}FromId({{in}}, &{{out}}));
{%- elif member.type.category == "structure" -%}
- DESERIALIZE_TRY({{as_cType(member.type.name)}}Deserialize(&{{out}}, &{{in}}, deserializeBuffer, allocator
+ WIRE_TRY({{as_cType(member.type.name)}}Deserialize(&{{out}}, &{{in}}, deserializeBuffer, allocator
{%- if member.type.may_have_dawn_object -%}
, resolver
{%- endif -%}
@@ -203,7 +204,7 @@ namespace {
//* Serializes `record` into `transfer`, using `buffer` to get more space for pointed-to data
//* and `provider` to serialize objects.
- DAWN_DECLARE_UNUSED bool {{Return}}{{name}}Serialize(const {{Return}}{{name}}{{Cmd}}& record, {{Return}}{{name}}Transfer* transfer,
+ DAWN_DECLARE_UNUSED WireResult {{Return}}{{name}}Serialize(const {{Return}}{{name}}{{Cmd}}& record, {{Return}}{{name}}Transfer* transfer,
SerializeBuffer* buffer
{%- if record.may_have_dawn_object -%}
, const ObjectIdProvider& provider
@@ -225,7 +226,7 @@ namespace {
{% if record.extensible %}
if (record.nextInChain != nullptr) {
transfer->hasNextInChain = true;
- SERIALIZE_TRY(SerializeChainedStruct(record.nextInChain, buffer, provider));
+ WIRE_TRY(SerializeChainedStruct(record.nextInChain, buffer, provider));
} else {
transfer->hasNextInChain = false;
}
@@ -250,7 +251,7 @@ namespace {
transfer->{{memberName}}Strlen = std::strlen(record.{{memberName}});
char* stringInBuffer;
- SERIALIZE_TRY(buffer->NextN(transfer->{{memberName}}Strlen, &stringInBuffer));
+ WIRE_TRY(buffer->NextN(transfer->{{memberName}}Strlen, &stringInBuffer));
memcpy(stringInBuffer, record.{{memberName}}, transfer->{{memberName}}Strlen);
}
{% endfor %}
@@ -268,7 +269,7 @@ namespace {
auto memberLength = {{member_length(member, "record.")}};
{{member_transfer_type(member)}}* memberBuffer;
- SERIALIZE_TRY(buffer->NextN(memberLength, &memberBuffer));
+ WIRE_TRY(buffer->NextN(memberLength, &memberBuffer));
//* This loop cannot overflow because it iterates up to |memberLength|. Even if
//* memberLength were the maximum integer value, |i| would become equal to it just before
@@ -278,14 +279,14 @@ namespace {
}
}
{% endfor %}
- return true;
+ return WireResult::Success;
}
DAWN_UNUSED_FUNC({{Return}}{{name}}Serialize);
//* Deserializes `transfer` into `record` getting more serialized data from `buffer` and `size`
//* if needed, using `allocator` to store pointed-to values and `resolver` to translate object
//* Ids to actual objects.
- DAWN_DECLARE_UNUSED DeserializeResult {{Return}}{{name}}Deserialize({{Return}}{{name}}{{Cmd}}* record, const volatile {{Return}}{{name}}Transfer* transfer,
+ DAWN_DECLARE_UNUSED WireResult {{Return}}{{name}}Deserialize({{Return}}{{name}}{{Cmd}}* record, const volatile {{Return}}{{name}}Transfer* transfer,
DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator
{%- if record.may_have_dawn_object -%}
, const ObjectIdResolver& resolver
@@ -310,7 +311,7 @@ namespace {
{% if record.extensible %}
record->nextInChain = nullptr;
if (transfer->hasNextInChain) {
- DESERIALIZE_TRY(DeserializeChainedStruct(&record->nextInChain, deserializeBuffer, allocator, resolver));
+ WIRE_TRY(DeserializeChainedStruct(&record->nextInChain, deserializeBuffer, allocator, resolver));
}
{% endif %}
@@ -336,15 +337,15 @@ namespace {
if (stringLength64 >= std::numeric_limits<size_t>::max()) {
//* Cannot allocate space for the string. It can be at most
//* size_t::max() - 1. We need 1 byte for the null-terminator.
- return DeserializeResult::FatalError;
+ return WireResult::FatalError;
}
size_t stringLength = static_cast<size_t>(stringLength64);
const volatile char* stringInBuffer;
- DESERIALIZE_TRY(deserializeBuffer->ReadN(stringLength, &stringInBuffer));
+ WIRE_TRY(deserializeBuffer->ReadN(stringLength, &stringInBuffer));
char* copiedString;
- DESERIALIZE_TRY(GetSpace(allocator, stringLength + 1, &copiedString));
+ WIRE_TRY(GetSpace(allocator, stringLength + 1, &copiedString));
//* We can cast away the volatile qualifier because DeserializeBuffer::ReadN already
//* validated that the range [stringInBuffer, stringInBuffer + stringLength) is valid.
//* memcpy may have an unknown access pattern, but this is fine since the string is only
@@ -367,13 +368,14 @@ namespace {
{
auto memberLength = {{member_length(member, "record->")}};
const volatile {{member_transfer_type(member)}}* memberBuffer;
- DESERIALIZE_TRY(deserializeBuffer->ReadN(memberLength, &memberBuffer));
+ WIRE_TRY(deserializeBuffer->ReadN(memberLength, &memberBuffer));
{{as_cType(member.type.name)}}* copiedMembers;
- DESERIALIZE_TRY(GetSpace(allocator, memberLength, &copiedMembers));
+ WIRE_TRY(GetSpace(allocator, memberLength, &copiedMembers));
{% if member.annotation == "const*const*" %}
{{as_cType(member.type.name)}}** pointerArray;
- DESERIALIZE_TRY(GetSpace(allocator, memberLength, &pointerArray));
+ WIRE_TRY(GetSpace(allocator, memberLength, &pointerArray));
+
//* This loop cannot overflow because it iterates up to |memberLength|. Even if
//* memberLength were the maximum integer value, |i| would become equal to it just before
//* exiting the loop, but not increment past or wrap around.
@@ -394,7 +396,7 @@ namespace {
}
{% endfor %}
- return DeserializeResult::Success;
+ return WireResult::Success;
}
DAWN_UNUSED_FUNC({{Return}}{{name}}Deserialize);
{% endmacro %}
@@ -409,30 +411,30 @@ namespace {
return size;
}
- bool {{Cmd}}::Serialize(size_t commandSize, SerializeBuffer* buffer
+ WireResult {{Cmd}}::Serialize(size_t commandSize, SerializeBuffer* buffer
{%- if not is_return -%}
, const ObjectIdProvider& objectIdProvider
{%- endif -%}
) const {
{{Name}}Transfer* transfer;
- SERIALIZE_TRY(buffer->Next(&transfer));
+ WIRE_TRY(buffer->Next(&transfer));
transfer->commandSize = commandSize;
- SERIALIZE_TRY({{Name}}Serialize(*this, transfer, buffer
+ WIRE_TRY({{Name}}Serialize(*this, transfer, buffer
{%- if command.may_have_dawn_object -%}
, objectIdProvider
{%- endif -%}
));
- return true;
+ return WireResult::Success;
}
- DeserializeResult {{Cmd}}::Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator
+ WireResult {{Cmd}}::Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator
{%- if command.may_have_dawn_object -%}
, const ObjectIdResolver& resolver
{%- endif -%}
) {
const volatile {{Name}}Transfer* transfer;
- DESERIALIZE_TRY(deserializeBuffer->Read(&transfer));
+ WIRE_TRY(deserializeBuffer->Read(&transfer));
return {{Name}}Deserialize(this, transfer, deserializeBuffer, allocator
{%- if command.may_have_dawn_object -%}
@@ -444,22 +446,6 @@ namespace {
namespace dawn_wire {
- // Macro to simplify error handling, similar to DAWN_TRY but for DeserializeResult.
-#define DESERIALIZE_TRY(EXPR) \
- do { \
- DeserializeResult exprResult = EXPR; \
- if (exprResult != DeserializeResult::Success) { \
- return exprResult; \
- } \
- } while (0)
-
-#define SERIALIZE_TRY(EXPR) \
- do { \
- if (!(EXPR)) { \
- return false; \
- } \
- } while (0)
-
ObjectHandle::ObjectHandle() = default;
ObjectHandle::ObjectHandle(ObjectId id, ObjectGeneration generation)
: id(id), generation(generation) {
@@ -485,77 +471,31 @@ namespace dawn_wire {
return *this;
}
- template <typename BufferT>
- template <typename T>
- bool BufferConsumer<BufferT>::Peek(T** data) {
- if (sizeof(T) > mSize) {
- return false;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- return true;
- }
-
- template <typename BufferT>
- template <typename T>
- bool BufferConsumer<BufferT>::Next(T** data) {
- if (sizeof(T) > mSize) {
- return false;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += sizeof(T);
- mSize -= sizeof(T);
- return true;
- }
-
- template <typename BufferT>
- template <typename T, typename N>
- bool BufferConsumer<BufferT>::NextN(N count, T** data) {
- static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
-
- constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
- if (count > kMaxCountWithoutOverflows) {
- return false;
- }
-
- // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
- size_t totalSize = sizeof(T) * count;
- if (totalSize > mSize) {
- return false;
- }
-
- *data = reinterpret_cast<T*>(mBuffer);
- mBuffer += totalSize;
- mSize -= totalSize;
- return true;
- }
-
namespace {
// Allocates enough space from allocator to countain T[count] and return it in out.
// Return FatalError if the allocator couldn't allocate the memory.
// Always writes to |out| on success.
template <typename T, typename N>
- DeserializeResult GetSpace(DeserializeAllocator* allocator, N count, T** out) {
+ WireResult GetSpace(DeserializeAllocator* allocator, N count, T** out) {
constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
if (count > kMaxCountWithoutOverflows) {
- return DeserializeResult::FatalError;
+ return WireResult::FatalError;
}
size_t totalSize = sizeof(T) * count;
*out = static_cast<T*>(allocator->GetSpace(totalSize));
if (*out == nullptr) {
- return DeserializeResult::FatalError;
+ return WireResult::FatalError;
}
- return DeserializeResult::Success;
+ return WireResult::Success;
}
size_t GetChainedStructExtraRequiredSize(const WGPUChainedStruct* chainedStruct);
- DAWN_NO_DISCARD bool SerializeChainedStruct(WGPUChainedStruct const* chainedStruct,
- SerializeBuffer* buffer,
- const ObjectIdProvider& provider);
- DeserializeResult DeserializeChainedStruct(const WGPUChainedStruct** outChainNext,
+ DAWN_NO_DISCARD WireResult SerializeChainedStruct(WGPUChainedStruct const* chainedStruct,
+ SerializeBuffer* buffer,
+ const ObjectIdProvider& provider);
+ WireResult DeserializeChainedStruct(const WGPUChainedStruct** outChainNext,
DeserializeBuffer* deserializeBuffer,
DeserializeAllocator* allocator,
const ObjectIdResolver& resolver);
@@ -593,9 +533,9 @@ namespace dawn_wire {
return result;
}
- DAWN_NO_DISCARD bool SerializeChainedStruct(WGPUChainedStruct const* chainedStruct,
- SerializeBuffer* buffer,
- const ObjectIdProvider& provider) {
+ DAWN_NO_DISCARD WireResult SerializeChainedStruct(WGPUChainedStruct const* chainedStruct,
+ SerializeBuffer* buffer,
+ const ObjectIdProvider& provider) {
ASSERT(chainedStruct != nullptr);
ASSERT(buffer != nullptr);
do {
@@ -605,11 +545,11 @@ namespace dawn_wire {
case {{as_cEnum(types["s type"].name, sType.name)}}: {
{{CType}}Transfer* transfer;
- SERIALIZE_TRY(buffer->Next(&transfer));
+ WIRE_TRY(buffer->Next(&transfer));
transfer->chain.sType = chainedStruct->sType;
transfer->chain.hasNext = chainedStruct->next != nullptr;
- SERIALIZE_TRY({{CType}}Serialize(*reinterpret_cast<{{CType}} const*>(chainedStruct), transfer, buffer
+ WIRE_TRY({{CType}}Serialize(*reinterpret_cast<{{CType}} const*>(chainedStruct), transfer, buffer
{%- if types[sType.name.get()].may_have_dawn_object -%}
, provider
{%- endif -%}
@@ -626,7 +566,7 @@ namespace dawn_wire {
}
WGPUChainedStructTransfer* transfer;
- SERIALIZE_TRY(buffer->Next(&transfer));
+ WIRE_TRY(buffer->Next(&transfer));
transfer->sType = WGPUSType_Invalid;
transfer->hasNext = chainedStruct->next != nullptr;
@@ -636,34 +576,34 @@ namespace dawn_wire {
}
}
} while (chainedStruct != nullptr);
- return true;
+ return WireResult::Success;
}
- DeserializeResult DeserializeChainedStruct(const WGPUChainedStruct** outChainNext,
+ WireResult DeserializeChainedStruct(const WGPUChainedStruct** outChainNext,
DeserializeBuffer* deserializeBuffer,
DeserializeAllocator* allocator,
const ObjectIdResolver& resolver) {
bool hasNext;
do {
const volatile WGPUChainedStructTransfer* header;
- DESERIALIZE_TRY(deserializeBuffer->Peek(&header));
+ WIRE_TRY(deserializeBuffer->Peek(&header));
WGPUSType sType = header->sType;
switch (sType) {
{% for sType in types["s type"].values if sType.valid and sType.name.CamelCase() not in client_side_structures %}
{% set CType = as_cType(sType.name) %}
case {{as_cEnum(types["s type"].name, sType.name)}}: {
const volatile {{CType}}Transfer* transfer;
- DESERIALIZE_TRY(deserializeBuffer->Read(&transfer));
+ WIRE_TRY(deserializeBuffer->Read(&transfer));
{{CType}}* outStruct;
- DESERIALIZE_TRY(GetSpace(allocator, sizeof({{CType}}), &outStruct));
+ WIRE_TRY(GetSpace(allocator, sizeof({{CType}}), &outStruct));
outStruct->chain.sType = sType;
outStruct->chain.next = nullptr;
*outChainNext = &outStruct->chain;
outChainNext = &outStruct->chain.next;
- DESERIALIZE_TRY({{CType}}Deserialize(outStruct, transfer, deserializeBuffer, allocator
+ WIRE_TRY({{CType}}Deserialize(outStruct, transfer, deserializeBuffer, allocator
{%- if types[sType.name.get()].may_have_dawn_object -%}
, resolver
{%- endif -%}
@@ -680,10 +620,10 @@ namespace dawn_wire {
}
const volatile WGPUChainedStructTransfer* transfer;
- DESERIALIZE_TRY(deserializeBuffer->Read(&transfer));
+ WIRE_TRY(deserializeBuffer->Read(&transfer));
WGPUChainedStruct* outStruct;
- DESERIALIZE_TRY(GetSpace(allocator, sizeof(WGPUChainedStruct), &outStruct));
+ WIRE_TRY(GetSpace(allocator, sizeof(WGPUChainedStruct), &outStruct));
outStruct->sType = WGPUSType_Invalid;
outStruct->next = nullptr;
@@ -696,7 +636,7 @@ namespace dawn_wire {
}
} while (hasNext);
- return DeserializeResult::Success;
+ return WireResult::Success;
}
//* Output [de]serialization helpers for commands
@@ -733,10 +673,12 @@ namespace dawn_wire {
SerializeBuffer serializeBuffer(buffer, SerializedWGPUDevicePropertiesSize(deviceProperties));
WGPUDevicePropertiesTransfer* transfer;
- bool success =
- serializeBuffer.Next(&transfer) &&
- WGPUDevicePropertiesSerialize(*deviceProperties, transfer, &serializeBuffer);
- ASSERT(success);
+
+ WireResult result = serializeBuffer.Next(&transfer);
+ ASSERT(result == WireResult::Success);
+
+ result = WGPUDevicePropertiesSerialize(*deviceProperties, transfer, &serializeBuffer);
+ ASSERT(result == WireResult::Success);
}
bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
@@ -744,12 +686,12 @@ namespace dawn_wire {
size_t size) {
const volatile WGPUDevicePropertiesTransfer* transfer;
DeserializeBuffer deserializeBuffer(buffer, size);
- if (deserializeBuffer.Read(&transfer) != DeserializeResult::Success) {
+ if (deserializeBuffer.Read(&transfer) != WireResult::Success) {
return false;
}
return WGPUDevicePropertiesDeserialize(deviceProperties, transfer, &deserializeBuffer,
- nullptr) == DeserializeResult::Success;
+ nullptr) == WireResult::Success;
}
} // namespace dawn_wire
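Note that the two removed macros above are folded into a single WIRE_TRY that propagates the unified WireResult type. WIRE_TRY itself is defined in dawn_wire/WireResult.h, which is not part of this diff, so the following is only a minimal sketch consistent with the call sites in this template:

    // Sketch only: the real definitions live in dawn_wire/WireResult.h. Only the names
    // WireResult::Success, WireResult::FatalError and WIRE_TRY are confirmed by the
    // call sites above; the exact macro body is an assumption.
    enum class WireResult {
        Success,
        FatalError,
    };

    #define WIRE_TRY(EXPR)                             \
        do {                                           \
            WireResult exprResult = EXPR;              \
            if (exprResult != WireResult::Success) {   \
                return exprResult;                     \
            }                                          \
        } while (0)
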
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.h b/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.h
index e5be81e0340..68f365c20a1 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.h
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/WireCmd.h
@@ -17,7 +17,9 @@
#include <dawn/webgpu.h>
+#include "dawn_wire/BufferConsumer.h"
#include "dawn_wire/ObjectType_autogen.h"
+#include "dawn_wire/WireResult.h"
namespace dawn_wire {
@@ -43,67 +45,6 @@ namespace dawn_wire {
ObjectHandle& AssignFrom(const volatile ObjectHandle& rhs);
};
- enum class DeserializeResult {
- Success,
- FatalError,
- };
-
- template <typename BufferT>
- class BufferConsumer {
- public:
- BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {}
-
- BufferT* Buffer() const { return mBuffer; }
- size_t AvailableSize() const { return mSize; }
-
- protected:
- template <typename T, typename N>
- DAWN_NO_DISCARD bool NextN(N count, T** data);
-
- template <typename T>
- DAWN_NO_DISCARD bool Next(T** data);
-
- template <typename T>
- DAWN_NO_DISCARD bool Peek(T** data);
-
- private:
- BufferT* mBuffer;
- size_t mSize;
- };
-
- class SerializeBuffer : public BufferConsumer<char> {
- public:
- using BufferConsumer::BufferConsumer;
- using BufferConsumer::NextN;
- using BufferConsumer::Next;
- };
-
- class DeserializeBuffer : public BufferConsumer<const volatile char> {
- public:
- using BufferConsumer::BufferConsumer;
-
- template <typename T, typename N>
- DAWN_NO_DISCARD DeserializeResult ReadN(N count, const volatile T** data) {
- return NextN(count, data)
- ? DeserializeResult::Success
- : DeserializeResult::FatalError;
- }
-
- template <typename T>
- DAWN_NO_DISCARD DeserializeResult Read(const volatile T** data) {
- return Next(data)
- ? DeserializeResult::Success
- : DeserializeResult::FatalError;
- }
-
- template <typename T>
- DAWN_NO_DISCARD DeserializeResult Peek(const volatile T** data) {
- return BufferConsumer::Peek(data)
- ? DeserializeResult::Success
- : DeserializeResult::FatalError;
- }
- };
-
// Interface to allocate more space to deserialize pointed-to data.
// nullptr is treated as an error.
class DeserializeAllocator {
@@ -116,8 +57,8 @@ namespace dawn_wire {
class ObjectIdResolver {
public:
{% for type in by_category["object"] %}
- virtual DeserializeResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
- virtual DeserializeResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
+ virtual WireResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
+ virtual WireResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const = 0;
{% endfor %}
};
@@ -157,7 +98,7 @@ namespace dawn_wire {
//* Serialize the structure and everything it points to into serializeBuffer which must be
//* big enough to contain all the data (as queried from GetRequiredSize).
- DAWN_NO_DISCARD bool Serialize(size_t commandSize, SerializeBuffer* serializeBuffer
+ WireResult Serialize(size_t commandSize, SerializeBuffer* serializeBuffer
{%- if not is_return_command -%}
, const ObjectIdProvider& objectIdProvider
{%- endif -%}
@@ -170,7 +111,7 @@ namespace dawn_wire {
//* Deserialize returns:
//* - Success if everything went well (yay!)
        //* - FatalError if something bad happened (buffer too small, for example)
- DeserializeResult Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator
+ WireResult Deserialize(DeserializeBuffer* deserializeBuffer, DeserializeAllocator* allocator
{%- if command.may_have_dawn_object -%}
, const ObjectIdResolver& resolver
{%- endif -%}
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ClientHandlers.cpp b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ClientHandlers.cpp
index de1ca3c6198..13ac79c13bc 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ClientHandlers.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ClientHandlers.cpp
@@ -21,9 +21,9 @@ namespace dawn_wire { namespace client {
{% for command in cmd_records["return command"] %}
bool Client::Handle{{command.name.CamelCase()}}(DeserializeBuffer* deserializeBuffer) {
Return{{command.name.CamelCase()}}Cmd cmd;
- DeserializeResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator);
+ WireResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator);
- if (deserializeResult == DeserializeResult::FatalError) {
+ if (deserializeResult == WireResult::FatalError) {
return false;
}
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerBase.h b/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerBase.h
index 66193a477e3..eb0aab8c3bd 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerBase.h
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerBase.h
@@ -70,20 +70,20 @@ namespace dawn_wire { namespace server {
private:
// Implementation of the ObjectIdResolver interface
{% for type in by_category["object"] %}
- DeserializeResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
+ WireResult GetFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
auto data = mKnown{{type.name.CamelCase()}}.Get(id);
if (data == nullptr) {
- return DeserializeResult::FatalError;
+ return WireResult::FatalError;
}
*out = data->handle;
- return DeserializeResult::Success;
+ return WireResult::Success;
}
- DeserializeResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
+ WireResult GetOptionalFromId(ObjectId id, {{as_cType(type.name)}}* out) const final {
if (id == 0) {
*out = nullptr;
- return DeserializeResult::Success;
+ return WireResult::Success;
}
return GetFromId(id, out);
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerHandlers.cpp b/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerHandlers.cpp
index a544a505d16..ea3da6cae3f 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerHandlers.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/server/ServerHandlers.cpp
@@ -25,13 +25,13 @@ namespace dawn_wire { namespace server {
//* The generic command handlers
bool Server::Handle{{Suffix}}(DeserializeBuffer* deserializeBuffer) {
{{Suffix}}Cmd cmd;
- DeserializeResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator
+ WireResult deserializeResult = cmd.Deserialize(deserializeBuffer, &mAllocator
{%- if command.may_have_dawn_object -%}
, *this
{%- endif -%}
);
- if (deserializeResult == DeserializeResult::FatalError) {
+ if (deserializeResult == WireResult::FatalError) {
return false;
}
diff --git a/chromium/third_party/dawn/scripts/dawn_features.gni b/chromium/third_party/dawn/scripts/dawn_features.gni
index cc2899a73ee..924f2404209 100644
--- a/chromium/third_party/dawn/scripts/dawn_features.gni
+++ b/chromium/third_party/dawn/scripts/dawn_features.gni
@@ -75,10 +75,6 @@ declare_args() {
# Enables error injection for faking failures to native API calls
dawn_enable_error_injection =
is_debug || (build_with_chromium && use_fuzzing_engine)
-
- # Enable support WGSL for shaders.
- # Turned off for Skia, because Tint is currently not part of its DEPS.
- dawn_enable_wgsl = !defined(is_skia_standalone)
}
# GN does not allow reading a variable defined in the same declare_args().
diff --git a/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni b/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
index 9120a799d27..acb5a70dcb1 100644
--- a/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
+++ b/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
@@ -49,10 +49,6 @@ if (!defined(dawn_googletest_dir)) {
dawn_googletest_dir = "//third_party/googletest"
}
-if (!defined(dawn_shaderc_dir)) {
- dawn_shaderc_dir = "//third_party/shaderc"
-}
-
if (!defined(dawn_spirv_cross_dir)) {
dawn_spirv_cross_dir = "//third_party/vulkan-deps/spirv-cross/src"
}
@@ -77,6 +73,6 @@ if (!defined(dawn_vulkan_validation_layers_dir)) {
}
if (!defined(dawn_tint_dir)) {
- # Default to Tint not being available.
- dawn_tint_dir = ""
+  # Default to Tint being in Dawn's DEPS.
+ dawn_tint_dir = "${dawn_root}/third_party/tint"
}
diff --git a/chromium/third_party/dawn/scripts/roll-shader-deps.sh b/chromium/third_party/dawn/scripts/roll-shader-deps.sh
index db16800867b..8fbcac53152 100755
--- a/chromium/third_party/dawn/scripts/roll-shader-deps.sh
+++ b/chromium/third_party/dawn/scripts/roll-shader-deps.sh
@@ -18,10 +18,6 @@
#
 # Depends on roll-dep from depot_tools being in PATH.
-glslang_dir="third_party/glslang/"
-glslang_trunk="origin/master"
-shaderc_dir="third_party/shaderc/"
-shaderc_trunk="origin/main"
spirv_cross_dir="third_party/spirv-cross/"
spirv_cross_trunk="origin/master"
spirv_headers_dir="third_party/spirv-headers/"
@@ -43,8 +39,6 @@ fi
old_head=$(git rev-parse HEAD)
-roll-dep --ignore-dirty-tree --roll-to="${glslang_trunk}" "${glslang_dir}"
-roll-dep --ignore-dirty-tree --roll-to="${shaderc_trunk}" "${shaderc_dir}"
roll-dep --ignore-dirty-tree --roll-to="${spirv_cross_trunk}" "${spirv_cross_dir}"
roll-dep --ignore-dirty-tree --roll-to="${spirv_headers_trunk}" "${spirv_headers_dir}"
roll-dep --ignore-dirty-tree --roll-to="${spirv_tools_trunk}" "${spirv_tools_dir}"
diff --git a/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh b/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh
deleted file mode 100755
index 966c1d6c89b..00000000000
--- a/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Generates a seed corpus for fuzzing based on dumping wire traces
-# from running Dawn tests
-
-# Exit if anything fails
-set -e
-
-if [ "$#" -lt 3 ]; then
-cat << EOF
-
-Usage:
- $0 <out_dir> <fuzzer_name> <test_name> [additional_test_args...]
-
-Example:
- $0 out/fuzz dawn_wire_server_and_vulkan_backend_fuzzer dawn_end2end_tests --gtest_filter=*Vulkan
-
-EOF
- exit 1
-fi
-
-all_args=("$@")
-out_dir=$1
-fuzzer_name=$2
-test_name=$3
-additional_test_args=("${all_args[@]:3}")
-
-testcase_dir="/tmp/testcases/${fuzzer_name}/"
-injected_error_testcase_dir="/tmp/testcases/${fuzzer_name}_injected/"
-minimized_testcase_dir="/tmp/testcases/${fuzzer_name}_minimized/"
-
-# Print commands so it's clear what is being executed
-set -x
-
-# Make a directory for temporarily storing testcases
-mkdir -p "$testcase_dir"
-
-# Make an empty directory for temporarily storing testcases with injected errors
-rm -rf "$injected_error_testcase_dir"
-mkdir -p "$injected_error_testcase_dir"
-
-# Make an empty directory for temporarily storing minimized testcases
-rm -rf "$minimized_testcase_dir"
-mkdir -p "$minimized_testcase_dir"
-
-# Build the fuzzer and test
-autoninja -C $out_dir $fuzzer_name $test_name
-
-fuzzer_binary="${out_dir}/${fuzzer_name}"
-test_binary="${out_dir}/${test_name}"
-
-# Run the test binary
-$test_binary --use-wire --wire-trace-dir="$testcase_dir" $additional_test_args
-
-# Run the fuzzer over the testcases to inject errors
-$fuzzer_binary --injected-error-testcase-dir="$injected_error_testcase_dir" -runs=0 "$testcase_dir"
-
-# Run the fuzzer to minimize the testcases + injected errors
-$fuzzer_binary -merge=1 "$minimized_testcase_dir" "$injected_error_testcase_dir" "$testcase_dir"
-
-# Turn off command printing
-set +x
-
-if [ -z "$(ls -A $minimized_testcase_dir)" ]; then
-cat << EOF
-
-Minimized testcase directory is empty!
-Are you building with use_libfuzzer=true ?
-
-EOF
- exit 1
-fi
-
-cat << EOF
-
-Please test the corpus in $minimized_testcase_dir with $fuzzer_name and confirm it works as expected.
-
- $fuzzer_binary $minimized_testcase_dir
-
-Then, run the following command to upload new testcases to the seed corpus:
-
- gsutil -m rsync $minimized_testcase_dir gs://clusterfuzz-corpus/libfuzzer/${fuzzer_name}/
-
-EOF
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 08b91a0e5e6..176e82ea414 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -75,10 +75,6 @@ config("dawn_internal") {
defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
}
- if (dawn_enable_wgsl) {
- defines += [ "DAWN_ENABLE_WGSL" ]
- }
-
if (dawn_use_x11) {
defines += [ "DAWN_USE_X11" ]
}
@@ -110,17 +106,12 @@ config("dawn_internal") {
"-Wnon-c-typedef-for-linkage",
"-Wpessimizing-move",
"-Wrange-loop-analysis",
+ "-Wredundant-move",
"-Wshadow-field",
"-Wstrict-prototypes",
"-Wtautological-unsigned-zero-compare",
]
- # Allow comparison against type limits that might be tautological on 32bit
- # or 64bit systems. Without this the following produces an error on 64bit:
- #
- # if (myUint64 > std::numeric_limits<size_t>::max()) {...}
- cflags += [ "-Wno-tautological-type-limit-compare" ]
-
if (is_win) {
cflags += [
# clang-cl doesn't know -pedantic, pass it explicitly to the clang driver
@@ -190,18 +181,30 @@ if (is_win || is_linux || is_chromeos || is_mac || is_fuchsia || is_android) {
"TypeTraits.h",
"TypedInteger.h",
"UnderlyingType.h",
+ "VertexFormatUtils.cpp",
+ "VertexFormatUtils.h",
"ityp_array.h",
"ityp_bitset.h",
"ityp_span.h",
"ityp_stack_vec.h",
"ityp_vector.h",
"vulkan_platform.h",
- "windows_with_undefs.h",
"xlib_with_undefs.h",
]
public_configs = [ ":dawn_internal" ]
- deps = [ "${dawn_root}/src/dawn:dawn_headers" ]
+ deps = [
+ "${dawn_root}/src/dawn:dawn_headers",
+ "${dawn_root}/src/dawn:dawncpp_headers",
+ ]
+
+ if (is_win) {
+ sources += [
+ "WindowsUtils.cpp",
+ "WindowsUtils.h",
+ "windows_with_undefs.h",
+ ]
+ }
if (dawn_enable_vulkan) {
public_deps = [ "${dawn_root}/third_party/khronos:vulkan_headers" ]
}
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
index 46c17375855..a49e6827f8b 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -53,15 +53,25 @@ target_sources(dawn_common PRIVATE
"TypeTraits.h"
"TypedInteger.h"
"UnderlyingType.h"
+ "VertexFormatUtils.cpp"
+ "VertexFormatUtils.h"
"ityp_array.h"
"ityp_bitset.h"
"ityp_span.h"
"ityp_stack_vec.h"
"ityp_vector.h"
"vulkan_platform.h"
- "windows_with_undefs.h"
"xlib_with_undefs.h"
)
+
+if (WIN32)
+ target_sources(dawn_common PRIVATE
+ "WindowsUtils.cpp"
+ "WindowsUtils.h"
+ "windows_with_undefs.h"
+ )
+endif()
+
target_link_libraries(dawn_common PRIVATE dawn_internal_config)
# TODO Android Log support
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.cpp b/chromium/third_party/dawn/src/common/GPUInfo.cpp
index c3ea9cefb5c..65ef78eec32 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.cpp
+++ b/chromium/third_party/dawn/src/common/GPUInfo.cpp
@@ -14,7 +14,32 @@
#include "common/GPUInfo.h"
+#include <algorithm>
+#include <array>
+
namespace gpu_info {
+ namespace {
+ // Intel
+ // Referenced from the following Mesa source code:
+ // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+ // gen9
+ const std::array<uint32_t, 25> Skylake = {
+ {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
+ 0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
+ 0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
+ // gen9p5
+ const std::array<uint32_t, 20> Kabylake = {
+ {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
+ 0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+ const std::array<uint32_t, 17> Coffeelake = {
+ {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
+ 0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+ const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+ const std::array<uint32_t, 21> Cometlake = {
+ {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
+ 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+ } // anonymous namespace
+
bool IsAMD(PCIVendorID vendorId) {
return vendorId == kVendorID_AMD;
}
@@ -39,4 +64,17 @@ namespace gpu_info {
bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
}
+
+ // Intel GPUs
+ bool IsSkylake(PCIDeviceID deviceId) {
+ return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+ }
+ bool IsKabylake(PCIDeviceID deviceId) {
+ return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
+ }
+ bool IsCoffeelake(PCIDeviceID deviceId) {
+ return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
+ (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
+ (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
+ }
} // namespace gpu_info
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.h b/chromium/third_party/dawn/src/common/GPUInfo.h
index 87efbbc9dfb..09980a7e183 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.h
+++ b/chromium/third_party/dawn/src/common/GPUInfo.h
@@ -43,5 +43,10 @@ namespace gpu_info {
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+ // Intel architectures
+ bool IsSkylake(PCIDeviceID deviceId);
+ bool IsKabylake(PCIDeviceID deviceId);
+ bool IsCoffeelake(PCIDeviceID deviceId);
+
} // namespace gpu_info
#endif // COMMON_GPUINFO_H
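A hypothetical caller of the new device-ID queries, to show how they are meant to be combined (the function name is illustrative only, and the PCIDeviceID alias from the header is spelled as uint32_t here to keep the snippet standalone):

    #include <cstdint>

    #include "common/GPUInfo.h"

    // Illustrative only: Skylake is gen9, while Kabylake and the
    // Coffee/Whisky/Comet Lake group handled by IsCoffeelake are gen9.5.
    bool IsIntelGen9Device(uint32_t deviceId) {
        return gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
               gpu_info::IsCoffeelake(deviceId);
    }
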
diff --git a/chromium/third_party/dawn/src/common/RefCounted.cpp b/chromium/third_party/dawn/src/common/RefCounted.cpp
index af38fc668e2..f5596386491 100644
--- a/chromium/third_party/dawn/src/common/RefCounted.cpp
+++ b/chromium/third_party/dawn/src/common/RefCounted.cpp
@@ -73,6 +73,14 @@ void RefCounted::Release() {
}
}
+void RefCounted::APIReference() {
+ Reference();
+}
+
+void RefCounted::APIRelease() {
+ Release();
+}
+
void RefCounted::DeleteThis() {
delete this;
}
diff --git a/chromium/third_party/dawn/src/common/RefCounted.h b/chromium/third_party/dawn/src/common/RefCounted.h
index 9328a4c5c42..6b266e3b9e4 100644
--- a/chromium/third_party/dawn/src/common/RefCounted.h
+++ b/chromium/third_party/dawn/src/common/RefCounted.h
@@ -27,10 +27,12 @@ class RefCounted {
uint64_t GetRefCountForTesting() const;
uint64_t GetRefCountPayload() const;
- // Dawn API
void Reference();
void Release();
+ void APIReference();
+ void APIRelease();
+
protected:
virtual ~RefCounted() = default;
// A Derived class may override this if they require a custom deleter.
diff --git a/chromium/third_party/dawn/src/common/VertexFormatUtils.cpp b/chromium/third_party/dawn/src/common/VertexFormatUtils.cpp
new file mode 100644
index 00000000000..f39b792ec88
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/VertexFormatUtils.cpp
@@ -0,0 +1,289 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "VertexFormatUtils.h"
+
+#include "Assert.h"
+
+namespace dawn {
+
+ namespace {
+
+ static constexpr wgpu::VertexFormat kFirstDeprecatedVertexFormat =
+ wgpu::VertexFormat::UChar2;
+ static constexpr uint32_t kFirstDeprecatedVertexFormatValue =
+ static_cast<uint32_t>(kFirstDeprecatedVertexFormat);
+
+ } // namespace
+
+ bool IsDeprecatedVertexFormat(wgpu::VertexFormat format) {
+ return format >= kFirstDeprecatedVertexFormat;
+ }
+
+ wgpu::VertexFormat NormalizeVertexFormat(wgpu::VertexFormat format) {
+        // If the specified format is from the deprecated range, return the corresponding
+ // non-deprecated format.
+ if (format >= kFirstDeprecatedVertexFormat) {
+ uint32_t formatValue = static_cast<uint32_t>(format);
+ // Need to add one to account for the "undefined" enum with value 0
+ return static_cast<wgpu::VertexFormat>(
+ (formatValue - kFirstDeprecatedVertexFormatValue) + 1);
+ }
+ return format;
+ }
+
+ uint32_t VertexFormatNumComponents(wgpu::VertexFormat format) {
+ format = NormalizeVertexFormat(format);
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x4:
+ case wgpu::VertexFormat::Float16x4:
+ case wgpu::VertexFormat::Float32x4:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32x4:
+ return 4;
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Sint32x3:
+ return 3;
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Sint32x2:
+ return 2;
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Sint32:
+ return 1;
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+
+ // Deprecated formats (should be unreachable after NormalizeVertexFormat call)
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ size_t VertexFormatComponentSize(wgpu::VertexFormat format) {
+ format = NormalizeVertexFormat(format);
+ switch (format) {
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ return sizeof(char);
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float16x4:
+ return sizeof(uint16_t);
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Float32x4:
+ return sizeof(float);
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
+ return sizeof(int32_t);
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+
+ // Deprecated formats (should be unreachable after NormalizeVertexFormat call)
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+ size_t VertexFormatSize(wgpu::VertexFormat format) {
+ return VertexFormatNumComponents(format) * VertexFormatComponentSize(format);
+ }
+
+ const char* GetWGSLVertexFormatType(wgpu::VertexFormat format) {
+ format = NormalizeVertexFormat(format);
+ switch (format) {
+ case wgpu::VertexFormat::Float32:
+ return "f32";
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float32x2:
+ return "vec2<f32>";
+ case wgpu::VertexFormat::Float32x3:
+ return "vec3<f32>";
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x4:
+ case wgpu::VertexFormat::Float16x4:
+ case wgpu::VertexFormat::Float32x4:
+ return "vec4<f32>";
+ case wgpu::VertexFormat::Uint32:
+ return "u32";
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint32x2:
+ return "vec2<u32>";
+ case wgpu::VertexFormat::Uint32x3:
+ return "vec3<u32>";
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Uint32x4:
+ return "vec4<u32>";
+ case wgpu::VertexFormat::Sint32:
+ return "i32";
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint32x2:
+ return "vec2<i32>";
+ case wgpu::VertexFormat::Sint32x3:
+ return "vec3<i32>";
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Sint32x4:
+ return "vec4<i32>";
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+
+ // Deprecated formats (should be unreachable after NormalizeVertexFormat call)
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
+ break;
+ }
+ UNREACHABLE();
+ }
+
+} // namespace dawn
diff --git a/chromium/third_party/dawn/src/common/VertexFormatUtils.h b/chromium/third_party/dawn/src/common/VertexFormatUtils.h
new file mode 100644
index 00000000000..632ca7fba7e
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/VertexFormatUtils.h
@@ -0,0 +1,65 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VERTEX_FORMAT_UTILS_H_
+#define DAWNNATIVE_VERTEX_FORMAT_UTILS_H_
+
+#include <array>
+
+#include <dawn/webgpu_cpp.h>
+
+namespace dawn {
+
+ static constexpr std::array<wgpu::VertexFormat, 30> kAllVertexFormats = {
+ wgpu::VertexFormat::Uint8x2, wgpu::VertexFormat::Uint8x4, wgpu::VertexFormat::Sint8x2,
+ wgpu::VertexFormat::Sint8x4, wgpu::VertexFormat::Unorm8x2, wgpu::VertexFormat::Unorm8x4,
+ wgpu::VertexFormat::Snorm8x2, wgpu::VertexFormat::Snorm8x4, wgpu::VertexFormat::Uint16x2,
+ wgpu::VertexFormat::Uint16x4, wgpu::VertexFormat::Unorm16x2, wgpu::VertexFormat::Unorm16x4,
+ wgpu::VertexFormat::Sint16x2, wgpu::VertexFormat::Sint16x4, wgpu::VertexFormat::Snorm16x2,
+ wgpu::VertexFormat::Snorm16x4, wgpu::VertexFormat::Float16x2, wgpu::VertexFormat::Float16x4,
+ wgpu::VertexFormat::Float32, wgpu::VertexFormat::Float32x2, wgpu::VertexFormat::Float32x3,
+ wgpu::VertexFormat::Float32x4, wgpu::VertexFormat::Uint32, wgpu::VertexFormat::Uint32x2,
+ wgpu::VertexFormat::Uint32x3, wgpu::VertexFormat::Uint32x4, wgpu::VertexFormat::Sint32,
+ wgpu::VertexFormat::Sint32x2, wgpu::VertexFormat::Sint32x3, wgpu::VertexFormat::Sint32x4,
+ };
+
+ static constexpr std::array<wgpu::VertexFormat, 30> kAllDeprecatedVertexFormats = {
+ wgpu::VertexFormat::UChar2, wgpu::VertexFormat::UChar4,
+ wgpu::VertexFormat::Char2, wgpu::VertexFormat::Char4,
+ wgpu::VertexFormat::UChar2Norm, wgpu::VertexFormat::UChar4Norm,
+ wgpu::VertexFormat::Char2Norm, wgpu::VertexFormat::Char4Norm,
+ wgpu::VertexFormat::UShort2, wgpu::VertexFormat::UShort4,
+ wgpu::VertexFormat::UShort2Norm, wgpu::VertexFormat::UShort4Norm,
+ wgpu::VertexFormat::Short2, wgpu::VertexFormat::Short4,
+ wgpu::VertexFormat::Short2Norm, wgpu::VertexFormat::Short4Norm,
+ wgpu::VertexFormat::Half2, wgpu::VertexFormat::Half4,
+ wgpu::VertexFormat::Float, wgpu::VertexFormat::Float2,
+ wgpu::VertexFormat::Float3, wgpu::VertexFormat::Float4,
+ wgpu::VertexFormat::UInt, wgpu::VertexFormat::UInt2,
+ wgpu::VertexFormat::UInt3, wgpu::VertexFormat::UInt4,
+ wgpu::VertexFormat::Int, wgpu::VertexFormat::Int2,
+ wgpu::VertexFormat::Int3, wgpu::VertexFormat::Int4,
+ };
+
+ bool IsDeprecatedVertexFormat(wgpu::VertexFormat format);
+ wgpu::VertexFormat NormalizeVertexFormat(wgpu::VertexFormat format);
+ uint32_t VertexFormatNumComponents(wgpu::VertexFormat format);
+ size_t VertexFormatComponentSize(wgpu::VertexFormat format);
+ size_t VertexFormatSize(wgpu::VertexFormat format);
+
+ const char* GetWGSLVertexFormatType(wgpu::VertexFormat textureFormat);
+
+} // namespace dawn
+
+#endif
\ No newline at end of file
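A small illustration of how the helpers above fit together. The call site is hypothetical; the deprecated-to-current mapping follows from the parallel ordering of the two arrays in the header:

    #include <cstddef>

    #include "common/VertexFormatUtils.h"

    // Hypothetical helper: compute the byte size of one vertex attribute, accepting
    // both current and deprecated names. NormalizeVertexFormat maps, for example,
    // wgpu::VertexFormat::Float4 onto wgpu::VertexFormat::Float32x4, so both
    // spellings yield 4 components * sizeof(float) = 16 bytes.
    size_t AttributeByteSize(wgpu::VertexFormat format) {
        return dawn::VertexFormatSize(dawn::NormalizeVertexFormat(format));
    }
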
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.cpp b/chromium/third_party/dawn/src/common/WindowsUtils.cpp
new file mode 100644
index 00000000000..0f9b9852fca
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/WindowsUtils.cpp
@@ -0,0 +1,32 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/WindowsUtils.h"
+
+#include "common/windows_with_undefs.h"
+
+#include <memory>
+
+std::string WCharToUTF8(const wchar_t* input) {
+ // The -1 argument asks WideCharToMultiByte to use the null terminator to know the size of
+ // input. It will return a size that includes the null terminator.
+ int requiredSize = WideCharToMultiByte(CP_UTF8, 0, input, -1, nullptr, 0, nullptr, nullptr);
+
+ // When we can use C++17 this can be changed to use string.data() instead.
+ std::unique_ptr<char[]> result = std::make_unique<char[]>(requiredSize);
+ WideCharToMultiByte(CP_UTF8, 0, input, -1, result.get(), requiredSize, nullptr, nullptr);
+
+ // This will allocate the returned std::string and then destroy result.
+ return std::string(result.get(), result.get() + (requiredSize - 1));
+}
diff --git a/chromium/third_party/dawn/src/common/WindowsUtils.h b/chromium/third_party/dawn/src/common/WindowsUtils.h
new file mode 100644
index 00000000000..0c43d08aa41
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/WindowsUtils.h
@@ -0,0 +1,22 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_WINDOWSUTILS_H_
+#define COMMON_WINDOWSUTILS_H_
+
+#include <string>
+
+std::string WCharToUTF8(const wchar_t* input);
+
+#endif // COMMON_WINDOWSUTILS_H_
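The helper is Windows-only (see the guarded source lists added to BUILD.gn and CMakeLists.txt above); a trivial usage sketch:

    #include <string>

    #include "common/WindowsUtils.h"

    // Illustrative only: convert a wide Windows path to UTF-8, e.g. for logging.
    // ASCII round-trips unchanged; other code points are re-encoded as UTF-8, and
    // the returned std::string does not include the trailing null.
    std::string DescribeModulePath(const wchar_t* widePath) {
        return "module: " + WCharToUTF8(widePath);
    }
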
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
index a9c23ae741b..56a9ba557b8 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
@@ -46,6 +46,20 @@ namespace dawn_native {
}
}
+ AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor2* descriptor)
+ : mSampleCount(descriptor->multisample.count) {
+ ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+ ++i) {
+ mColorAttachmentsSet.set(i);
+ mColorFormats[i] = descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
+ }
+ if (descriptor->depthStencil != nullptr) {
+ mDepthStencilFormat = descriptor->depthStencil->format;
+ }
+ }
+
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
for (ColorAttachmentIndex i(uint8_t(0));
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
index ce8b8aaf14c..891b15883d5 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
@@ -38,6 +38,7 @@ namespace dawn_native {
// Note: Descriptors must be validated before the AttachmentState is constructed.
explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+ explicit AttachmentStateBlueprint(const RenderPipelineDescriptor2* descriptor);
explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index 56fe8ae030e..803ee30246b 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -146,6 +146,7 @@ source_set("dawn_native_sources") {
"${dawn_root}/third_party/gn/spirv_cross:spirv_cross",
"${dawn_spirv_tools_dir}:spvtools_opt",
"${dawn_spirv_tools_dir}:spvtools_val",
+ "${dawn_tint_dir}/src:libtint",
]
defines = []
libs = []
@@ -193,6 +194,8 @@ source_set("dawn_native_sources") {
"CommandValidation.h",
"Commands.cpp",
"Commands.h",
+ "CompilationMessages.cpp",
+ "CompilationMessages.h",
"ComputePassEncoder.cpp",
"ComputePassEncoder.h",
"ComputePipeline.cpp",
@@ -219,6 +222,8 @@ source_set("dawn_native_sources") {
"ErrorScope.h",
"Extensions.cpp",
"Extensions.h",
+ "ExternalTexture.cpp",
+ "ExternalTexture.h",
"Fence.cpp",
"Fence.h",
"Format.cpp",
@@ -286,6 +291,8 @@ source_set("dawn_native_sources") {
"SwapChain.h",
"Texture.cpp",
"Texture.h",
+ "TintUtils.cpp",
+ "TintUtils.h",
"ToBackend.h",
"Toggles.cpp",
"Toggles.h",
@@ -625,10 +632,6 @@ source_set("dawn_native_sources") {
"${dawn_angle_dir}:libGLESv2",
]
}
-
- if (dawn_enable_wgsl) {
- deps += [ "${dawn_tint_dir}:libtint" ]
- }
}
# The static and shared libraries for dawn_native. Most of the files are
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index 7c8dea9c054..e8bb23f50d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -252,11 +252,11 @@ namespace dawn_native {
}
}
- void BufferBase::MapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata) {
+ void BufferBase::APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
// Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
// possible to default the function argument (because there is the callback later in the
// argument list)
@@ -291,15 +291,15 @@ namespace dawn_native {
GetDevice()->GetPendingCommandSerial());
}
- void* BufferBase::GetMappedRange(size_t offset, size_t size) {
- return GetMappedRangeInternal(true, offset, size);
+ void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, true);
}
- const void* BufferBase::GetConstMappedRange(size_t offset, size_t size) {
- return GetMappedRangeInternal(false, offset, size);
+ const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+ return GetMappedRange(offset, size, false);
}
- void* BufferBase::GetMappedRangeInternal(bool writable, size_t offset, size_t size) {
+ void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
if (!CanGetMappedRange(writable, offset, size)) {
return nullptr;
}
@@ -314,7 +314,7 @@ namespace dawn_native {
return start == nullptr ? nullptr : start + offset;
}
- void BufferBase::Destroy() {
+ void BufferBase::APIDestroy() {
if (IsError()) {
// It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
// fake mapped staging data.
@@ -354,6 +354,10 @@ namespace dawn_native {
return {};
}
+ void BufferBase::APIUnmap() {
+ Unmap();
+ }
+
void BufferBase::Unmap() {
UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 936ae3c2971..8e848da9819 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -61,16 +61,19 @@ namespace dawn_native {
bool IsDataInitialized() const;
void SetIsDataInitialized();
- // Dawn API
- void MapAsync(wgpu::MapMode mode,
- size_t offset,
- size_t size,
- WGPUBufferMapCallback callback,
- void* userdata);
- void* GetMappedRange(size_t offset, size_t size);
- const void* GetConstMappedRange(size_t offset, size_t size);
+ void* GetMappedRange(size_t offset, size_t size, bool writable = true);
void Unmap();
- void Destroy();
+
+ // Dawn API
+ void APIMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* APIGetMappedRange(size_t offset, size_t size);
+ const void* APIGetConstMappedRange(size_t offset, size_t size);
+ void APIUnmap();
+ void APIDestroy();
protected:
BufferBase(DeviceBase* device,
@@ -91,7 +94,6 @@ namespace dawn_native {
virtual bool IsCPUWritableAtCreation() const = 0;
MaybeError CopyFromStagingBuffer();
- void* GetMappedRangeInternal(bool writable, size_t offset, size_t size);
void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
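The API-prefixed methods are the entry points the generated C proc table is expected to call, while internal code keeps using the un-prefixed Unmap()/GetMappedRange(). A hypothetical, hand-written forwarding shim to illustrate the split (the real forwarding code is generated from dawn.json; only the APIMapAsync signature above is taken from this patch):

    #include <dawn/webgpu.h>
    #include <dawn/webgpu_cpp.h>

    #include "dawn_native/Buffer.h"

    // Hypothetical shim, not part of this change: unwrap the C handle and forward
    // to the API-prefixed method, leaving the un-prefixed methods for internal use.
    void BufferMapAsyncShim(WGPUBuffer cBuffer,
                            WGPUMapModeFlags mode,
                            size_t offset,
                            size_t size,
                            WGPUBufferMapCallback callback,
                            void* userdata) {
        auto* buffer = reinterpret_cast<dawn_native::BufferBase*>(cBuffer);
        buffer->APIMapAsync(static_cast<wgpu::MapMode>(mode), offset, size, callback, userdata);
    }
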
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
index ea3684ecb96..ba6cec9d320 100644
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -25,10 +25,6 @@ if(BUILD_SHARED_LIBS)
target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_SHARED_LIBRARY")
endif()
-if(DAWN_ENABLE_WGSL)
- target_link_libraries(dawn_native PRIVATE libtint)
-endif()
-
target_sources(dawn_native PRIVATE
"${DAWN_INCLUDE_DIR}/dawn_native/DawnNative.h"
"${DAWN_INCLUDE_DIR}/dawn_native/dawn_native_export.h"
@@ -66,6 +62,8 @@ target_sources(dawn_native PRIVATE
"CommandValidation.h"
"Commands.cpp"
"Commands.h"
+ "CompilationMessages.cpp"
+ "CompilationMessages.h"
"ComputePassEncoder.cpp"
"ComputePassEncoder.h"
"ComputePipeline.cpp"
@@ -92,6 +90,8 @@ target_sources(dawn_native PRIVATE
"ErrorScope.h"
"Extensions.cpp"
"Extensions.h"
+ "ExternalTexture.cpp"
+ "ExternalTexture.h"
"ObjectContentHasher.cpp"
"ObjectContentHasher.h"
"Fence.cpp"
@@ -159,6 +159,8 @@ target_sources(dawn_native PRIVATE
"SwapChain.h"
"Texture.cpp"
"Texture.h"
+ "TintUtils.cpp"
+ "TintUtils.h"
"ToBackend.h"
"Toggles.cpp"
"Toggles.h"
@@ -169,6 +171,7 @@ target_link_libraries(dawn_native
PRIVATE dawn_common
dawn_platform
dawn_internal_config
+ libtint
spirv-cross-core
spirv-cross-glsl
spirv-cross-hlsl
@@ -180,6 +183,10 @@ if (DAWN_USE_X11)
find_package(X11 REQUIRED)
target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
target_include_directories(dawn_native PRIVATE ${X11_INCLUDE_DIR})
+ target_sources(dawn_native PRIVATE
+ "XlibXcbFunctions.cpp"
+ "XlibXcbFunctions.h"
+ )
endif()
if (WIN32)
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index bde0c63252a..f02a20fd57a 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -66,18 +66,26 @@ namespace dawn_native {
const uint32_t mipLevel) {
Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- if (extent.width == copySize.width && extent.height == copySize.height) {
- return true;
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ return extent.width == copySize.width && extent.height == copySize.height;
+ case wgpu::TextureDimension::e3D:
+ return extent.width == copySize.width && extent.height == copySize.height &&
+ extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+ default:
+ UNREACHABLE();
}
- return false;
}
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
const Extent3D& copySize) {
switch (copy.texture->GetDimension()) {
case wgpu::TextureDimension::e2D:
- return {copy.aspect, {copy.origin.z, copySize.depth}, {copy.mipLevel, 1}};
+ return {
+ copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
+ case wgpu::TextureDimension::e3D:
+ return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
default:
UNREACHABLE();
}
@@ -181,7 +189,7 @@ namespace dawn_native {
}
const uint64_t overwrittenRangeSize =
- copyTextureDataSizePerRow * heightInBlocks * copy->copySize.depth;
+ copyTextureDataSizePerRow * heightInBlocks * copy->copySize.depthOrArrayLayers;
if (copy->destination.buffer->GetSize() > overwrittenRangeSize) {
return false;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index 00c8fd04836..7b6d3670782 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -127,10 +127,8 @@ namespace dawn_native {
}
if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
- wgpu::IndexFormat pipelineIndexFormat =
- mLastRenderPipeline->GetVertexStateDescriptor()->indexFormat;
if (!IsStripPrimitiveTopology(mLastRenderPipeline->GetPrimitiveTopology()) ||
- mIndexFormat == pipelineIndexFormat) {
+ mIndexFormat == mLastRenderPipeline->GetStripIndexFormat()) {
mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
}
}
@@ -142,8 +140,7 @@ namespace dawn_native {
}
if (aspects[VALIDATION_ASPECT_INDEX_BUFFER]) {
- wgpu::IndexFormat pipelineIndexFormat =
- mLastRenderPipeline->GetVertexStateDescriptor()->indexFormat;
+ wgpu::IndexFormat pipelineIndexFormat = mLastRenderPipeline->GetStripIndexFormat();
if (!mIndexBufferSet) {
return DAWN_VALIDATION_ERROR("Missing index buffer");
} else if (IsStripPrimitiveTopology(mLastRenderPipeline->GetPrimitiveTopology()) &&
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 26992a8bedb..55fc22778ab 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -77,9 +77,10 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const TextureCopyView& src) {
+ MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
+ const ImageCopyTexture& src) {
Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByTextureCopyView(src));
+ DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
if (aspectUsed == Aspect::Depth) {
switch (src.texture->GetFormat().format) {
case wgpu::TextureFormat::Depth24Plus:
@@ -398,11 +399,11 @@ namespace dawn_native {
return {};
}
- void EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
- QuerySetBase* querySet,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
+ MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
+ QuerySetBase* querySet,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
DeviceBase* device = encoder->GetDevice();
            // The availability obtained from the query set is a reference to vector<bool>; we need to convert
@@ -415,10 +416,12 @@ namespace dawn_native {
BufferDescriptor availabilityDesc = {};
availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
Ref<BufferBase> availabilityBuffer =
- AcquireRef(device->CreateBuffer(&availabilityDesc));
- device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
- availability.size() * sizeof(uint32_t));
+ AcquireRef(device->APICreateBuffer(&availabilityDesc));
+ DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
+ availability.data(),
+ availability.size() * sizeof(uint32_t)));
// Timestamp params uniform buffer
TimestampParams params = {queryCount, static_cast<uint32_t>(destinationOffset),
@@ -426,11 +429,14 @@ namespace dawn_native {
BufferDescriptor parmsDesc = {};
parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
parmsDesc.size = sizeof(params);
- Ref<BufferBase> paramsBuffer = AcquireRef(device->CreateBuffer(&parmsDesc));
- device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BufferBase> paramsBuffer = AcquireRef(device->APICreateBuffer(&parmsDesc));
+ DAWN_TRY(
+ device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
paramsBuffer.Get());
+ return {};
}
} // namespace
@@ -461,21 +467,13 @@ namespace dawn_native {
}
// Set the query at queryIndex to available for resolving in query set.
- querySet->SetQueryAvailability(queryIndex, 1);
-
- // Gets the iterator for that querySet or create a new vector of bool set to false
- // if the querySet wasn't registered.
- auto it = mQueryAvailabilityMap.emplace(querySet, querySet->GetQueryCount()).first;
- it->second[queryIndex] = 1;
- }
-
- const QueryAvailabilityMap& CommandEncoder::GetQueryAvailabilityMap() const {
- return mQueryAvailabilityMap;
+ querySet->SetQueryAvailability(queryIndex, true);
}
// Implementation of the API's command recording methods
- ComputePassEncoder* CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
+ ComputePassEncoder* CommandEncoder::APIBeginComputePass(
+ const ComputePassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
bool success =
@@ -497,7 +495,7 @@ namespace dawn_native {
return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
}
- RenderPassEncoder* CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
+ RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
PassResourceUsageTracker usageTracker(PassType::Render);
@@ -580,11 +578,11 @@ namespace dawn_native {
return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
}
- void CommandEncoder::CopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
+ void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source));
@@ -621,15 +619,18 @@ namespace dawn_native {
});
}
- void CommandEncoder::CopyBufferToTexture(const BufferCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ Extent3D fixedCopySize = *copySize;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
+
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateBufferCopyView(GetDevice(), *source));
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
DAWN_TRY(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc));
- DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, fixedCopySize));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
@@ -638,25 +639,26 @@ namespace dawn_native {
// because in the latter we divide copyExtent.width by blockWidth and
// copyExtent.height by blockHeight while the divisibility conditions are
// checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(*destination, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, fixedCopySize));
}
const TexelBlockInfo& blockInfo =
destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
TextureDataLayout srcLayout = FixUpDeprecatedTextureDataLayoutOptions(
- GetDevice(), source->layout, blockInfo, *copySize);
+ GetDevice(), source->layout, blockInfo, fixedCopySize);
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(ValidateLinearTextureCopyOffset(srcLayout, blockInfo));
DAWN_TRY(ValidateLinearTextureData(srcLayout, source->buffer->GetSize(), blockInfo,
- *copySize));
+ fixedCopySize));
mTopLevelBuffers.insert(source->buffer);
mTopLevelTextures.insert(destination->texture);
}
- ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+ ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, fixedCopySize);
// Skip noop copies.
- if (copySize->width != 0 && copySize->height != 0 && copySize->depth != 0) {
+ if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
+ fixedCopySize.depthOrArrayLayers != 0) {
// Record the copy command.
CopyBufferToTextureCmd* copy =
allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
@@ -669,49 +671,53 @@ namespace dawn_native {
copy->destination.mipLevel = destination->mipLevel;
copy->destination.aspect =
ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ copy->copySize = fixedCopySize;
}
return {};
});
}
- void CommandEncoder::CopyTextureToBuffer(const TextureCopyView* source,
- const BufferCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ Extent3D fixedCopySize = *copySize;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
+
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureCopyView(GetDevice(), *source, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, fixedCopySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
- DAWN_TRY(ValidateBufferCopyView(GetDevice(), *destination));
+ DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
DAWN_TRY(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst));
// We validate texture copy range before validating linear texture data,
// because in the latter we divide copyExtent.width by blockWidth and
// copyExtent.height by blockHeight while the divisibility conditions are
// checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(*source, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, fixedCopySize));
}
const TexelBlockInfo& blockInfo =
source->texture->GetFormat().GetAspectInfo(source->aspect).block;
TextureDataLayout dstLayout = FixUpDeprecatedTextureDataLayoutOptions(
- GetDevice(), destination->layout, blockInfo, *copySize);
+ GetDevice(), destination->layout, blockInfo, fixedCopySize);
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(ValidateLinearTextureCopyOffset(dstLayout, blockInfo));
DAWN_TRY(ValidateLinearTextureData(dstLayout, destination->buffer->GetSize(),
- blockInfo, *copySize));
+ blockInfo, fixedCopySize));
mTopLevelTextures.insert(source->texture);
mTopLevelBuffers.insert(destination->buffer);
}
- ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+ ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, fixedCopySize);
// Skip noop copies.
- if (copySize->width != 0 && copySize->height != 0 && copySize->depth != 0) {
+ if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
+ fixedCopySize.depthOrArrayLayers != 0) {
// Record the copy command.
CopyTextureToBufferCmd* copy =
allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
@@ -723,29 +729,31 @@ namespace dawn_native {
copy->destination.offset = dstLayout.offset;
copy->destination.bytesPerRow = dstLayout.bytesPerRow;
copy->destination.rowsPerImage = dstLayout.rowsPerImage;
- copy->copySize = *copySize;
+ copy->copySize = fixedCopySize;
}
return {};
});
}
- void CommandEncoder::CopyTextureToTexture(const TextureCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ Extent3D fixedCopySize = *copySize;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
- DAWN_TRY(ValidateTextureCopyView(GetDevice(), *source, *copySize));
- DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, fixedCopySize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, fixedCopySize));
DAWN_TRY(
- ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+ ValidateTextureToTextureCopyRestrictions(*source, *destination, fixedCopySize));
- DAWN_TRY(ValidateTextureCopyRange(*source, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(*destination, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, fixedCopySize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, fixedCopySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
@@ -755,7 +763,8 @@ namespace dawn_native {
}
// Skip noop copies.
- if (copySize->width != 0 && copySize->height != 0 && copySize->depth != 0) {
+ if (fixedCopySize.width != 0 && fixedCopySize.height != 0 &&
+ fixedCopySize.depthOrArrayLayers != 0) {
CopyTextureToTextureCmd* copy =
allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
copy->source.texture = source->texture;
@@ -767,20 +776,20 @@ namespace dawn_native {
copy->destination.mipLevel = destination->mipLevel;
copy->destination.aspect =
ConvertAspect(destination->texture->GetFormat(), destination->aspect);
- copy->copySize = *copySize;
+ copy->copySize = fixedCopySize;
}
return {};
});
}
- void CommandEncoder::InjectValidationError(const char* message) {
+ void CommandEncoder::APIInjectValidationError(const char* message) {
if (mEncodingContext.CheckCurrentEncoder(this)) {
mEncodingContext.HandleError(InternalErrorType::Validation, message);
}
}
- void CommandEncoder::InsertDebugMarker(const char* groupLabel) {
+ void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
InsertDebugMarkerCmd* cmd =
allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
@@ -793,7 +802,7 @@ namespace dawn_native {
});
}
- void CommandEncoder::PopDebugGroup() {
+ void CommandEncoder::APIPopDebugGroup() {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (GetDevice()->IsValidationEnabled()) {
if (mDebugGroupStackSize == 0) {
@@ -807,7 +816,7 @@ namespace dawn_native {
});
}
- void CommandEncoder::PushDebugGroup(const char* groupLabel) {
+ void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
PushDebugGroupCmd* cmd =
allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
@@ -822,11 +831,11 @@ namespace dawn_native {
});
}
- void CommandEncoder::ResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset) {
+ void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(querySet));
@@ -850,17 +859,16 @@ namespace dawn_native {
cmd->destinationOffset = destinationOffset;
// Encode internal compute pipeline for timestamp query
- if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
- GetDevice()->IsToggleEnabled(Toggle::ConvertTimestampsToNanoseconds)) {
- EncodeTimestampsToNanosecondsConversion(this, querySet, queryCount, destination,
- destinationOffset);
+ if (querySet->GetQueryType() == wgpu::QueryType::Timestamp) {
+ DAWN_TRY(EncodeTimestampsToNanosecondsConversion(this, querySet, queryCount, destination,
+ destinationOffset));
}
return {};
});
}
- void CommandEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(querySet));
@@ -878,19 +886,29 @@ namespace dawn_native {
});
}
- CommandBufferBase* CommandEncoder::Finish(const CommandBufferDescriptor* descriptor) {
+ CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
+ Ref<CommandBufferBase> commandBuffer;
+ if (GetDevice()->ConsumedError(FinishInternal(descriptor), &commandBuffer)) {
+ return CommandBufferBase::MakeError(GetDevice());
+ }
+ ASSERT(!IsError());
+ return commandBuffer.Detach();
+ }
+
+ ResultOrError<Ref<CommandBufferBase>> CommandEncoder::FinishInternal(
+ const CommandBufferDescriptor* descriptor) {
DeviceBase* device = GetDevice();
+
// Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
// state of the encoding context. The internal state is set to finished, and subsequent
// calls to encode commands will generate errors.
- if (device->ConsumedError(mEncodingContext.Finish()) ||
- device->ConsumedError(device->ValidateIsAlive()) ||
- (device->IsValidationEnabled() &&
- device->ConsumedError(ValidateFinish(mEncodingContext.GetIterator(),
- mEncodingContext.GetPassUsages())))) {
- return CommandBufferBase::MakeError(device);
+ DAWN_TRY(mEncodingContext.Finish());
+ DAWN_TRY(device->ValidateIsAlive());
+
+ if (device->IsValidationEnabled()) {
+ DAWN_TRY(
+ ValidateFinish(mEncodingContext.GetIterator(), mEncodingContext.GetPassUsages()));
}
- ASSERT(!IsError());
return device->CreateCommandBuffer(this, descriptor);
}
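The APIFinish()/FinishInternal() split above follows a pattern used throughout this patch: the internal method returns a ResultOrError and propagates failures with DAWN_TRY, while the public API*() entry point consumes any error and hands back an error object instead. Below is a minimal, self-contained sketch of that shape; the type and function names are illustrative stand-ins, not Dawn's real definitions.

    // error_pattern_sketch.cpp -- illustrative only, assumed names.
    #include <iostream>
    #include <optional>
    #include <string>

    struct CommandBuffer { bool isError = false; };

    // Internal step: returns either a value or (via outError) an error message.
    std::optional<CommandBuffer> FinishInternal(std::string* outError, bool fail) {
        if (fail) {
            *outError = "validation failed";
            return std::nullopt;
        }
        return CommandBuffer{};
    }

    // Public entry point: never propagates the error; on failure it returns an error object.
    CommandBuffer APIFinish(bool fail) {
        std::string error;
        if (std::optional<CommandBuffer> cb = FinishInternal(&error, fail)) {
            return *cb;
        }
        std::cout << "consumed error: " << error << "\n";  // analogue of device->ConsumedError(...)
        return CommandBuffer{true};                        // analogue of CommandBufferBase::MakeError(...)
    }

    int main() {
        std::cout << APIFinish(false).isError << " " << APIFinish(true).isError << "\n";
    }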
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index 66983962655..a8bf6a0d288 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -22,13 +22,10 @@
#include "dawn_native/ObjectBase.h"
#include "dawn_native/PassResourceUsage.h"
-#include <map>
#include <string>
namespace dawn_native {
- using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
-
class CommandEncoder final : public ObjectBase {
public:
CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
@@ -38,42 +35,44 @@ namespace dawn_native {
void TrackUsedQuerySet(QuerySetBase* querySet);
void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
- const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
// Dawn API
- ComputePassEncoder* BeginComputePass(const ComputePassDescriptor* descriptor);
- RenderPassEncoder* BeginRenderPass(const RenderPassDescriptor* descriptor);
-
- void CopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
+ ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
+ RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
+
+ void APICopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+ void APICopyBufferToTexture(const ImageCopyBuffer* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToBuffer(const ImageCopyTexture* source,
+ const ImageCopyBuffer* destination,
+ const Extent3D* copySize);
+ void APICopyTextureToTexture(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize);
+
+ void APIInjectValidationError(const char* message);
+ void APIInsertDebugMarker(const char* groupLabel);
+ void APIPopDebugGroup();
+ void APIPushDebugGroup(const char* groupLabel);
+
+ void APIResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size);
- void CopyBufferToTexture(const BufferCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize);
- void CopyTextureToBuffer(const TextureCopyView* source,
- const BufferCopyView* destination,
- const Extent3D* copySize);
- void CopyTextureToTexture(const TextureCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize);
-
- void InjectValidationError(const char* message);
- void InsertDebugMarker(const char* groupLabel);
- void PopDebugGroup();
- void PushDebugGroup(const char* groupLabel);
-
- void ResolveQuerySet(QuerySetBase* querySet,
- uint32_t firstQuery,
- uint32_t queryCount,
- BufferBase* destination,
- uint64_t destinationOffset);
- void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
-
- CommandBufferBase* Finish(const CommandBufferDescriptor* descriptor = nullptr);
+ uint64_t destinationOffset);
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
+ CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
private:
+ ResultOrError<Ref<CommandBufferBase>> FinishInternal(
+ const CommandBufferDescriptor* descriptor);
+
MaybeError ValidateFinish(CommandIterator* commands,
const PerPassUsages& perPassUsages) const;
@@ -81,7 +80,6 @@ namespace dawn_native {
std::set<BufferBase*> mTopLevelBuffers;
std::set<TextureBase*> mTopLevelTextures;
std::set<QuerySetBase*> mUsedQuerySets;
- QueryAvailabilityMap mQueryAvailabilityMap;
uint64_t mDebugGroupStackSize = 0;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 174510b3b80..1e2fc38d3da 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -24,6 +24,7 @@
#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
+#include "dawn_native/ValidationUtils_autogen.h"
namespace dawn_native {
@@ -103,7 +104,7 @@ namespace dawn_native {
uint32_t heightInBlocks = copySize.height / blockInfo.height;
uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
- if (copySize.depth == 0) {
+ if (copySize.depthOrArrayLayers == 0) {
return 0;
}
@@ -122,14 +123,14 @@ namespace dawn_native {
//
// This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
// computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
- ASSERT(copySize.depth <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
- rowsPerImage != wgpu::kCopyStrideUndefined));
+ ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
+ rowsPerImage != wgpu::kCopyStrideUndefined));
uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
- if (bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depth) {
+ if (bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers) {
return DAWN_VALIDATION_ERROR("requiredBytesInCopy is too large.");
}
- uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depth - 1);
+ uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
if (heightInBlocks > 0) {
ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
@@ -159,14 +160,14 @@ namespace dawn_native {
TextureDataLayout layout = originalLayout;
if (copyExtent.height != 0 && layout.rowsPerImage == 0) {
- if (copyExtent.depth > 1) {
+ if (copyExtent.depthOrArrayLayers > 1) {
device->EmitDeprecationWarning(
"rowsPerImage soon must be non-zero if copy depth > 1 (it will no longer "
"default to the copy height).");
ASSERT(copyExtent.height % blockInfo.height == 0);
uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
layout.rowsPerImage = heightInBlocks;
- } else if (copyExtent.depth == 1) {
+ } else if (copyExtent.depthOrArrayLayers == 1) {
device->EmitDeprecationWarning(
"rowsPerImage soon must be non-zero or unspecified if copy depth == 1 (it will "
"no longer default to the copy height).");
@@ -179,7 +180,7 @@ namespace dawn_native {
ASSERT(copyExtent.width % blockInfo.width == 0);
uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
- if (copyExtent.height == 1 && copyExtent.depth == 1 &&
+ if (copyExtent.height == 1 && copyExtent.depthOrArrayLayers == 1 &&
bytesInLastRow > layout.bytesPerRow) {
device->EmitDeprecationWarning(
"Soon, even if copy height == 1, bytesPerRow must be >= the byte size of each row "
@@ -203,11 +204,11 @@ namespace dawn_native {
uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
- ASSERT(heightInBlocks <= 1 && copyExtent.depth <= 1);
+ ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
}
if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
- ASSERT(copyExtent.depth <= 1);
+ ASSERT(copyExtent.depthOrArrayLayers <= 1);
layout->rowsPerImage = heightInBlocks;
}
}
@@ -219,8 +220,9 @@ namespace dawn_native {
ASSERT(copyExtent.height % blockInfo.height == 0);
uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
- if (copyExtent.depth > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
- layout.rowsPerImage == wgpu::kCopyStrideUndefined)) {
+ if (copyExtent.depthOrArrayLayers > 1 &&
+ (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+ layout.rowsPerImage == wgpu::kCopyStrideUndefined)) {
return DAWN_VALIDATION_ERROR(
"If copy depth > 1, bytesPerRow and rowsPerImage must be specified.");
}
@@ -265,11 +267,11 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateBufferCopyView(DeviceBase const* device,
- const BufferCopyView& bufferCopyView) {
- DAWN_TRY(device->ValidateObject(bufferCopyView.buffer));
- if (bufferCopyView.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
- if (bufferCopyView.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0) {
+ MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+ const ImageCopyBuffer& imageCopyBuffer) {
+ DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
+ if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
+ if (imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0) {
return DAWN_VALIDATION_ERROR("bytesPerRow must be a multiple of 256");
}
}
@@ -277,15 +279,16 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateTextureCopyView(DeviceBase const* device,
- const TextureCopyView& textureCopy,
- const Extent3D& copySize) {
+ MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+ const ImageCopyTexture& textureCopy,
+ const Extent3D& copySize) {
const TextureBase* texture = textureCopy.texture;
DAWN_TRY(device->ValidateObject(texture));
if (textureCopy.mipLevel >= texture->GetNumMipLevels()) {
return DAWN_VALIDATION_ERROR("mipLevel out of range");
}
+ DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
if (SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None) {
return DAWN_VALIDATION_ERROR("Texture does not have selected aspect for texture copy.");
}
@@ -305,27 +308,38 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateTextureCopyRange(const TextureCopyView& textureCopy,
+ MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+ const ImageCopyTexture& textureCopy,
const Extent3D& copySize) {
// TODO(jiawei.shao@intel.com): add validations on the texture-to-texture copies within the
// same texture.
const TextureBase* texture = textureCopy.texture;
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+ // Disallow copy to/from a 3D texture as unsafe until it is fully implemented.
+ if (texture->GetDimension() == wgpu::TextureDimension::e3D &&
+ device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+ return DAWN_VALIDATION_ERROR(
+ "Copy to/from a 3D texture is disallowed because it is not fully implemented");
+ }
+
// Validation for the copy being in-bounds:
Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- // For 2D textures, include the array layer as depth so it can be checked with other
+ // For 1D/2D textures, include the array layer as depth so it can be checked with other
// dimensions.
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mipSize.depth = texture->GetArrayLayers();
-
+ if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
+ mipSize.depthOrArrayLayers = texture->GetArrayLayers();
+ }
// All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
// overflows.
if (static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
static_cast<uint64_t>(mipSize.width) ||
static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
static_cast<uint64_t>(mipSize.height) ||
- static_cast<uint64_t>(textureCopy.origin.z) + static_cast<uint64_t>(copySize.depth) >
- static_cast<uint64_t>(mipSize.depth)) {
+ static_cast<uint64_t>(textureCopy.origin.z) +
+ static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+ static_cast<uint64_t>(mipSize.depthOrArrayLayers)) {
return DAWN_VALIDATION_ERROR("Touching outside of the texture");
}
@@ -357,7 +371,7 @@ namespace dawn_native {
// Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
// formats).
- ResultOrError<Aspect> SingleAspectUsedByTextureCopyView(const TextureCopyView& view) {
+ ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
const Format& format = view.texture->GetFormat();
switch (view.aspect) {
case wgpu::TextureAspect::All:
@@ -382,9 +396,9 @@ namespace dawn_native {
}
}
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const TextureCopyView& dst) {
+ MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
Aspect aspectUsed;
- DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByTextureCopyView(dst));
+ DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
if (aspectUsed == Aspect::Depth) {
return DAWN_VALIDATION_ERROR("Cannot copy into the depth aspect of a texture");
}
@@ -392,9 +406,9 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateTextureToTextureCopyRestrictions(const TextureCopyView& src,
- const TextureCopyView& dst,
- const Extent3D& copySize) {
+ MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize) {
const uint32_t srcSamples = src.texture->GetSampleCount();
const uint32_t dstSamples = dst.texture->GetSampleCount();
@@ -403,11 +417,6 @@ namespace dawn_native {
"Source and destination textures must have matching sample counts.");
}
- if (src.texture->GetFormat().format != dst.texture->GetFormat().format) {
- // Metal requires texture-to-texture copies be the same format
- return DAWN_VALIDATION_ERROR("Source and destination texture formats must match.");
- }
-
// Metal cannot select a single aspect for texture-to-texture copies.
const Format& format = src.texture->GetFormat();
if (SelectFormatAspects(format, src.aspect) != format.aspects) {
@@ -422,7 +431,7 @@ namespace dawn_native {
if (src.texture == dst.texture && src.mipLevel == dst.mipLevel) {
ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D &&
dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
- if (IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depth)) {
+ if (IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers)) {
return DAWN_VALIDATION_ERROR(
"Copy subresources cannot be overlapped when copying within the same "
"texture.");
@@ -432,6 +441,34 @@ namespace dawn_native {
return {};
}
+ MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize) {
+ if (src.texture->GetFormat().format != dst.texture->GetFormat().format) {
+ // Metal requires texture-to-texture copies be the same format
+ return DAWN_VALIDATION_ERROR("Source and destination texture formats must match.");
+ }
+
+ return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+ }
+
+    // CopyTextureForBrowser can handle color conversion during the copy, and it
+    // requires that the source be sampleable and that the destination be writable
+    // via a render pass.
+ MaybeError ValidateCopyTextureForBrowserRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize) {
+ if (!(src.texture->GetUsage() & wgpu::TextureUsage::Sampled)) {
+ return DAWN_VALIDATION_ERROR("Source texture must have sampled usage");
+ }
+
+ if (!(dst.texture->GetUsage() & wgpu::TextureUsage::OutputAttachment)) {
+ return DAWN_VALIDATION_ERROR("Dest texture must have outputAttachment usage");
+ }
+
+ return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+ }
+
MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
ASSERT(wgpu::HasZeroOrOneBits(usage));
if (!(texture->GetUsage() & usage)) {
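The requiredBytesInCopy computation validated above reduces to: (depthOrArrayLayers - 1) full images, plus (heightInBlocks - 1) full rows in the last image, plus the bytes of the last (possibly partial) row. A rough standalone sketch with a worked example follows; the names are assumed for illustration, and the real code additionally guards each multiplication against 64-bit overflow.

    #include <cstdint>
    #include <cstdio>

    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow, uint64_t rowsPerImage,
                                 uint64_t widthInBlocks, uint64_t heightInBlocks,
                                 uint64_t depthOrArrayLayers, uint64_t bytesPerBlock) {
        if (depthOrArrayLayers == 0) return 0;
        uint64_t bytesInLastRow = widthInBlocks * bytesPerBlock;
        uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
        // All images except the last one occupy the full image stride.
        uint64_t required = bytesPerImage * (depthOrArrayLayers - 1);
        if (heightInBlocks > 0) {
            // Last image: all rows but the final one use the full row stride.
            required += bytesPerRow * (heightInBlocks - 1) + bytesInLastRow;
        }
        return required;
    }

    int main() {
        // 16x16 RGBA8 copy (64 bytes of texels per row), bytesPerRow = 256,
        // rowsPerImage = 16, 2 array layers: 4096 + 3840 + 64 = 8000 bytes.
        std::printf("%llu\n", (unsigned long long)RequiredBytesInCopy(256, 16, 16, 16, 2, 4));
    }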
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index f0fbb105a68..9c8a42f5ce4 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -48,16 +48,17 @@ namespace dawn_native {
uint64_t byteSize,
const TexelBlockInfo& blockInfo,
const Extent3D& copyExtent);
- MaybeError ValidateTextureCopyRange(const TextureCopyView& textureCopyView,
+ MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+ const ImageCopyTexture& imageCopyTexture,
const Extent3D& copySize);
- ResultOrError<Aspect> SingleAspectUsedByTextureCopyView(const TextureCopyView& view);
- MaybeError ValidateLinearToDepthStencilCopyRestrictions(const TextureCopyView& dst);
+ ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+ MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
- MaybeError ValidateBufferCopyView(DeviceBase const* device,
- const BufferCopyView& bufferCopyView);
- MaybeError ValidateTextureCopyView(DeviceBase const* device,
- const TextureCopyView& textureCopyView,
- const Extent3D& copySize);
+ MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+ const ImageCopyBuffer& imageCopyBuffer);
+ MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+ const ImageCopyTexture& imageCopyTexture,
+ const Extent3D& copySize);
MaybeError ValidateRowsPerImage(const Format& format,
uint32_t rowsPerImage,
@@ -71,10 +72,14 @@ namespace dawn_native {
bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
- MaybeError ValidateTextureToTextureCopyRestrictions(const TextureCopyView& src,
- const TextureCopyView& dst,
+ MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
const Extent3D& copySize);
+ MaybeError ValidateCopyTextureForBrowserRestrictions(const ImageCopyTexture& src,
+ const ImageCopyTexture& dst,
+ const Extent3D& copySize);
+
MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage);
MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
new file mode 100644
index 00000000000..f3196541d63
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.cpp
@@ -0,0 +1,102 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/CompilationMessages.h"
+
+#include "common/Assert.h"
+#include "dawn_native/dawn_platform.h"
+
+#include <tint/tint.h>
+
+namespace dawn_native {
+
+ namespace {
+
+ WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+ switch (severity) {
+ case tint::diag::Severity::Note:
+ return WGPUCompilationMessageType_Info;
+ case tint::diag::Severity::Warning:
+ return WGPUCompilationMessageType_Warning;
+ default:
+ return WGPUCompilationMessageType_Error;
+ }
+ }
+
+ } // anonymous namespace
+
+ OwnedCompilationMessages::OwnedCompilationMessages() {
+ mCompilationInfo.messageCount = 0;
+ mCompilationInfo.messages = nullptr;
+ }
+
+ void OwnedCompilationMessages::AddMessage(std::string message,
+ wgpu::CompilationMessageType type,
+ uint64_t lineNum,
+ uint64_t linePos) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ mMessageStrings.push_back(message);
+ mMessages.push_back(
+ {nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum, linePos});
+ }
+
+ void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ if (diagnostic.code) {
+ mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
+ } else {
+ mMessageStrings.push_back(diagnostic.message);
+ }
+ mMessages.push_back({nullptr, tintSeverityToMessageType(diagnostic.severity),
+ diagnostic.source.range.begin.line,
+ diagnostic.source.range.begin.column});
+ }
+
+ void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+ // Cannot add messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ for (const auto& diag : diagnostics) {
+ AddMessage(diag);
+ }
+ }
+
+ void OwnedCompilationMessages::ClearMessages() {
+ // Cannot clear messages after GetCompilationInfo has been called.
+ ASSERT(mCompilationInfo.messages == nullptr);
+
+ mMessageStrings.clear();
+ mMessages.clear();
+ }
+
+ const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+ mCompilationInfo.messageCount = mMessages.size();
+ mCompilationInfo.messages = mMessages.data();
+
+ // Ensure every message points at the correct message string. Cannot do this earlier, since
+ // vector reallocations may move the pointers around.
+ for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
+ WGPUCompilationMessage& message = mMessages[i];
+ std::string& messageString = mMessageStrings[i];
+ message.message = messageString.c_str();
+ }
+
+ return &mCompilationInfo;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
new file mode 100644
index 00000000000..02c449d0c0e
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CompilationMessages.h
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_COMPILATIONMESSAGES_H_
+#define DAWNNATIVE_COMPILATIONMESSAGES_H_
+
+#include "dawn_native/dawn_platform.h"
+
+#include "common/NonCopyable.h"
+
+#include <string>
+#include <vector>
+
+namespace tint { namespace diag {
+ class Diagnostic;
+ class List;
+}} // namespace tint::diag
+
+namespace dawn_native {
+
+ class OwnedCompilationMessages : public NonCopyable {
+ public:
+ OwnedCompilationMessages();
+ ~OwnedCompilationMessages() = default;
+
+ void AddMessage(std::string message,
+ wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
+ uint64_t lineNum = 0,
+ uint64_t linePos = 0);
+ void AddMessage(const tint::diag::Diagnostic& diagnostic);
+ void AddMessages(const tint::diag::List& diagnostics);
+ void ClearMessages();
+
+ const WGPUCompilationInfo* GetCompilationInfo();
+
+ private:
+ WGPUCompilationInfo mCompilationInfo;
+ std::vector<std::string> mMessageStrings;
+ std::vector<WGPUCompilationMessage> mMessages;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_COMPILATIONMESSAGES_H_
\ No newline at end of file
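To show how the lazily fixed-up pointers from GetCompilationInfo() above are meant to be consumed, here is a small sketch. The struct definitions are simplified stand-ins for WGPUCompilationInfo/WGPUCompilationMessage (fields taken from the code above), not the real webgpu.h declarations.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct MessageStandIn {            // stands in for WGPUCompilationMessage
        const char* message;
        int type;                      // info / warning / error
        uint64_t lineNum;
        uint64_t linePos;
    };

    struct InfoStandIn {               // stands in for WGPUCompilationInfo
        size_t messageCount;
        const MessageStandIn* messages;
    };

    void PrintCompilationInfo(const InfoStandIn* info) {
        // Walk the message array; each entry already points at its owned string.
        for (size_t i = 0; i < info->messageCount; ++i) {
            const MessageStandIn& m = info->messages[i];
            std::printf("%llu:%llu [%d] %s\n",
                        (unsigned long long)m.lineNum,
                        (unsigned long long)m.linePos, m.type, m.message);
        }
    }

    int main() {
        std::vector<MessageStandIn> msgs = {{"unused variable 'x'", 1, 12, 5}};
        InfoStandIn info{msgs.size(), msgs.data()};
        PrintCompilationInfo(&info);
    }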
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
index 057ef3f3d69..04bb8ff56fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
@@ -45,7 +45,7 @@ namespace dawn_native {
return new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
}
- void ComputePassEncoder::EndPass() {
+ void ComputePassEncoder::APIEndPass() {
if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(ValidateProgrammableEncoderEnd());
@@ -59,7 +59,7 @@ namespace dawn_native {
}
}
- void ComputePassEncoder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
+ void ComputePassEncoder::APIDispatch(uint32_t x, uint32_t y, uint32_t z) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
@@ -78,7 +78,8 @@ namespace dawn_native {
});
}
- void ComputePassEncoder::DispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+ void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
@@ -116,7 +117,7 @@ namespace dawn_native {
});
}
- void ComputePassEncoder::SetPipeline(ComputePipelineBase* pipeline) {
+ void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(pipeline));
@@ -132,7 +133,7 @@ namespace dawn_native {
});
}
- void ComputePassEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(querySet));
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
index 0f99462f3ca..fcff7a9f965 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
@@ -30,13 +30,13 @@ namespace dawn_native {
CommandEncoder* commandEncoder,
EncodingContext* encodingContext);
- void EndPass();
+ void APIEndPass();
- void Dispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1);
- void DispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void SetPipeline(ComputePipelineBase* pipeline);
+ void APIDispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1);
+ void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APISetPipeline(ComputePipelineBase* pipeline);
- void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
protected:
ComputePassEncoder(DeviceBase* device,
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
index e32e62abf0f..731f4969aa9 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
@@ -29,8 +29,9 @@ namespace dawn_native {
DAWN_TRY(device->ValidateObject(descriptor->layout));
}
- DAWN_TRY(ValidateProgrammableStageDescriptor(
- device, &descriptor->computeStage, descriptor->layout, SingleShaderStage::Compute));
+ DAWN_TRY(ValidateProgrammableStage(device, descriptor->computeStage.module,
+ descriptor->computeStage.entryPoint, descriptor->layout,
+ SingleShaderStage::Compute));
return {};
}
@@ -40,7 +41,8 @@ namespace dawn_native {
const ComputePipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
- {{SingleShaderStage::Compute, &descriptor->computeStage}}) {
+ {{SingleShaderStage::Compute, descriptor->computeStage.module,
+ descriptor->computeStage.entryPoint}}) {
}
ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
index cd1fac673d0..1dc232989d1 100644
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.cpp
@@ -32,11 +32,11 @@
namespace dawn_native {
namespace {
-        // TODO(shaobo.yan@intel.com) : Support premultiply-alpha, flipY.
+        // TODO(shaobo.yan@intel.com) : Support premultiply-alpha
static const char sCopyTextureForBrowserVertex[] = R"(
[[block]] struct Uniforms {
- [[offset(0)]] u_scale : vec2<f32>;
- [[offset(8)]] u_offset : vec2<f32>;
+ u_scale : vec2<f32>;
+ u_offset : vec2<f32>;
};
const texcoord : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
vec2<f32>(-0.5, 0.0),
@@ -49,32 +49,53 @@ namespace dawn_native {
[[stage(vertex)]] fn main() -> void {
Position = vec4<f32>((texcoord[VertexIndex] * 2.0 - vec2<f32>(1.0, 1.0)), 0.0, 1.0);
+            // The Y component of scale is calculated as copySizeHeight / textureHeight. Only
+            // the flipY case can produce a negative number.
+ var flipY : bool = uniforms.u_scale.y < 0.0;
+
// Texture coordinate takes top-left as origin point. We need to map the
// texture to triangle carefully.
- v_texcoord = (texcoord[VertexIndex] * vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
- uniforms.u_scale + uniforms.u_offset;
+ if (flipY) {
+                // We need the mirror positions (mirrored about y = 0.5) in the flip case.
+                // Applying the transform to the src texture and then mapping it to triangle
+                // coordinates with a +1 shift on the Y dimension gives us that mirror position.
+ v_texcoord = (texcoord[VertexIndex] * uniforms.u_scale + uniforms.u_offset) *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0);
+ } else {
+                // For the normal case, we need the exact position.
+                // So map the texture to the triangle first, then apply the transform.
+ v_texcoord = (texcoord[VertexIndex] *
+ vec2<f32>(1.0, -1.0) + vec2<f32>(0.0, 1.0)) *
+ uniforms.u_scale + uniforms.u_offset;
+ }
}
)";
- static const char sPassthrough2D4ChannelFrag[] = R"(
+ static const char sCopyTextureForBrowserFragment[] = R"(
[[binding(1), group(0)]] var mySampler: sampler;
[[binding(2), group(0)]] var myTexture: texture_2d<f32>;
[[location(0)]] var<in> v_texcoord : vec2<f32>;
- [[location(0)]] var<out> rgbaColor : vec4<f32>;
+ [[location(0)]] var<out> outputColor : vec4<f32>;
[[stage(fragment)]] fn main() -> void {
// Clamp the texcoord and discard the out-of-bound pixels.
var clampedTexcoord : vec2<f32> =
clamp(v_texcoord, vec2<f32>(0.0, 0.0), vec2<f32>(1.0, 1.0));
if (all(clampedTexcoord == v_texcoord)) {
- rgbaColor = textureSample(myTexture, mySampler, v_texcoord);
+ var srcColor : vec4<f32> = textureSample(myTexture, mySampler, v_texcoord);
+ // Swizzling of texture formats when sampling / rendering is handled by the
+ // hardware so we don't need special logic in this shader. This is covered by tests.
+ outputColor = srcColor;
}
}
)";
- // TODO(shaobo.yan@intel.com): Expand supported texture formats
+        // TODO(shaobo.yan@intel.com): Expand copyTextureForBrowser to support any
+        // non-depth, non-stencil, non-compressed texture format pair copy. For now this API
+        // supports the normal format pairs used by CopyImageBitmapToTexture.
MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
const wgpu::TextureFormat dstFormat) {
switch (srcFormat) {
+ case wgpu::TextureFormat::BGRA8Unorm:
case wgpu::TextureFormat::RGBA8Unorm:
break;
default:
@@ -84,6 +105,12 @@ namespace dawn_native {
switch (dstFormat) {
case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGB10A2Unorm:
break;
default:
return DAWN_VALIDATION_ERROR(
@@ -103,10 +130,21 @@ namespace dawn_native {
return {};
}
- RenderPipelineBase* GetOrCreateCopyTextureForBrowserPipeline(DeviceBase* device) {
+ RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
+ wgpu::TextureFormat dstFormat) {
+ auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
+ if (pipeline != store->copyTextureForBrowserPipelines.end()) {
+ return pipeline->second.Get();
+ }
+ return nullptr;
+ }
+
+ RenderPipelineBase* GetOrCreateCopyTextureForBrowserPipeline(
+ DeviceBase* device,
+ wgpu::TextureFormat dstFormat) {
InternalPipelineStore* store = device->GetInternalPipelineStore();
- if (store->copyTextureForBrowserPipeline == nullptr) {
+ if (GetCachedPipeline(store, dstFormat) == nullptr) {
// Create vertex shader module if not cached before.
if (store->copyTextureForBrowserVS == nullptr) {
ShaderModuleDescriptor descriptor;
@@ -114,8 +152,9 @@ namespace dawn_native {
wgslDesc.source = sCopyTextureForBrowserVertex;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
store->copyTextureForBrowserVS =
- AcquireRef(device->CreateShaderModule(&descriptor));
+ AcquireRef(device->APICreateShaderModule(&descriptor));
}
ShaderModuleBase* vertexModule = store->copyTextureForBrowserVS.Get();
@@ -124,66 +163,68 @@ namespace dawn_native {
if (store->copyTextureForBrowserFS == nullptr) {
ShaderModuleDescriptor descriptor;
ShaderModuleWGSLDescriptor wgslDesc;
- wgslDesc.source = sPassthrough2D4ChannelFrag;
+ wgslDesc.source = sCopyTextureForBrowserFragment;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
store->copyTextureForBrowserFS =
- AcquireRef(device->CreateShaderModule(&descriptor));
+ AcquireRef(device->APICreateShaderModule(&descriptor));
}
ShaderModuleBase* fragmentModule = store->copyTextureForBrowserFS.Get();
// Prepare vertex stage.
- ProgrammableStageDescriptor vertexStage = {};
- vertexStage.module = vertexModule;
- vertexStage.entryPoint = "main";
+ VertexState vertex = {};
+ vertex.module = vertexModule;
+ vertex.entryPoint = "main";
+            // Prepare fragment stage.
- ProgrammableStageDescriptor fragmentStage = {};
- fragmentStage.module = fragmentModule;
- fragmentStage.entryPoint = "main";
+ FragmentState fragment = {};
+ fragment.module = fragmentModule;
+ fragment.entryPoint = "main";
// Prepare color state.
- ColorStateDescriptor colorState = {};
- colorState.format = wgpu::TextureFormat::RGBA8Unorm;
+ ColorTargetState target = {};
+ target.format = dstFormat;
// Create RenderPipeline.
- RenderPipelineDescriptor renderPipelineDesc = {};
+ RenderPipelineDescriptor2 renderPipelineDesc = {};
// Generate the layout based on shader modules.
renderPipelineDesc.layout = nullptr;
- renderPipelineDesc.vertexStage = vertexStage;
- renderPipelineDesc.fragmentStage = &fragmentStage;
+ renderPipelineDesc.vertex = vertex;
+ renderPipelineDesc.fragment = &fragment;
- renderPipelineDesc.primitiveTopology = wgpu::PrimitiveTopology::TriangleList;
+ renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
- renderPipelineDesc.colorStateCount = 1;
- renderPipelineDesc.colorStates = &colorState;
+ fragment.targetCount = 1;
+ fragment.targets = &target;
- store->copyTextureForBrowserPipeline =
- AcquireRef(device->CreateRenderPipeline(&renderPipelineDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ store->copyTextureForBrowserPipelines.insert(
+ {dstFormat, AcquireRef(device->APICreateRenderPipeline2(&renderPipelineDesc))});
}
- return store->copyTextureForBrowserPipeline.Get();
+ return GetCachedPipeline(store, dstFormat);
}
} // anonymous namespace
MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const TextureCopyView* source,
- const TextureCopyView* destination,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options) {
DAWN_TRY(device->ValidateObject(source->texture));
DAWN_TRY(device->ValidateObject(destination->texture));
- DAWN_TRY(ValidateTextureCopyView(device, *source, *copySize));
- DAWN_TRY(ValidateTextureCopyView(device, *destination, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(device, *source, *copySize));
+ DAWN_TRY(ValidateImageCopyTexture(device, *destination, *copySize));
- DAWN_TRY(ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+ DAWN_TRY(ValidateCopyTextureForBrowserRestrictions(*source, *destination, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(*source, *copySize));
- DAWN_TRY(ValidateTextureCopyRange(*destination, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(device, *source, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(device, *destination, *copySize));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
@@ -193,31 +234,23 @@ namespace dawn_native {
DAWN_TRY(ValidateCopyTextureForBrowserOptions(options));
- // TODO(shaobo.yan@intel.com): Support the simplest case for now that source and destination
- // texture has the same size and do full texture blit. Will address sub texture blit in
- // future and remove these validations.
- if (source->origin.x != 0 || source->origin.y != 0 || source->origin.z != 0 ||
- destination->origin.x != 0 || destination->origin.y != 0 ||
- destination->origin.z != 0 || source->mipLevel != 0 || destination->mipLevel != 0 ||
- source->texture->GetWidth() != destination->texture->GetWidth() ||
- source->texture->GetHeight() != destination->texture->GetHeight()) {
- return DAWN_VALIDATION_ERROR("Cannot support sub blit now.");
- }
-
return {};
}
MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const TextureCopyView* source,
- const TextureCopyView* destination,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options) {
// TODO(shaobo.yan@intel.com): In D3D12 and Vulkan, compatible texture formats can be
// copied directly to each other. This can be a potential fast path.
- RenderPipelineBase* pipeline = GetOrCreateCopyTextureForBrowserPipeline(device);
+
+ RenderPipelineBase* pipeline = GetOrCreateCopyTextureForBrowserPipeline(
+ device, destination->texture->GetFormat().format);
// Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->GetBindGroupLayout(0));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->APIGetBindGroupLayout(0));
// Prepare bind group descriptor
BindGroupEntry bindGroupEntries[3] = {};
@@ -226,36 +259,46 @@ namespace dawn_native {
bgDesc.entryCount = 3;
bgDesc.entries = bindGroupEntries;
+ Extent3D srcTextureSize = source->texture->GetSize();
+
// Prepare binding 0 resource: uniform buffer.
float uniformData[] = {
- 1.0, 1.0, // scale
- 0.0, 0.0 // offset
+ copySize->width / static_cast<float>(srcTextureSize.width),
+ copySize->height / static_cast<float>(srcTextureSize.height), // scale
+ source->origin.x / static_cast<float>(srcTextureSize.width),
+ source->origin.y / static_cast<float>(srcTextureSize.height) // offset
};
- // Handle flipY.
+        // Handle flipY. FlipY here means we flip the source texture first and then
+        // do the copy. This helps the case where the source texture is flipped and the
+        // copy needs to unpack the flip.
if (options && options->flipY) {
uniformData[1] *= -1.0;
- uniformData[3] += 1.0;
+ uniformData[3] += copySize->height / static_cast<float>(srcTextureSize.height);
}
BufferDescriptor uniformDesc = {};
uniformDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
uniformDesc.size = sizeof(uniformData);
- Ref<BufferBase> uniformBuffer = AcquireRef(device->CreateBuffer(&uniformDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BufferBase> uniformBuffer = AcquireRef(device->APICreateBuffer(&uniformDesc));
- device->GetQueue()->WriteBuffer(uniformBuffer.Get(), 0, uniformData, sizeof(uniformData));
+ DAWN_TRY(device->GetQueue()->WriteBuffer(uniformBuffer.Get(), 0, uniformData,
+ sizeof(uniformData)));
// Prepare binding 1 resource: sampler
// Use default configuration, filterMode set to Nearest for min and mag.
SamplerDescriptor samplerDesc = {};
- Ref<SamplerBase> sampler = AcquireRef(device->CreateSampler(&samplerDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<SamplerBase> sampler = AcquireRef(device->APICreateSampler(&samplerDesc));
// Prepare binding 2 resource: sampled texture
TextureViewDescriptor srcTextureViewDesc = {};
srcTextureViewDesc.baseMipLevel = source->mipLevel;
srcTextureViewDesc.mipLevelCount = 1;
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
Ref<TextureViewBase> srcTextureView =
- AcquireRef(source->texture->CreateView(&srcTextureViewDesc));
+ AcquireRef(source->texture->APICreateView(&srcTextureViewDesc));
// Set bind group entries.
bindGroupEntries[0].binding = 0;
@@ -267,21 +310,25 @@ namespace dawn_native {
bindGroupEntries[2].textureView = srcTextureView.Get();
// Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup = AcquireRef(device->CreateBindGroup(&bgDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BindGroupBase> bindGroup = AcquireRef(device->APICreateBindGroup(&bgDesc));
// Create command encoder.
CommandEncoderDescriptor encoderDesc = {};
- Ref<CommandEncoder> encoder = AcquireRef(device->CreateCommandEncoder(&encoderDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<CommandEncoder> encoder = AcquireRef(device->APICreateCommandEncoder(&encoderDesc));
// Prepare dst texture view as color Attachment.
TextureViewDescriptor dstTextureViewDesc;
dstTextureViewDesc.baseMipLevel = destination->mipLevel;
dstTextureViewDesc.mipLevelCount = 1;
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
Ref<TextureViewBase> dstView =
- AcquireRef(destination->texture->CreateView(&dstTextureViewDesc));
+ AcquireRef(destination->texture->APICreateView(&dstTextureViewDesc));
// Prepare render pass color attachment descriptor.
RenderPassColorAttachmentDescriptor colorAttachmentDesc;
+
colorAttachmentDesc.attachment = dstView.Get();
colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
@@ -291,22 +338,26 @@ namespace dawn_native {
RenderPassDescriptor renderPassDesc;
renderPassDesc.colorAttachmentCount = 1;
renderPassDesc.colorAttachments = &colorAttachmentDesc;
- Ref<RenderPassEncoder> passEncoder = AcquireRef(encoder->BeginRenderPass(&renderPassDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<RenderPassEncoder> passEncoder =
+ AcquireRef(encoder->APIBeginRenderPass(&renderPassDesc));
// Start pipeline and encode commands to complete
// the copy from src texture to dst texture with transformation.
- passEncoder->SetPipeline(pipeline);
- passEncoder->SetBindGroup(0, bindGroup.Get());
- passEncoder->Draw(3);
- passEncoder->EndPass();
+ passEncoder->APISetPipeline(pipeline);
+ passEncoder->APISetBindGroup(0, bindGroup.Get());
+ passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
+ copySize->height, 0.0, 1.0);
+ passEncoder->APIDraw(3);
+ passEncoder->APIEndPass();
// Finish encoding.
- Ref<CommandBufferBase> commandBuffer = AcquireRef(encoder->Finish());
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<CommandBufferBase> commandBuffer = AcquireRef(encoder->APIFinish());
CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
// Submit command buffer.
- Ref<QueueBase> queue = AcquireRef(device->GetQueue());
- queue->Submit(1, &submitCommandBuffer);
+ device->GetQueue()->APISubmit(1, &submitCommandBuffer);
return {};
}
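The uniform math above can be summarized as: scale = copySize / srcTextureSize and offset = source origin / srcTextureSize, with flipY negating scale.y and shifting offset.y by the normalized copy height. A small standalone sketch with a worked example follows; the helper and struct names are assumed for illustration, not part of Dawn.

    #include <cstdio>

    struct Uniforms { float scaleX, scaleY, offsetX, offsetY; };

    Uniforms ComputeCopyUniforms(float srcW, float srcH,
                                 float copyW, float copyH,
                                 float originX, float originY, bool flipY) {
        Uniforms u{copyW / srcW, copyH / srcH, originX / srcW, originY / srcH};
        if (flipY) {
            u.scaleY *= -1.0f;          // sampling proceeds upward through the source
            u.offsetY += copyH / srcH;  // start at the bottom edge of the copy region
        }
        return u;
    }

    int main() {
        // 256x128 source, full-texture copy, flipped: scale = (1, -1), offset = (0, 1).
        Uniforms u = ComputeCopyUniforms(256, 128, 256, 128, 0, 0, true);
        std::printf("scale=(%g, %g) offset=(%g, %g)\n", u.scaleX, u.scaleY, u.offsetX, u.offsetY);
    }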
diff --git a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h
index e65a37ed147..e0965abcf1c 100644
--- a/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h
+++ b/chromium/third_party/dawn/src/dawn_native/CopyTextureForBrowserHelper.h
@@ -21,18 +21,18 @@
namespace dawn_native {
class DeviceBase;
struct Extent3D;
- struct TextureCopyView;
+ struct ImageCopyTexture;
struct CopyTextureForBrowserOptions;
MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
- const TextureCopyView* source,
- const TextureCopyView* destination,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options);
MaybeError DoCopyTextureForBrowser(DeviceBase* device,
- const TextureCopyView* source,
- const TextureCopyView* destination,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options);
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp
index 7239774168d..a9fcf620a1f 100644
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.cpp
@@ -20,68 +20,86 @@
namespace dawn_native {
- CreatePipelineAsyncTaskBase::CreatePipelineAsyncTaskBase(void* userdata) : mUserData(userdata) {
+ CreatePipelineAsyncTaskBase::CreatePipelineAsyncTaskBase(std::string errorMessage,
+ void* userdata)
+ : mErrorMessage(errorMessage), mUserData(userdata) {
}
CreatePipelineAsyncTaskBase::~CreatePipelineAsyncTaskBase() {
}
CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
- ComputePipelineBase* pipeline,
+ Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata)
- : CreatePipelineAsyncTaskBase(userdata),
- mPipeline(pipeline),
+ : CreatePipelineAsyncTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
mCreateComputePipelineAsyncCallback(callback) {
}
- void CreateComputePipelineAsyncTask::Finish(WGPUCreatePipelineAsyncStatus status) {
- ASSERT(mPipeline != nullptr);
+ void CreateComputePipelineAsyncTask::Finish() {
ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- // TODO(jiawei.shao@intel.com): support handling device lost
- ASSERT(status == WGPUCreatePipelineAsyncStatus_DeviceDestroyed);
- mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
- nullptr, "Device destroyed before callback",
- mUserData);
- mPipeline->Release();
- } else {
+ if (mPipeline.Get() != nullptr) {
mCreateComputePipelineAsyncCallback(
- status, reinterpret_cast<WGPUComputePipeline>(mPipeline), "", mUserData);
+ WGPUCreatePipelineAsyncStatus_Success,
+ reinterpret_cast<WGPUComputePipeline>(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
}
+ }
+
+ void CreateComputePipelineAsyncTask::HandleShutDown() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
- // Set mCreateComputePipelineAsyncCallback to nullptr in case it is called more than once.
- mCreateComputePipelineAsyncCallback = nullptr;
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+ }
+
+ void CreateComputePipelineAsyncTask::HandleDeviceLoss() {
+ ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+ mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
}
CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
- RenderPipelineBase* pipeline,
+ Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata)
- : CreatePipelineAsyncTaskBase(userdata),
- mPipeline(pipeline),
+ : CreatePipelineAsyncTaskBase(errorMessage, userdata),
+ mPipeline(std::move(pipeline)),
mCreateRenderPipelineAsyncCallback(callback) {
}
- void CreateRenderPipelineAsyncTask::Finish(WGPUCreatePipelineAsyncStatus status) {
- ASSERT(mPipeline != nullptr);
+ void CreateRenderPipelineAsyncTask::Finish() {
ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
- if (status != WGPUCreatePipelineAsyncStatus_Success) {
- // TODO(jiawei.shao@intel.com): support handling device lost
- ASSERT(status == WGPUCreatePipelineAsyncStatus_DeviceDestroyed);
- mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed,
- nullptr, "Device destroyed before callback",
- mUserData);
- mPipeline->Release();
- } else {
+ if (mPipeline.Get() != nullptr) {
mCreateRenderPipelineAsyncCallback(
- status, reinterpret_cast<WGPURenderPipeline>(mPipeline), "", mUserData);
+ WGPUCreatePipelineAsyncStatus_Success,
+ reinterpret_cast<WGPURenderPipeline>(mPipeline.Detach()), "", mUserData);
+ } else {
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+ mErrorMessage.c_str(), mUserData);
}
+ }
+
+ void CreateRenderPipelineAsyncTask::HandleShutDown() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+ "Device destroyed before callback", mUserData);
+ }
+
+ void CreateRenderPipelineAsyncTask::HandleDeviceLoss() {
+ ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
- // Set mCreatePipelineAsyncCallback to nullptr in case it is called more than once.
- mCreateRenderPipelineAsyncCallback = nullptr;
+ mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+ "Device lost before callback", mUserData);
}
CreatePipelineAsyncTracker::CreatePipelineAsyncTracker(DeviceBase* device) : mDevice(device) {
@@ -98,15 +116,29 @@ namespace dawn_native {
}
void CreatePipelineAsyncTracker::Tick(ExecutionSerial finishedSerial) {
+ // If a user calls Queue::Submit inside Create*PipelineAsync, then the device will be
+        // ticked, which in turn ticks the tracker, causing reentrance here. To prevent the
+ // reentrant call from invalidating mCreatePipelineAsyncTasksInFlight while in use by the
+ // first call, we remove the tasks to finish from the queue, update
+ // mCreatePipelineAsyncTasksInFlight, then run the callbacks.
+ std::vector<std::unique_ptr<CreatePipelineAsyncTaskBase>> tasks;
for (auto& task : mCreatePipelineAsyncTasksInFlight.IterateUpTo(finishedSerial)) {
- task->Finish(WGPUCreatePipelineAsyncStatus_Success);
+ tasks.push_back(std::move(task));
}
mCreatePipelineAsyncTasksInFlight.ClearUpTo(finishedSerial);
+
+ for (auto& task : tasks) {
+ if (mDevice->IsLost()) {
+ task->HandleDeviceLoss();
+ } else {
+ task->Finish();
+ }
+ }
}
void CreatePipelineAsyncTracker::ClearForShutDown() {
for (auto& task : mCreatePipelineAsyncTasksInFlight.IterateAll()) {
- task->Finish(WGPUCreatePipelineAsyncStatus_DeviceDestroyed);
+ task->HandleShutDown();
}
mCreatePipelineAsyncTasksInFlight.Clear();
}
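
The reworked Tick() above drains the serial queue before invoking any callback, so a callback that re-enters the tracker cannot invalidate the container mid-iteration. The same idiom, reduced to a self-contained sketch (Task and Tracker here are placeholders, not Dawn classes):

    #include <memory>
    #include <utility>
    #include <vector>

    struct Task {
        virtual ~Task() = default;
        virtual void Finish() = 0;
    };

    class Tracker {
      public:
        void Track(std::unique_ptr<Task> task) {
            mInFlight.push_back(std::move(task));
        }
        void Tick() {
            // Move finished tasks out *before* running callbacks; a callback that calls
            // back into Tick() then observes an already-consistent mInFlight.
            std::vector<std::unique_ptr<Task>> tasks = std::move(mInFlight);
            mInFlight.clear();
            for (auto& task : tasks) {
                task->Finish();
            }
        }

      private:
        std::vector<std::unique_ptr<Task>> mInFlight;
    };
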
diff --git a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h
index 438427adbab..b84daed2e6c 100644
--- a/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CreatePipelineAsyncTracker.h
@@ -15,11 +15,13 @@
#ifndef DAWNNATIVE_CREATEPIPELINEASYNCTRACKER_H_
#define DAWNNATIVE_CREATEPIPELINEASYNCTRACKER_H_
+#include "common/RefCounted.h"
#include "common/SerialQueue.h"
#include "dawn/webgpu.h"
#include "dawn_native/IntegerTypes.h"
#include <memory>
+#include <string>
namespace dawn_native {
@@ -28,42 +30,51 @@ namespace dawn_native {
class RenderPipelineBase;
struct CreatePipelineAsyncTaskBase {
- CreatePipelineAsyncTaskBase(void* userData);
+ CreatePipelineAsyncTaskBase(std::string errorMessage, void* userData);
virtual ~CreatePipelineAsyncTaskBase();
- virtual void Finish(WGPUCreatePipelineAsyncStatus status) = 0;
+ virtual void Finish() = 0;
+ virtual void HandleShutDown() = 0;
+ virtual void HandleDeviceLoss() = 0;
protected:
+ std::string mErrorMessage;
void* mUserData;
};
struct CreateComputePipelineAsyncTask final : public CreatePipelineAsyncTaskBase {
- CreateComputePipelineAsyncTask(ComputePipelineBase* pipeline,
+ CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> pipeline,
+ std::string errorMessage,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
- void Finish(WGPUCreatePipelineAsyncStatus status) final;
+ void Finish() final;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
private:
- ComputePipelineBase* mPipeline;
+ Ref<ComputePipelineBase> mPipeline;
WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
};
struct CreateRenderPipelineAsyncTask final : public CreatePipelineAsyncTaskBase {
- CreateRenderPipelineAsyncTask(RenderPipelineBase* pipeline,
+ CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> pipeline,
+ std::string errorMessage,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata);
- void Finish(WGPUCreatePipelineAsyncStatus status) final;
+ void Finish() final;
+ void HandleShutDown() final;
+ void HandleDeviceLoss() final;
private:
- RenderPipelineBase* mPipeline;
+ Ref<RenderPipelineBase> mPipeline;
WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
};
class CreatePipelineAsyncTracker {
public:
- CreatePipelineAsyncTracker(DeviceBase* device);
+ explicit CreatePipelineAsyncTracker(DeviceBase* device);
~CreatePipelineAsyncTracker();
void TrackTask(std::unique_ptr<CreatePipelineAsyncTaskBase> task, ExecutionSerial serial);
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index c089926a370..849c6c0be68 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -208,7 +208,7 @@ namespace dawn_native {
DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
- return deviceBase->Tick();
+ return deviceBase->APITick();
}
// ExternalImageDescriptor
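
DeviceTick above now forwards to DeviceBase::APITick, whose boolean return reports whether further ticking is still needed. A hedged sketch of how an embedder can use that return value (the polling loop and sleep interval are illustrative):

    #include <chrono>
    #include <thread>
    #include "dawn_native/DawnNative.h"

    // Spin the device until no queue work or asynchronous callbacks remain pending.
    void WaitForDeviceIdle(WGPUDevice device) {
        while (dawn_native::DeviceTick(device)) {
            // true means more ticking is needed; yield briefly and poll again.
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }
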
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index e97a4bd030b..91c905f5f7e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -22,11 +22,13 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/CompilationMessages.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/CreatePipelineAsyncTracker.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/ErrorScope.h"
+#include "dawn_native/ExternalTexture.h"
#include "dawn_native/Fence.h"
#include "dawn_native/Instance.h"
#include "dawn_native/InternalPipelineStore.h"
@@ -176,7 +178,7 @@ namespace dawn_native {
// Tick the queue-related tasks since they should be complete. This must be done before
        // ShutDownImpl() because it may relinquish resources that will be freed by backends in the
// ShutDownImpl() call.
- GetQueue()->Tick(GetCompletedCommandSerial());
+ mQueue->Tick(GetCompletedCommandSerial());
// Call TickImpl once last time to clean up resources
// Ignore errors so that we can continue with destruction
IgnoreErrors(TickImpl());
@@ -251,7 +253,7 @@ namespace dawn_native {
}
}
- void DeviceBase::InjectError(wgpu::ErrorType type, const char* message) {
+ void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
if (ConsumedError(ValidateErrorType(type))) {
return;
}
@@ -278,24 +280,24 @@ namespace dawn_native {
HandleError(error->GetType(), ss.str().c_str());
}
- void DeviceBase::SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
+ void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
mUncapturedErrorCallback = callback;
mUncapturedErrorUserdata = userdata;
}
- void DeviceBase::SetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+ void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
mDeviceLostCallback = callback;
mDeviceLostUserdata = userdata;
}
- void DeviceBase::PushErrorScope(wgpu::ErrorFilter filter) {
+ void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
if (ConsumedError(ValidateErrorFilter(filter))) {
return;
}
mErrorScopeStack->Push(filter);
}
- bool DeviceBase::PopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
+ bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
if (mErrorScopeStack->Empty()) {
return false;
}
@@ -331,7 +333,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Device is lost");
}
- void DeviceBase::LoseForTesting() {
+ void DeviceBase::APILoseForTesting() {
if (mState != State::Alive) {
return;
}
@@ -401,8 +403,9 @@ namespace dawn_native {
}
}
- void DeviceBase::CheckPassedSerials() {
- ExecutionSerial completedSerial = CheckAndUpdateCompletedSerials();
+ MaybeError DeviceBase::CheckPassedSerials() {
+ ExecutionSerial completedSerial;
+ DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
ASSERT(completedSerial <= mLastSubmittedSerial);
// completedSerial should not be less than mCompletedSerial unless it is 0.
@@ -412,6 +415,8 @@ namespace dawn_native {
if (completedSerial > mCompletedSerial) {
mCompletedSerial = completedSerial;
}
+
+ return {};
}
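
CheckPassedSerials now returns MaybeError, so a fence-query failure propagates to the caller instead of being dropped. The DAWN_TRY/DAWN_TRY_ASSIGN macros used above compose as in this sketch (QueryBackendCompletedSerial is a hypothetical helper):

    // Hypothetical backend query, shown only to illustrate error propagation.
    ResultOrError<ExecutionSerial> QueryBackendCompletedSerial();

    MaybeError UpdateCompletedSerial(ExecutionSerial* outSerial) {
        ExecutionSerial completed;
        // On failure, DAWN_TRY_ASSIGN returns the wrapped ErrorData to the caller at once;
        // on success it assigns the value and execution continues.
        DAWN_TRY_ASSIGN(completed, QueryBackendCompletedSerial());
        *outSerial = completed;
        return {};
    }
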
ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
@@ -442,18 +447,17 @@ namespace dawn_native {
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
- Ref<BindGroupLayoutBase> result = nullptr;
+ Ref<BindGroupLayoutBase> result;
auto iter = mCaches->bindGroupLayouts.find(&blueprint);
if (iter != mCaches->bindGroupLayouts.end()) {
result = *iter;
} else {
- BindGroupLayoutBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreateBindGroupLayoutImpl(descriptor));
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->bindGroupLayouts.insert(backendObj);
- result = AcquireRef(backendObj);
+ DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->bindGroupLayouts.insert(result.Get());
}
+
return std::move(result);
}
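
GetOrCreateBindGroupLayout, like the other caches rewritten below, now holds a Ref<> across the whole lookup-or-create sequence instead of juggling raw pointers. The shared shape of these functions, as a generic sketch (the template and the Cache parameter are illustrative; the real caches live inside DeviceBase):

    // Sketch: hash a blueprint, look it up, and only build the backend object on a miss.
    template <typename Object, typename Descriptor, typename Cache, typename CreateFn>
    ResultOrError<Ref<Object>> FindOrCreate(DeviceBase* device,
                                            const Descriptor* descriptor,
                                            Cache* cache,
                                            CreateFn createImpl) {
        Object blueprint(device, descriptor);
        const size_t blueprintHash = blueprint.ComputeContentHash();
        blueprint.SetContentHash(blueprintHash);

        Ref<Object> result;
        auto iter = cache->find(&blueprint);
        if (iter != cache->end()) {
            result = *iter;  // Ref assignment adds a reference; no manual Reference() call.
        } else {
            DAWN_TRY_ASSIGN(result, createImpl(descriptor));
            result->SetIsCachedReference();
            result->SetContentHash(blueprintHash);
            cache->insert(result.Get());
        }
        return std::move(result);
    }
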
@@ -477,25 +481,33 @@ namespace dawn_native {
return mEmptyBindGroupLayout.Get();
}
- ResultOrError<ComputePipelineBase*> DeviceBase::GetOrCreateComputePipeline(
+ std::pair<Ref<ComputePipelineBase>, size_t> DeviceBase::GetCachedComputePipeline(
const ComputePipelineDescriptor* descriptor) {
ComputePipelineBase blueprint(this, descriptor);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
+ Ref<ComputePipelineBase> result;
auto iter = mCaches->computePipelines.find(&blueprint);
if (iter != mCaches->computePipelines.end()) {
- (*iter)->Reference();
- return *iter;
+ result = *iter;
}
- ComputePipelineBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreateComputePipelineImpl(descriptor));
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->computePipelines.insert(backendObj);
- return backendObj;
+ return std::make_pair(result, blueprintHash);
+ }
+
+ Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedPipeline(
+ Ref<ComputePipelineBase> computePipeline,
+ size_t blueprintHash) {
+ computePipeline->SetContentHash(blueprintHash);
+ auto insertion = mCaches->computePipelines.insert(computePipeline.Get());
+ if (insertion.second) {
+ computePipeline->SetIsCachedReference();
+ return computePipeline;
+ } else {
+ return *(insertion.first);
+ }
}
void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
@@ -504,25 +516,25 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
- ResultOrError<PipelineLayoutBase*> DeviceBase::GetOrCreatePipelineLayout(
+ ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
PipelineLayoutBase blueprint(this, descriptor);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
+ Ref<PipelineLayoutBase> result;
auto iter = mCaches->pipelineLayouts.find(&blueprint);
if (iter != mCaches->pipelineLayouts.end()) {
- (*iter)->Reference();
- return *iter;
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->pipelineLayouts.insert(result.Get());
}
- PipelineLayoutBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreatePipelineLayoutImpl(descriptor));
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->pipelineLayouts.insert(backendObj);
- return backendObj;
+ return std::move(result);
}
void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
@@ -531,25 +543,25 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
- ResultOrError<RenderPipelineBase*> DeviceBase::GetOrCreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor) {
+ ResultOrError<Ref<RenderPipelineBase>> DeviceBase::GetOrCreateRenderPipeline(
+ const RenderPipelineDescriptor2* descriptor) {
RenderPipelineBase blueprint(this, descriptor);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
+ Ref<RenderPipelineBase> result;
auto iter = mCaches->renderPipelines.find(&blueprint);
if (iter != mCaches->renderPipelines.end()) {
- (*iter)->Reference();
- return *iter;
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreateRenderPipelineImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->renderPipelines.insert(result.Get());
}
- RenderPipelineBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreateRenderPipelineImpl(descriptor));
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->renderPipelines.insert(backendObj);
- return backendObj;
+ return std::move(result);
}
void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
@@ -558,25 +570,25 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
- ResultOrError<SamplerBase*> DeviceBase::GetOrCreateSampler(
+ ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
const SamplerDescriptor* descriptor) {
SamplerBase blueprint(this, descriptor);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
+ Ref<SamplerBase> result;
auto iter = mCaches->samplers.find(&blueprint);
if (iter != mCaches->samplers.end()) {
- (*iter)->Reference();
- return *iter;
+ result = *iter;
+ } else {
+ DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->samplers.insert(result.Get());
}
- SamplerBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreateSamplerImpl(descriptor));
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->samplers.insert(backendObj);
- return backendObj;
+ return std::move(result);
}
void DeviceBase::UncacheSampler(SamplerBase* obj) {
@@ -585,37 +597,36 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
- ResultOrError<ShaderModuleBase*> DeviceBase::GetOrCreateShaderModule(
+ ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
+ ASSERT(parseResult != nullptr);
+
ShaderModuleBase blueprint(this, descriptor);
const size_t blueprintHash = blueprint.ComputeContentHash();
blueprint.SetContentHash(blueprintHash);
+ Ref<ShaderModuleBase> result;
auto iter = mCaches->shaderModules.find(&blueprint);
if (iter != mCaches->shaderModules.end()) {
- (*iter)->Reference();
- return *iter;
- }
-
- ShaderModuleBase* backendObj;
- if (parseResult == nullptr) {
- // We skip the parse on creation if validation isn't enabled which let's us quickly
- // lookup in the cache without validating and parsing. We need the parsed module now, so
- // call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but we can
- // consider splitting it if additional validation is added.
- ASSERT(!IsValidationEnabled());
- ShaderModuleParseResult localParseResult =
- ValidateShaderModuleDescriptor(this, descriptor).AcquireSuccess();
- DAWN_TRY_ASSIGN(backendObj, CreateShaderModuleImpl(descriptor, &localParseResult));
+ result = *iter;
} else {
- DAWN_TRY_ASSIGN(backendObj, CreateShaderModuleImpl(descriptor, parseResult));
+ if (!parseResult->HasParsedShader()) {
+                // We skip the parse on creation if validation isn't enabled, which lets us quickly
+ // lookup in the cache without validating and parsing. We need the parsed module
+ // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
+ // we can consider splitting it if additional validation is added.
+ ASSERT(!IsValidationEnabled());
+ DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult));
+ }
+ DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
+ result->SetIsCachedReference();
+ result->SetContentHash(blueprintHash);
+ mCaches->shaderModules.insert(result.Get());
}
- backendObj->SetIsCachedReference();
- backendObj->SetContentHash(blueprintHash);
- mCaches->shaderModules.insert(backendObj);
- return backendObj;
+
+ return std::move(result);
}
void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
@@ -651,6 +662,12 @@ namespace dawn_native {
}
Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+ const RenderPipelineDescriptor2* descriptor) {
+ AttachmentStateBlueprint blueprint(descriptor);
+ return GetOrCreateAttachmentState(&blueprint);
+ }
+
+ Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
const RenderPassDescriptor* descriptor) {
AttachmentStateBlueprint blueprint(descriptor);
return GetOrCreateAttachmentState(&blueprint);
@@ -664,185 +681,246 @@ namespace dawn_native {
// Object creation API methods
- BindGroupBase* DeviceBase::CreateBindGroup(const BindGroupDescriptor* descriptor) {
- BindGroupBase* result = nullptr;
-
- if (ConsumedError(CreateBindGroupInternal(&result, descriptor))) {
+ BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
+ Ref<BindGroupBase> result;
+ if (ConsumedError(CreateBindGroupInternal(descriptor), &result)) {
return BindGroupBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- BindGroupLayoutBase* DeviceBase::CreateBindGroupLayout(
+ BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
- BindGroupLayoutBase* result = nullptr;
-
- if (ConsumedError(CreateBindGroupLayoutInternal(&result, descriptor))) {
+ Ref<BindGroupLayoutBase> result;
+ if (ConsumedError(CreateBindGroupLayoutInternal(descriptor), &result)) {
return BindGroupLayoutBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- BufferBase* DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
+ BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
Ref<BufferBase> result = nullptr;
if (ConsumedError(CreateBufferInternal(descriptor), &result)) {
ASSERT(result == nullptr);
return BufferBase::MakeError(this, descriptor);
}
-
return result.Detach();
}
- CommandEncoder* DeviceBase::CreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
+ CommandEncoder* DeviceBase::APICreateCommandEncoder(
+ const CommandEncoderDescriptor* descriptor) {
return new CommandEncoder(this, descriptor);
}
- ComputePipelineBase* DeviceBase::CreateComputePipeline(
+ ComputePipelineBase* DeviceBase::APICreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
- ComputePipelineBase* result = nullptr;
-
- if (ConsumedError(CreateComputePipelineInternal(&result, descriptor))) {
+ Ref<ComputePipelineBase> result;
+ if (ConsumedError(CreateComputePipelineInternal(descriptor), &result)) {
return ComputePipelineBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- void DeviceBase::CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata) {
- ComputePipelineBase* result = nullptr;
+ void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ MaybeError maybeResult = CreateComputePipelineAsyncInternal(descriptor, callback, userdata);
- if (IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- ConsumedError(
- DAWN_VALIDATION_ERROR("CreateComputePipelineAsync is disallowed because it isn't "
- "completely implemented yet."));
- return;
- }
-
- MaybeError maybeError = CreateComputePipelineInternal(&result, descriptor);
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+        // Call the callback directly when a validation error has been found during
+        // front-end validation. If there is no error, CreateComputePipelineAsyncInternal
+        // will call the callback.
+ if (maybeResult.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
userdata);
- return;
}
-
- std::unique_ptr<CreateComputePipelineAsyncTask> request =
- std::make_unique<CreateComputePipelineAsyncTask>(result, callback, userdata);
- mCreatePipelineAsyncTracker->TrackTask(std::move(request), GetPendingCommandSerial());
}
- PipelineLayoutBase* DeviceBase::CreatePipelineLayout(
+ PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
- PipelineLayoutBase* result = nullptr;
-
- if (ConsumedError(CreatePipelineLayoutInternal(&result, descriptor))) {
+ Ref<PipelineLayoutBase> result;
+ if (ConsumedError(CreatePipelineLayoutInternal(descriptor), &result)) {
return PipelineLayoutBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- QuerySetBase* DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
- QuerySetBase* result = nullptr;
-
- if (ConsumedError(CreateQuerySetInternal(&result, descriptor))) {
+ QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
+ Ref<QuerySetBase> result;
+ if (ConsumedError(CreateQuerySetInternal(descriptor), &result)) {
return QuerySetBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- SamplerBase* DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
- SamplerBase* result = nullptr;
-
- if (ConsumedError(CreateSamplerInternal(&result, descriptor))) {
+ SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
+ Ref<SamplerBase> result;
+ if (ConsumedError(CreateSamplerInternal(descriptor), &result)) {
return SamplerBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- void DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata) {
- RenderPipelineBase* result = nullptr;
-
- if (IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
- ConsumedError(
- DAWN_VALIDATION_ERROR("CreateRenderPipelineAsync is disallowed because it isn't "
- "completely implemented yet."));
- return;
- }
-
- MaybeError maybeError = CreateRenderPipelineInternal(&result, descriptor);
- if (maybeError.IsError()) {
- std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+ void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor2* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata) {
+ ResultOrError<Ref<RenderPipelineBase>> maybeResult =
+ CreateRenderPipelineInternal(descriptor);
+ if (maybeResult.IsError()) {
+ std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
userdata);
return;
}
+ Ref<RenderPipelineBase> result = maybeResult.AcquireSuccess();
std::unique_ptr<CreateRenderPipelineAsyncTask> request =
- std::make_unique<CreateRenderPipelineAsyncTask>(result, callback, userdata);
+ std::make_unique<CreateRenderPipelineAsyncTask>(std::move(result), "", callback,
+ userdata);
mCreatePipelineAsyncTracker->TrackTask(std::move(request), GetPendingCommandSerial());
}
- RenderBundleEncoder* DeviceBase::CreateRenderBundleEncoder(
+ RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor) {
- RenderBundleEncoder* result = nullptr;
-
- if (ConsumedError(CreateRenderBundleEncoderInternal(&result, descriptor))) {
+ Ref<RenderBundleEncoder> result;
+ if (ConsumedError(CreateRenderBundleEncoderInternal(descriptor), &result)) {
return RenderBundleEncoder::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- RenderPipelineBase* DeviceBase::CreateRenderPipeline(
+ RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
const RenderPipelineDescriptor* descriptor) {
- RenderPipelineBase* result = nullptr;
+ // TODO: Enable this warning once the tests have been converted to either use the new
+ // format or expect the deprecation warning.
+ EmitDeprecationWarning(
+ "The format of RenderPipelineDescriptor has changed, and will soon require the "
+ "new structure. Please begin using CreateRenderPipeline2() instead.");
- if (ConsumedError(CreateRenderPipelineInternal(&result, descriptor))) {
- return RenderPipelineBase::MakeError(this);
- }
+        // Convert the descriptor to the new format before proceeding.
+ RenderPipelineDescriptor2 normalizedDescriptor;
- return result;
- }
- ShaderModuleBase* DeviceBase::CreateShaderModule(const ShaderModuleDescriptor* descriptor) {
- ShaderModuleBase* result = nullptr;
+ normalizedDescriptor.label = descriptor->label;
+ normalizedDescriptor.layout = descriptor->layout;
+
+ normalizedDescriptor.vertex.module = descriptor->vertexStage.module;
+ normalizedDescriptor.vertex.entryPoint = descriptor->vertexStage.entryPoint;
+
+ normalizedDescriptor.primitive.topology = descriptor->primitiveTopology;
+
+ normalizedDescriptor.multisample.count = descriptor->sampleCount;
+ normalizedDescriptor.multisample.mask = descriptor->sampleMask;
+ normalizedDescriptor.multisample.alphaToCoverageEnabled =
+ descriptor->alphaToCoverageEnabled;
- if (ConsumedError(CreateShaderModuleInternal(&result, descriptor))) {
- return ShaderModuleBase::MakeError(this);
+ if (descriptor->vertexState) {
+ const VertexStateDescriptor* vertexState = descriptor->vertexState;
+ normalizedDescriptor.primitive.stripIndexFormat = vertexState->indexFormat;
+ normalizedDescriptor.vertex.bufferCount = vertexState->vertexBufferCount;
+ normalizedDescriptor.vertex.buffers = vertexState->vertexBuffers;
+ } else {
+ normalizedDescriptor.vertex.bufferCount = 0;
+ normalizedDescriptor.vertex.buffers = nullptr;
+ }
+
+ DepthStencilState depthStencil;
+ if (descriptor->depthStencilState) {
+ const DepthStencilStateDescriptor* depthStencilState = descriptor->depthStencilState;
+ normalizedDescriptor.depthStencil = &depthStencil;
+
+ depthStencil.format = depthStencilState->format;
+ depthStencil.depthWriteEnabled = depthStencilState->depthWriteEnabled;
+ depthStencil.depthCompare = depthStencilState->depthCompare;
+ depthStencil.stencilFront = depthStencilState->stencilFront;
+ depthStencil.stencilBack = depthStencilState->stencilBack;
+ depthStencil.stencilReadMask = depthStencilState->stencilReadMask;
+ depthStencil.stencilWriteMask = depthStencilState->stencilWriteMask;
+ }
+
+ if (descriptor->rasterizationState) {
+ const RasterizationStateDescriptor* rasterizationState = descriptor->rasterizationState;
+ normalizedDescriptor.primitive.frontFace = rasterizationState->frontFace;
+ normalizedDescriptor.primitive.cullMode = rasterizationState->cullMode;
+ depthStencil.depthBias = rasterizationState->depthBias;
+ depthStencil.depthBiasSlopeScale = rasterizationState->depthBiasSlopeScale;
+ depthStencil.depthBiasClamp = rasterizationState->depthBiasClamp;
+ }
+
+ FragmentState fragment;
+ std::vector<ColorTargetState> targets;
+ std::vector<BlendState> blendStates;
+ if (descriptor->fragmentStage) {
+ const ProgrammableStageDescriptor* fragmentStage = descriptor->fragmentStage;
+ normalizedDescriptor.fragment = &fragment;
+
+ fragment.module = fragmentStage->module;
+ fragment.entryPoint = fragmentStage->entryPoint;
+
+ targets.resize(descriptor->colorStateCount);
+ blendStates.resize(descriptor->colorStateCount);
+
+ for (uint32_t i = 0; i < descriptor->colorStateCount; ++i) {
+ const ColorStateDescriptor& colorState = descriptor->colorStates[i];
+ ColorTargetState& target = targets[i];
+ target.format = colorState.format;
+ target.writeMask = colorState.writeMask;
+
+ if (BlendEnabled(&colorState)) {
+ BlendState* blend = &blendStates[i];
+ target.blend = blend;
+
+ blend->color.srcFactor = colorState.colorBlend.srcFactor;
+ blend->color.dstFactor = colorState.colorBlend.dstFactor;
+ blend->color.operation = colorState.colorBlend.operation;
+
+ blend->alpha.srcFactor = colorState.alphaBlend.srcFactor;
+ blend->alpha.dstFactor = colorState.alphaBlend.dstFactor;
+ blend->alpha.operation = colorState.alphaBlend.operation;
+ }
+ }
+
+ fragment.targetCount = descriptor->colorStateCount;
+ fragment.targets = targets.data();
}
- return result;
+ Ref<RenderPipelineBase> result;
+ if (ConsumedError(CreateRenderPipelineInternal(&normalizedDescriptor), &result)) {
+ return RenderPipelineBase::MakeError(this);
+ }
+ return result.Detach();
}
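
The conversion above maps every field of the legacy RenderPipelineDescriptor onto RenderPipelineDescriptor2. Callers targeting the new entry point fill that structure directly; a minimal sketch using only fields referenced in this function (the shader modules and the chosen formats are assumed to exist):

    // Sketch: building a RenderPipelineDescriptor2 for APICreateRenderPipeline2.
    RenderPipelineDescriptor2 desc = {};
    desc.layout = nullptr;                      // let Dawn derive the default pipeline layout
    desc.vertex.module = vertexModule;          // assumed: a previously created shader module
    desc.vertex.entryPoint = "main";
    desc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
    desc.multisample.count = 1;
    desc.multisample.mask = 0xFFFFFFFF;
    desc.multisample.alphaToCoverageEnabled = false;

    ColorTargetState colorTarget = {};
    colorTarget.format = wgpu::TextureFormat::BGRA8Unorm;
    colorTarget.writeMask = wgpu::ColorWriteMask::All;

    FragmentState fragment = {};
    fragment.module = fragmentModule;           // assumed: a previously created shader module
    fragment.entryPoint = "main";
    fragment.targetCount = 1;
    fragment.targets = &colorTarget;
    desc.fragment = &fragment;

    RenderPipelineBase* pipeline = device->APICreateRenderPipeline2(&desc);
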
- SwapChainBase* DeviceBase::CreateSwapChain(Surface* surface,
- const SwapChainDescriptor* descriptor) {
- SwapChainBase* result = nullptr;
-
- if (ConsumedError(CreateSwapChainInternal(&result, surface, descriptor))) {
+ RenderPipelineBase* DeviceBase::APICreateRenderPipeline2(
+ const RenderPipelineDescriptor2* descriptor) {
+ Ref<RenderPipelineBase> result;
+ if (ConsumedError(CreateRenderPipelineInternal(descriptor), &result)) {
+ return RenderPipelineBase::MakeError(this);
+ }
+ return result.Detach();
+ }
+ ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
+ Ref<ShaderModuleBase> result;
+ ShaderModuleParseResult parseResult = {};
+ if (ConsumedError(CreateShaderModuleInternal(descriptor, &parseResult), &result)) {
+ return ShaderModuleBase::MakeError(this, std::move(parseResult.compilationMessages));
+ }
+ return result.Detach();
+ }
+ SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChainBase> result;
+ if (ConsumedError(CreateSwapChainInternal(surface, descriptor), &result)) {
return SwapChainBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
- TextureBase* DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
+ TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
Ref<TextureBase> result;
-
if (ConsumedError(CreateTextureInternal(descriptor), &result)) {
return TextureBase::MakeError(this);
}
-
return result.Detach();
}
TextureViewBase* DeviceBase::CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- TextureViewBase* result = nullptr;
-
- if (ConsumedError(CreateTextureViewInternal(&result, texture, descriptor))) {
+ Ref<TextureViewBase> result;
+ if (ConsumedError(CreateTextureViewInternal(texture, descriptor), &result)) {
return TextureViewBase::MakeError(this);
}
-
- return result;
+ return result.Detach();
}
// For Dawn Wire
- BufferBase* DeviceBase::CreateErrorBuffer() {
+ BufferBase* DeviceBase::APICreateErrorBuffer() {
BufferDescriptor desc = {};
return BufferBase::MakeError(this, &desc);
}
@@ -850,19 +928,22 @@ namespace dawn_native {
// Other Device API methods
// Returns true if future ticking is needed.
- bool DeviceBase::Tick() {
- if (ConsumedError(ValidateIsAlive())) {
+ bool DeviceBase::APITick() {
+ if (ConsumedError(Tick())) {
return false;
}
+ return !IsDeviceIdle();
+ }
+
+ MaybeError DeviceBase::Tick() {
+ DAWN_TRY(ValidateIsAlive());
+
// to avoid overly ticking, we only want to tick when:
// 1. the last submitted serial has moved beyond the completed serial
// 2. or the completed serial has not reached the future serial set by the trackers
if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
- CheckPassedSerials();
-
- if (ConsumedError(TickImpl())) {
- return false;
- }
+ DAWN_TRY(CheckPassedSerials());
+ DAWN_TRY(TickImpl());
            // There is no GPU work in flight, so we need to move the serials forward so that
            // CPU operations waiting on GPU completion can know they don't have to wait.
@@ -876,28 +957,15 @@ namespace dawn_native {
// tick the dynamic uploader before the backend resource allocators. This would allow
// reclaiming resources one tick earlier.
mDynamicUploader->Deallocate(mCompletedSerial);
- GetQueue()->Tick(mCompletedSerial);
+ mQueue->Tick(mCompletedSerial);
mCreatePipelineAsyncTracker->Tick(mCompletedSerial);
}
- return !IsDeviceIdle();
- }
-
- void DeviceBase::Reference() {
- ASSERT(mRefCount != 0);
- mRefCount++;
- }
-
- void DeviceBase::Release() {
- ASSERT(mRefCount != 0);
- mRefCount--;
- if (mRefCount == 0) {
- delete this;
- }
+ return {};
}
- QueueBase* DeviceBase::GetQueue() {
+ QueueBase* DeviceBase::APIGetQueue() {
// Backends gave the primary queue during initialization.
ASSERT(mQueue != nullptr);
@@ -906,10 +974,20 @@ namespace dawn_native {
return mQueue.Get();
}
- QueueBase* DeviceBase::GetDefaultQueue() {
+ QueueBase* DeviceBase::APIGetDefaultQueue() {
EmitDeprecationWarning(
"Device::GetDefaultQueue is deprecated, use Device::GetQueue() instead");
- return GetQueue();
+ return APIGetQueue();
+ }
+
+ ExternalTextureBase* DeviceBase::APICreateExternalTexture(
+ const ExternalTextureDescriptor* descriptor) {
+ Ref<ExternalTextureBase> result = nullptr;
+ if (ConsumedError(CreateExternalTextureInternal(descriptor), &result)) {
+ return ExternalTextureBase::MakeError(this);
+ }
+
+ return result.Detach();
}
void DeviceBase::ApplyExtensions(const DeviceDescriptor* deviceDescriptor) {
@@ -955,29 +1033,28 @@ namespace dawn_native {
}
}
+ QueueBase* DeviceBase::GetQueue() const {
+ return mQueue.Get();
+ }
+
// Implementation details of object creation
- MaybeError DeviceBase::CreateBindGroupInternal(BindGroupBase** result,
- const BindGroupDescriptor* descriptor) {
+ ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroupInternal(
+ const BindGroupDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupDescriptor(this, descriptor));
}
- DAWN_TRY_ASSIGN(*result, CreateBindGroupImpl(descriptor));
- return {};
+ return CreateBindGroupImpl(descriptor);
}
- MaybeError DeviceBase::CreateBindGroupLayoutInternal(
- BindGroupLayoutBase** result,
+ ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayoutInternal(
const BindGroupLayoutDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupLayoutDescriptor(this, descriptor));
}
- Ref<BindGroupLayoutBase> bgl;
- DAWN_TRY_ASSIGN(bgl, GetOrCreateBindGroupLayout(descriptor));
- *result = bgl.Detach();
- return {};
+ return GetOrCreateBindGroupLayout(descriptor);
}
ResultOrError<Ref<BufferBase>> DeviceBase::CreateBufferInternal(
@@ -997,123 +1074,189 @@ namespace dawn_native {
return std::move(buffer);
}
- MaybeError DeviceBase::CreateComputePipelineInternal(
- ComputePipelineBase** result,
+ ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipelineInternal(
const ComputePipelineDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
}
- if (descriptor->layout == nullptr) {
- ComputePipelineDescriptor descriptorWithDefaultLayout = *descriptor;
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateAndGetComputePipelineDescriptorWithDefaults(
+ *descriptor, &appliedDescriptor));
- DAWN_TRY_ASSIGN(descriptorWithDefaultLayout.layout,
- PipelineLayoutBase::CreateDefault(
- this, {{SingleShaderStage::Compute, &descriptor->computeStage}}));
- // Ref will keep the pipeline layout alive until the end of the function where
- // the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef = AcquireRef(descriptorWithDefaultLayout.layout);
+ auto pipelineAndBlueprintFromCache = GetCachedComputePipeline(&appliedDescriptor);
+ if (pipelineAndBlueprintFromCache.first.Get() != nullptr) {
+ return std::move(pipelineAndBlueprintFromCache.first);
+ }
+
+ Ref<ComputePipelineBase> backendObj;
+ DAWN_TRY_ASSIGN(backendObj, CreateComputePipelineImpl(&appliedDescriptor));
+ size_t blueprintHash = pipelineAndBlueprintFromCache.second;
+ return AddOrGetCachedPipeline(backendObj, blueprintHash);
+ }
+
+ MaybeError DeviceBase::CreateComputePipelineAsyncInternal(
+ const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ DAWN_TRY(ValidateIsAlive());
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+ }
+
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef;
+ ComputePipelineDescriptor appliedDescriptor;
+ DAWN_TRY_ASSIGN(layoutRef, ValidateAndGetComputePipelineDescriptorWithDefaults(
+ *descriptor, &appliedDescriptor));
- DAWN_TRY_ASSIGN(*result, GetOrCreateComputePipeline(&descriptorWithDefaultLayout));
+ // Call the callback directly when we can get a cached compute pipeline object.
+ auto pipelineAndBlueprintFromCache = GetCachedComputePipeline(&appliedDescriptor);
+ if (pipelineAndBlueprintFromCache.first.Get() != nullptr) {
+ Ref<ComputePipelineBase> result = std::move(pipelineAndBlueprintFromCache.first);
+ callback(WGPUCreatePipelineAsyncStatus_Success,
+ reinterpret_cast<WGPUComputePipeline>(result.Detach()), "", userdata);
} else {
- DAWN_TRY_ASSIGN(*result, GetOrCreateComputePipeline(descriptor));
+ // Otherwise we will create the pipeline object in CreateComputePipelineAsyncImpl(),
+ // where the pipeline object may be created asynchronously and the result will be saved
+ // to mCreatePipelineAsyncTracker.
+ const size_t blueprintHash = pipelineAndBlueprintFromCache.second;
+ CreateComputePipelineAsyncImpl(&appliedDescriptor, blueprintHash, callback, userdata);
}
+
return {};
}
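
CreateComputePipelineAsyncInternal either completes synchronously from the cache or defers to CreateComputePipelineAsyncImpl plus the tracker; from the caller's side the contract is only the callback signature. A hedged usage sketch (descriptor setup omitted; the userdata scheme is illustrative):

    // The callback receives a status, the pipeline on success, a message, and the userdata
    // pointer that was passed to APICreateComputePipelineAsync.
    void OnComputePipelineReady(WGPUCreatePipelineAsyncStatus status,
                                WGPUComputePipeline pipeline,
                                const char* message,
                                void* userdata) {
        if (status == WGPUCreatePipelineAsyncStatus_Success) {
            // The caller owns the returned pipeline reference.
            *static_cast<WGPUComputePipeline*>(userdata) = pipeline;
        }
        // On error, pipeline is nullptr and message holds the validation or device error.
    }

    // Usage (the userdata target must stay valid until the callback runs):
    WGPUComputePipeline pipeline = nullptr;
    device->APICreateComputePipelineAsync(&descriptor, OnComputePipelineReady, &pipeline);
    // The callback fires immediately on a cache hit or validation error, otherwise on a
    // later device tick once the backend has finished building the pipeline.
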
- MaybeError DeviceBase::CreatePipelineLayoutInternal(
- PipelineLayoutBase** result,
+ ResultOrError<Ref<PipelineLayoutBase>>
+ DeviceBase::ValidateAndGetComputePipelineDescriptorWithDefaults(
+ const ComputePipelineDescriptor& descriptor,
+ ComputePipelineDescriptor* outDescriptor) {
+ Ref<PipelineLayoutBase> layoutRef;
+ *outDescriptor = descriptor;
+ if (outDescriptor->layout == nullptr) {
+ DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+ this, {{SingleShaderStage::Compute,
+ outDescriptor->computeStage.module,
+ outDescriptor->computeStage.entryPoint}}));
+ outDescriptor->layout = layoutRef.Get();
+ }
+
+ return layoutRef;
+ }
+
+ // TODO(jiawei.shao@intel.com): override this function with the async version on the backends
+    // that support creating compute pipelines asynchronously
+ void DeviceBase::CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
+ size_t blueprintHash,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata) {
+ Ref<ComputePipelineBase> result;
+ std::string errorMessage;
+
+ auto resultOrError = CreateComputePipelineImpl(descriptor);
+ if (resultOrError.IsError()) {
+ std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+ errorMessage = error->GetMessage();
+ } else {
+ result = AddOrGetCachedPipeline(resultOrError.AcquireSuccess(), blueprintHash);
+ }
+
+ std::unique_ptr<CreateComputePipelineAsyncTask> request =
+ std::make_unique<CreateComputePipelineAsyncTask>(result, errorMessage, callback,
+ userdata);
+ mCreatePipelineAsyncTracker->TrackTask(std::move(request), GetPendingCommandSerial());
+ }
+
+ ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayoutInternal(
const PipelineLayoutDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
}
- DAWN_TRY_ASSIGN(*result, GetOrCreatePipelineLayout(descriptor));
- return {};
+ return GetOrCreatePipelineLayout(descriptor);
}
- MaybeError DeviceBase::CreateQuerySetInternal(QuerySetBase** result,
- const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureInternal(
+ const ExternalTextureDescriptor* descriptor) {
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateExternalTextureDescriptor(this, descriptor));
+ }
+
+ return ExternalTextureBase::Create(this, descriptor);
+ }
+
+ ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySetInternal(
+ const QuerySetDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateQuerySetDescriptor(this, descriptor));
}
- DAWN_TRY_ASSIGN(*result, CreateQuerySetImpl(descriptor));
- return {};
+ return CreateQuerySetImpl(descriptor);
}
- MaybeError DeviceBase::CreateRenderBundleEncoderInternal(
- RenderBundleEncoder** result,
+ ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoderInternal(
const RenderBundleEncoderDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
}
- *result = new RenderBundleEncoder(this, descriptor);
- return {};
+ return RenderBundleEncoder::Create(this, descriptor);
}
- MaybeError DeviceBase::CreateRenderPipelineInternal(
- RenderPipelineBase** result,
- const RenderPipelineDescriptor* descriptor) {
+ ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipelineInternal(
+ const RenderPipelineDescriptor2* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
}
if (descriptor->layout == nullptr) {
- RenderPipelineDescriptor descriptorWithDefaultLayout = *descriptor;
-
- std::vector<StageAndDescriptor> stages;
- stages.emplace_back(SingleShaderStage::Vertex, &descriptor->vertexStage);
- if (descriptor->fragmentStage != nullptr) {
- stages.emplace_back(SingleShaderStage::Fragment, descriptor->fragmentStage);
- }
+ RenderPipelineDescriptor2 descriptorWithDefaultLayout = *descriptor;
- DAWN_TRY_ASSIGN(descriptorWithDefaultLayout.layout,
- PipelineLayoutBase::CreateDefault(this, std::move(stages)));
// Ref will keep the pipeline layout alive until the end of the function where
// the pipeline will take another reference.
- Ref<PipelineLayoutBase> layoutRef = AcquireRef(descriptorWithDefaultLayout.layout);
+ Ref<PipelineLayoutBase> layoutRef;
+ DAWN_TRY_ASSIGN(layoutRef,
+ PipelineLayoutBase::CreateDefault(this, GetStages(descriptor)));
+ descriptorWithDefaultLayout.layout = layoutRef.Get();
- DAWN_TRY_ASSIGN(*result, GetOrCreateRenderPipeline(&descriptorWithDefaultLayout));
+ return GetOrCreateRenderPipeline(&descriptorWithDefaultLayout);
} else {
- DAWN_TRY_ASSIGN(*result, GetOrCreateRenderPipeline(descriptor));
+ return GetOrCreateRenderPipeline(descriptor);
}
- return {};
}
- MaybeError DeviceBase::CreateSamplerInternal(SamplerBase** result,
- const SamplerDescriptor* descriptor) {
+ ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSamplerInternal(
+ const SamplerDescriptor* descriptor) {
const SamplerDescriptor defaultDescriptor = {};
DAWN_TRY(ValidateIsAlive());
descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
if (IsValidationEnabled()) {
DAWN_TRY(ValidateSamplerDescriptor(this, descriptor));
}
- DAWN_TRY_ASSIGN(*result, GetOrCreateSampler(descriptor));
- return {};
+ return GetOrCreateSampler(descriptor);
}
- MaybeError DeviceBase::CreateShaderModuleInternal(ShaderModuleBase** result,
- const ShaderModuleDescriptor* descriptor) {
+ ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModuleInternal(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
DAWN_TRY(ValidateIsAlive());
- ShaderModuleParseResult parseResult = {};
- ShaderModuleParseResult* parseResultPtr = nullptr;
if (IsValidationEnabled()) {
- DAWN_TRY_ASSIGN(parseResult, ValidateShaderModuleDescriptor(this, descriptor));
- parseResultPtr = &parseResult;
+ DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult));
}
- DAWN_TRY_ASSIGN(*result, GetOrCreateShaderModule(descriptor, parseResultPtr));
- return {};
+ return GetOrCreateShaderModule(descriptor, parseResult);
}
- MaybeError DeviceBase::CreateSwapChainInternal(SwapChainBase** result,
- Surface* surface,
- const SwapChainDescriptor* descriptor) {
+ ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChainInternal(
+ Surface* surface,
+ const SwapChainDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateSwapChainDescriptor(this, surface, descriptor));
@@ -1121,48 +1264,48 @@ namespace dawn_native {
// TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
if (surface == nullptr) {
- DAWN_TRY_ASSIGN(*result, CreateSwapChainImpl(descriptor));
+ return CreateSwapChainImpl(descriptor);
} else {
ASSERT(descriptor->implementation == 0);
NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
- ResultOrError<NewSwapChainBase*> maybeNewSwapChain =
+ ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
CreateSwapChainImpl(surface, previousSwapChain, descriptor);
if (previousSwapChain != nullptr) {
previousSwapChain->DetachFromSurface();
}
- NewSwapChainBase* newSwapChain = nullptr;
+ Ref<NewSwapChainBase> newSwapChain;
DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
newSwapChain->SetIsAttached();
- surface->SetAttachedSwapChain(newSwapChain);
- *result = newSwapChain;
+ surface->SetAttachedSwapChain(newSwapChain.Get());
+ return newSwapChain;
}
- return {};
}
ResultOrError<Ref<TextureBase>> DeviceBase::CreateTextureInternal(
const TextureDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
+ TextureDescriptor fixedDescriptor = *descriptor;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(this, &(fixedDescriptor.size)));
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
+ DAWN_TRY(ValidateTextureDescriptor(this, &fixedDescriptor));
}
- return CreateTextureImpl(descriptor);
+ return CreateTextureImpl(&fixedDescriptor);
}
- MaybeError DeviceBase::CreateTextureViewInternal(TextureViewBase** result,
- TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
+ ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureViewInternal(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
DAWN_TRY(ValidateObject(texture));
TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
if (IsValidationEnabled()) {
DAWN_TRY(ValidateTextureViewDescriptor(texture, &desc));
}
- DAWN_TRY_ASSIGN(*result, CreateTextureViewImpl(texture, &desc));
- return {};
+ return CreateTextureViewImpl(texture, &desc);
}
// Other implementation details
@@ -1198,7 +1341,6 @@ namespace dawn_native {
void DeviceBase::SetDefaultToggles() {
SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
SetToggle(Toggle::DisallowUnsafeAPIs, true);
- SetToggle(Toggle::ConvertTimestampsToNanoseconds, true);
}
void DeviceBase::ApplyToggleOverrides(const DeviceDescriptor* deviceDescriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 7764ec6b354..c5e7ad81d10 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -27,6 +27,7 @@
#include "dawn_native/dawn_platform.h"
#include <memory>
+#include <utility>
namespace dawn_native {
class AdapterBase;
@@ -36,12 +37,14 @@ namespace dawn_native {
class CreatePipelineAsyncTracker;
class DynamicUploader;
class ErrorScopeStack;
+ class ExternalTextureBase;
+ class OwnedCompilationMessages;
class PersistentCache;
class StagingBufferBase;
struct InternalPipelineStore;
struct ShaderModuleParseResult;
- class DeviceBase {
+ class DeviceBase : public RefCounted {
public:
DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
virtual ~DeviceBase();
@@ -81,7 +84,7 @@ namespace dawn_native {
// The reference returned has the same lifetime as the device.
const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
- virtual CommandBufferBase* CreateCommandBuffer(
+ virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) = 0;
@@ -89,7 +92,6 @@ namespace dawn_native {
ExecutionSerial GetLastSubmittedCommandSerial() const;
ExecutionSerial GetFutureSerial() const;
ExecutionSerial GetPendingCommandSerial() const;
- virtual MaybeError TickImpl() = 0;
// Many Dawn objects are completely immutable once created which means that if two
// creations are given the same arguments, they can return the same object. Reusing
@@ -111,22 +113,20 @@ namespace dawn_native {
BindGroupLayoutBase* GetEmptyBindGroupLayout();
- ResultOrError<ComputePipelineBase*> GetOrCreateComputePipeline(
- const ComputePipelineDescriptor* descriptor);
void UncacheComputePipeline(ComputePipelineBase* obj);
- ResultOrError<PipelineLayoutBase*> GetOrCreatePipelineLayout(
+ ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor);
void UncachePipelineLayout(PipelineLayoutBase* obj);
- ResultOrError<RenderPipelineBase*> GetOrCreateRenderPipeline(
- const RenderPipelineDescriptor* descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> GetOrCreateRenderPipeline(
+ const RenderPipelineDescriptor2* descriptor);
void UncacheRenderPipeline(RenderPipelineBase* obj);
- ResultOrError<SamplerBase*> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+ ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
void UncacheSampler(SamplerBase* obj);
- ResultOrError<ShaderModuleBase*> GetOrCreateShaderModule(
+ ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult);
void UncacheShaderModule(ShaderModuleBase* obj);
@@ -135,56 +135,57 @@ namespace dawn_native {
Ref<AttachmentState> GetOrCreateAttachmentState(
const RenderBundleEncoderDescriptor* descriptor);
Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
+ Ref<AttachmentState> GetOrCreateAttachmentState(
+ const RenderPipelineDescriptor2* descriptor);
Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
void UncacheAttachmentState(AttachmentState* obj);
// Dawn API
- BindGroupBase* CreateBindGroup(const BindGroupDescriptor* descriptor);
- BindGroupLayoutBase* CreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
- BufferBase* CreateBuffer(const BufferDescriptor* descriptor);
- CommandEncoder* CreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
- ComputePipelineBase* CreateComputePipeline(const ComputePipelineDescriptor* descriptor);
- PipelineLayoutBase* CreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
- QuerySetBase* CreateQuerySet(const QuerySetDescriptor* descriptor);
- void CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
- WGPUCreateComputePipelineAsyncCallback callback,
- void* userdata);
- void CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
- WGPUCreateRenderPipelineAsyncCallback callback,
- void* userdata);
- RenderBundleEncoder* CreateRenderBundleEncoder(
+ BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
+ BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
+ BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
+ CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+ ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
+ PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+ QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
+ void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ void APICreateRenderPipelineAsync(const RenderPipelineDescriptor2* descriptor,
+ WGPUCreateRenderPipelineAsyncCallback callback,
+ void* userdata);
+ RenderBundleEncoder* APICreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor);
- RenderPipelineBase* CreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
- SamplerBase* CreateSampler(const SamplerDescriptor* descriptor);
- ShaderModuleBase* CreateShaderModule(const ShaderModuleDescriptor* descriptor);
- SwapChainBase* CreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
- TextureBase* CreateTexture(const TextureDescriptor* descriptor);
+ RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
+ RenderPipelineBase* APICreateRenderPipeline2(const RenderPipelineDescriptor2* descriptor);
+ ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
+ SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
+ ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
+ SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
+ TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
TextureViewBase* CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor);
InternalPipelineStore* GetInternalPipelineStore();
// For Dawn Wire
- BufferBase* CreateErrorBuffer();
+ BufferBase* APICreateErrorBuffer();
// TODO(dawn:22): Remove once the deprecation period is finished.
- QueueBase* GetDefaultQueue();
- QueueBase* GetQueue();
+ QueueBase* APIGetDefaultQueue();
+ QueueBase* APIGetQueue();
- void InjectError(wgpu::ErrorType type, const char* message);
- bool Tick();
+ void APIInjectError(wgpu::ErrorType type, const char* message);
+ bool APITick();
- void SetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
- void SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
- void PushErrorScope(wgpu::ErrorFilter filter);
- bool PopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+ void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
+ void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+ void APIPushErrorScope(wgpu::ErrorFilter filter);
+ bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
MaybeError ValidateIsAlive() const;
PersistentCache* GetPersistentCache();
- void Reference();
- void Release();
-
virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
size_t size) = 0;
virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
@@ -230,7 +231,8 @@ namespace dawn_native {
void IncrementLazyClearCountForTesting();
size_t GetDeprecationWarningCountForTesting();
void EmitDeprecationWarning(const char* warning);
- void LoseForTesting();
+ void APILoseForTesting();
+ QueueBase* GetQueue() const;
// AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
// ticked in order to clean up all pending callback work or to execute asynchronous resource
@@ -241,7 +243,9 @@ namespace dawn_native {
// reaching the serial the work will be executed on.
void AddFutureSerial(ExecutionSerial serial);
// Check for passed fences and set the new completed serial
- void CheckPassedSerials();
+ MaybeError CheckPassedSerials();
+
+ MaybeError Tick();
virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
@@ -259,66 +263,86 @@ namespace dawn_native {
void IncrementLastSubmittedCommandSerial();
private:
- virtual ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) = 0;
- virtual ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) = 0;
- virtual ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ virtual ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) = 0;
- virtual ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) = 0;
- virtual ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) = 0;
- virtual ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) = 0;
- virtual ResultOrError<SamplerBase*> CreateSamplerImpl(
+ virtual ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) = 0;
+ virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
const SamplerDescriptor* descriptor) = 0;
- virtual ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) = 0;
- virtual ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) = 0;
// Note that previousSwapChain may be nullptr, or come from a different backend.
- virtual ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) = 0;
virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) = 0;
- virtual ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) = 0;
+ virtual MaybeError TickImpl() = 0;
+
ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
- MaybeError CreateBindGroupInternal(BindGroupBase** result,
- const BindGroupDescriptor* descriptor);
- MaybeError CreateBindGroupLayoutInternal(BindGroupLayoutBase** result,
- const BindGroupLayoutDescriptor* descriptor);
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupInternal(
+ const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutInternal(
+ const BindGroupLayoutDescriptor* descriptor);
ResultOrError<Ref<BufferBase>> CreateBufferInternal(const BufferDescriptor* descriptor);
- MaybeError CreateComputePipelineInternal(ComputePipelineBase** result,
- const ComputePipelineDescriptor* descriptor);
- MaybeError CreatePipelineLayoutInternal(PipelineLayoutBase** result,
- const PipelineLayoutDescriptor* descriptor);
- MaybeError CreateQuerySetInternal(QuerySetBase** result,
- const QuerySetDescriptor* descriptor);
- MaybeError CreateRenderBundleEncoderInternal(
- RenderBundleEncoder** result,
+ MaybeError CreateComputePipelineAsyncInternal(
+ const ComputePipelineDescriptor* descriptor,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineInternal(
+ const ComputePipelineDescriptor* descriptor);
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutInternal(
+ const PipelineLayoutDescriptor* descriptor);
+ ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureInternal(
+ const ExternalTextureDescriptor* descriptor);
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetInternal(
+ const QuerySetDescriptor* descriptor);
+ ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoderInternal(
const RenderBundleEncoderDescriptor* descriptor);
- MaybeError CreateRenderPipelineInternal(RenderPipelineBase** result,
- const RenderPipelineDescriptor* descriptor);
- MaybeError CreateSamplerInternal(SamplerBase** result, const SamplerDescriptor* descriptor);
- MaybeError CreateShaderModuleInternal(ShaderModuleBase** result,
- const ShaderModuleDescriptor* descriptor);
- MaybeError CreateSwapChainInternal(SwapChainBase** result,
- Surface* surface,
- const SwapChainDescriptor* descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineInternal(
+ const RenderPipelineDescriptor2* descriptor);
+ ResultOrError<Ref<SamplerBase>> CreateSamplerInternal(const SamplerDescriptor* descriptor);
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleInternal(
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainInternal(
+ Surface* surface,
+ const SwapChainDescriptor* descriptor);
ResultOrError<Ref<TextureBase>> CreateTextureInternal(const TextureDescriptor* descriptor);
- MaybeError CreateTextureViewInternal(TextureViewBase** result,
- TextureBase* texture,
- const TextureViewDescriptor* descriptor);
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewInternal(
+ TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
+
+ ResultOrError<Ref<PipelineLayoutBase>> ValidateAndGetComputePipelineDescriptorWithDefaults(
+ const ComputePipelineDescriptor& descriptor,
+ ComputePipelineDescriptor* outDescriptor);
+ std::pair<Ref<ComputePipelineBase>, size_t> GetCachedComputePipeline(
+ const ComputePipelineDescriptor* descriptor);
+ Ref<ComputePipelineBase> AddOrGetCachedPipeline(Ref<ComputePipelineBase> computePipeline,
+ size_t blueprintHash);
+ void CreateComputePipelineAsyncImpl(const ComputePipelineDescriptor* descriptor,
+ size_t blueprintHash,
+ WGPUCreateComputePipelineAsyncCallback callback,
+ void* userdata);
void ApplyToggleOverrides(const DeviceDescriptor* deviceDescriptor);
void ApplyExtensions(const DeviceDescriptor* deviceDescriptor);
@@ -329,7 +353,7 @@ namespace dawn_native {
// Each backend should implement to check their passed fences if there are any and return a
// completed serial. Return 0 should indicate no fences to check.
- virtual ExecutionSerial CheckAndUpdateCompletedSerials() = 0;
+ virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
// During shut down of device, some operations might have been started since the last submit
// and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
// make all commands look completed.
@@ -387,7 +411,6 @@ namespace dawn_native {
struct DeprecationWarnings;
std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
- uint32_t mRefCount = 1;
State mState = State::BeingCreated;
FormatTable mFormatTable;
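
As context for the ownership change above (ResultOrError<T*> becoming ResultOrError<Ref<T>> and the removal of the manual mRefCount), a minimal standalone C++ sketch of the pattern follows; Error, Sampler, and SamplerDescriptor are simplified stand-ins, and std::shared_ptr/std::variant only approximate Dawn's Ref<> and ResultOrError<>:

    #include <memory>
    #include <string>
    #include <variant>

    struct Error { std::string message; };

    template <typename T>
    using ResultOrError = std::variant<T, Error>;  // simplified stand-in

    struct SamplerDescriptor { int maxAnisotropy = 1; };
    struct Sampler { int maxAnisotropy = 1; };

    // Returning a ref-counted handle means callers never have to balance a
    // manual Reference()/Release() pair, which is what dropping mRefCount enables.
    ResultOrError<std::shared_ptr<Sampler>> CreateSamplerInternal(
        const SamplerDescriptor* desc) {
        if (desc == nullptr) {
            return Error{"descriptor may not be null"};
        }
        auto sampler = std::make_shared<Sampler>();
        sampler->maxAnisotropy = desc->maxAnisotropy;
        return sampler;
    }
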
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
index 98d4a61b402..47b2481c5e1 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
@@ -52,7 +52,11 @@ namespace dawn_native {
{"multiplanar_formats",
"Import and use multi-planar texture formats with per plane views",
"https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
- &WGPUDeviceProperties::multiPlanarFormats}}};
+ &WGPUDeviceProperties::multiPlanarFormats},
+ {Extension::DepthClamping,
+ {"depth_clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+ &WGPUDeviceProperties::depthClamping}}};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.h b/chromium/third_party/dawn/src/dawn_native/Extensions.h
index 08689caddcd..00dd639de6d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.h
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.h
@@ -29,6 +29,7 @@ namespace dawn_native {
PipelineStatisticsQuery,
TimestampQuery,
MultiPlanarFormats,
+ DepthClamping,
EnumCount,
InvalidEnum = EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
new file mode 100644
index 00000000000..b65ba467d04
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.cpp
@@ -0,0 +1,111 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/ExternalTexture.h"
+
+#include "dawn_native/Device.h"
+#include "dawn_native/Texture.h"
+
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native {
+
+ MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView,
+ wgpu::TextureFormat format) {
+ if (textureView->GetFormat().format != format) {
+ return DAWN_VALIDATION_ERROR(
+ "The external texture descriptor specifies a texture format that is different from "
+ "at least one of the passed texture views.");
+ }
+
+ if ((textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::Sampled) !=
+ wgpu::TextureUsage::Sampled) {
+ return DAWN_VALIDATION_ERROR(
+ "The external texture descriptor specifies a texture that was not created with "
+ "TextureUsage::Sampled.");
+ }
+
+ if (textureView->GetDimension() != wgpu::TextureViewDimension::e2D) {
+ return DAWN_VALIDATION_ERROR(
+ "The external texture descriptor contains a texture view with a non-2D dimension.");
+ }
+
+ if (textureView->GetLevelCount() > 1) {
+ return DAWN_VALIDATION_ERROR(
+ "The external texture descriptor contains a texture view with a level count "
+ "greater than 1.");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ ASSERT(descriptor);
+ ASSERT(descriptor->plane0);
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+
+ switch (descriptor->format) {
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::RGBA16Float:
+ DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0, descriptor->format));
+ break;
+ default:
+ return DAWN_VALIDATION_ERROR(
+ "The external texture descriptor specifies an unsupported format.");
+ }
+
+ return {};
+ }
+
+ // static
+ ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor) {
+ Ref<ExternalTextureBase> externalTexture =
+ AcquireRef(new ExternalTextureBase(device, descriptor));
+ return std::move(externalTexture);
+ }
+
+ ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor)
+ : ObjectBase(device) {
+ textureViews[0] = descriptor->plane0;
+ }
+
+ ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+ : ObjectBase(device, tag) {
+ }
+
+ std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> ExternalTextureBase::GetTextureViews()
+ const {
+ return textureViews;
+ }
+
+ void ExternalTextureBase::APIDestroy() {
+ if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
+ return;
+ }
+ ASSERT(!IsError());
+ }
+
+ // static
+ ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
+ return new ExternalTextureBase(device, ObjectBase::kError);
+ }
+
+} // namespace dawn_native
\ No newline at end of file
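
A minimal standalone sketch of the plane-validation flow in ExternalTexture.cpp above; the enum and struct here are simplified stand-ins for Dawn's wgpu::TextureFormat and TextureViewBase, and std::optional<std::string> approximates MaybeError:

    #include <optional>
    #include <string>

    enum class TextureFormat { RGBA8Unorm, BGRA8Unorm, RGBA16Float, R8Unorm };

    struct PlaneView {
        TextureFormat format;
        bool sampledUsage;
        int levelCount;
    };

    // Returns an error message on failure, std::nullopt on success.
    std::optional<std::string> ValidatePlane(const PlaneView& view,
                                             TextureFormat expected) {
        if (view.format != expected) {
            return "plane format does not match the external texture format";
        }
        if (!view.sampledUsage) {
            return "plane texture was not created with Sampled usage";
        }
        if (view.levelCount > 1) {
            return "plane view must have a single mip level";
        }
        return std::nullopt;
    }
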
diff --git a/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
new file mode 100644
index 00000000000..7bc81e25511
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ExternalTexture.h
@@ -0,0 +1,51 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_EXTERNALTEXTURE_H_
+#define DAWNNATIVE_EXTERNALTEXTURE_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/ObjectBase.h"
+#include "dawn_native/Subresource.h"
+
+#include <array>
+
+namespace dawn_native {
+
+ struct ExternalTextureDescriptor;
+ class TextureViewBase;
+
+ MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
+
+ class ExternalTextureBase : public ObjectBase {
+ public:
+ static ResultOrError<Ref<ExternalTextureBase>> Create(
+ DeviceBase* device,
+ const ExternalTextureDescriptor* descriptor);
+
+ std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> GetTextureViews() const;
+
+ static ExternalTextureBase* MakeError(DeviceBase* device);
+
+ void APIDestroy();
+
+ private:
+ ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+ ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> textureViews;
+ };
+} // namespace dawn_native
+
+#endif  // DAWNNATIVE_EXTERNALTEXTURE_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.cpp b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
index 88e03c8e618..c9f322470b6 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
@@ -73,16 +73,16 @@ namespace dawn_native {
return new Fence(device, ObjectBase::kError);
}
- uint64_t Fence::GetCompletedValue() const {
+ uint64_t Fence::APIGetCompletedValue() const {
if (IsError()) {
return 0;
}
return uint64_t(mCompletedValue);
}
- void Fence::OnCompletion(uint64_t apiValue,
- wgpu::FenceOnCompletionCallback callback,
- void* userdata) {
+ void Fence::APIOnCompletion(uint64_t apiValue,
+ wgpu::FenceOnCompletionCallback callback,
+ void* userdata) {
FenceAPISerial value(apiValue);
WGPUFenceCompletionStatus status;
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.h b/chromium/third_party/dawn/src/dawn_native/Fence.h
index 9bc471e25e6..2f834349ed3 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.h
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.h
@@ -39,8 +39,10 @@ namespace dawn_native {
const QueueBase* GetQueue() const;
// Dawn API
- uint64_t GetCompletedValue() const;
- void OnCompletion(uint64_t value, wgpu::FenceOnCompletionCallback callback, void* userdata);
+ uint64_t APIGetCompletedValue() const;
+ void APIOnCompletion(uint64_t value,
+ wgpu::FenceOnCompletionCallback callback,
+ void* userdata);
protected:
friend class QueueBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
index fefccf345ce..01457d1369e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ b/chromium/third_party/dawn/src/dawn_native/Format.h
@@ -20,6 +20,7 @@
#include "common/ityp_bitset.h"
#include "dawn_native/EnumClassBitmasks.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Subresource.h"
#include <array>
@@ -75,10 +76,6 @@ namespace dawn_native {
// exact number of known format.
static constexpr size_t kKnownFormatCount = 55;
- // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
- // the per plane index does not exceed the known maximum plane count
- static constexpr uint32_t kMaxPlanesPerFormat = 2;
-
struct Format;
using FormatTable = std::array<Format, kKnownFormatCount>;
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
index f7fe907af80..7d65e4d185e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
@@ -238,7 +238,7 @@ namespace dawn_native {
#endif // defined(DAWN_USE_X11)
}
- Surface* InstanceBase::CreateSurface(const SurfaceDescriptor* descriptor) {
+ Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
return nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.h b/chromium/third_party/dawn/src/dawn_native/Instance.h
index 21f320411b7..86118d5ed22 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.h
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.h
@@ -72,7 +72,7 @@ namespace dawn_native {
const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
// Dawn API
- Surface* CreateSurface(const SurfaceDescriptor* descriptor);
+ Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
private:
InstanceBase() = default;
diff --git a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
index 5e3462baa18..1d901595de5 100644
--- a/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
+++ b/chromium/third_party/dawn/src/dawn_native/InternalPipelineStore.h
@@ -25,7 +25,9 @@ namespace dawn_native {
class ShaderModuleBase;
struct InternalPipelineStore {
- Ref<RenderPipelineBase> copyTextureForBrowserPipeline;
+ std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
+ copyTextureForBrowserPipelines;
+
Ref<ShaderModuleBase> copyTextureForBrowserVS;
Ref<ShaderModuleBase> copyTextureForBrowserFS;
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
index f0aa43a059b..772f8c2e344 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
@@ -42,6 +42,9 @@ namespace dawn_native {
std::vector<TextureBase*> textures;
std::vector<PassTextureUsage> textureUsages;
+
+ std::vector<QuerySetBase*> querySets;
+ std::vector<std::vector<bool>> queryAvailabilities;
};
using PerPassUsages = std::vector<PassResourceUsage>;
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index 5816280a2a7..81d72edf408 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -17,6 +17,7 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Format.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/Texture.h"
#include <utility>
@@ -65,6 +66,23 @@ namespace dawn_native {
const wgpu::TextureUsage& addedUsage) { *storedUsage |= addedUsage; });
}
+ void PassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+ uint32_t queryIndex) {
+ // Query availability only needs to be tracked on render passes, both to check for query
+ // overwrites within a render pass and to reset query sets on the Vulkan backend.
+ DAWN_ASSERT(mPassType == PassType::Render);
+ DAWN_ASSERT(querySet != nullptr);
+
+ // Get the iterator for that querySet, or create a new vector of bools initialized to false
+ // if the querySet wasn't registered yet.
+ auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+ it->second[queryIndex] = true;
+ }
+
+ const QueryAvailabilityMap& PassResourceUsageTracker::GetQueryAvailabilityMap() const {
+ return mQueryAvailabilities;
+ }
+
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
PassResourceUsage PassResourceUsageTracker::AcquireResourceUsage() {
PassResourceUsage result;
@@ -73,6 +91,8 @@ namespace dawn_native {
result.bufferUsages.reserve(mBufferUsages.size());
result.textures.reserve(mTextureUsages.size());
result.textureUsages.reserve(mTextureUsages.size());
+ result.querySets.reserve(mQueryAvailabilities.size());
+ result.queryAvailabilities.reserve(mQueryAvailabilities.size());
for (auto& it : mBufferUsages) {
result.buffers.push_back(it.first);
@@ -84,8 +104,14 @@ namespace dawn_native {
result.textureUsages.push_back(std::move(it.second));
}
+ for (auto& it : mQueryAvailabilities) {
+ result.querySets.push_back(it.first);
+ result.queryAvailabilities.push_back(std::move(it.second));
+ }
+
mBufferUsages.clear();
mTextureUsages.clear();
+ mQueryAvailabilities.clear();
return result;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
index cfcaa225e12..cd54f8c3464 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
@@ -24,8 +24,11 @@
namespace dawn_native {
class BufferBase;
+ class QuerySetBase;
class TextureBase;
+ using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
+
// Helper class to encapsulate the logic of tracking per-resource usage during the
// validation of command buffer passes. It is used both to know if there are validation
// errors, and to get a list of resources used per pass for backends that need the
@@ -36,6 +39,8 @@ namespace dawn_native {
void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
void AddTextureUsage(TextureBase* texture, const PassTextureUsage& textureUsage);
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+ const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
PassResourceUsage AcquireResourceUsage();
@@ -44,6 +49,10 @@ namespace dawn_native {
PassType mPassType;
std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
std::map<TextureBase*, PassTextureUsage> mTextureUsages;
+ // Tracks the availability of the queries used in a render pass. The same query cannot be
+ // written twice in the same render pass, so each render pass needs its own query
+ // availability map for validation.
+ QueryAvailabilityMap mQueryAvailabilities;
};
} // namespace dawn_native
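
A standalone sketch of the query-availability bookkeeping added above, using a plain integer key in place of QuerySetBase* so the snippet is self-contained:

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <vector>

    using QueryAvailabilityMap = std::map<int, std::vector<bool>>;

    // Mirrors TrackQueryAvailability: emplace() only inserts when the query set
    // is new, so an existing availability vector is reused and the written
    // query index is simply marked true.
    void TrackQueryAvailability(QueryAvailabilityMap& map, int querySetId,
                                std::size_t queryCount, std::uint32_t queryIndex) {
        auto it = map.emplace(querySetId, std::vector<bool>(queryCount, false)).first;
        it->second[queryIndex] = true;
    }
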
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index 4d582e1f4d7..e5933f555d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -22,18 +22,18 @@
namespace dawn_native {
- MaybeError ValidateProgrammableStageDescriptor(DeviceBase* device,
- const ProgrammableStageDescriptor* descriptor,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage) {
- const ShaderModuleBase* module = descriptor->module;
+ MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage) {
DAWN_TRY(device->ValidateObject(module));
- if (!module->HasEntryPoint(descriptor->entryPoint)) {
+ if (!module->HasEntryPoint(entryPoint)) {
return DAWN_VALIDATION_ERROR("Entry point doesn't exist in the module");
}
- const EntryPointMetadata& metadata = module->GetEntryPoint(descriptor->entryPoint);
+ const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
if (metadata.stage != stage) {
return DAWN_VALIDATION_ERROR("Entry point isn't for the correct stage");
@@ -56,9 +56,9 @@ namespace dawn_native {
for (const StageAndDescriptor& stage : stages) {
// Extract argument for this stage.
- SingleShaderStage shaderStage = stage.first;
- ShaderModuleBase* module = stage.second->module;
- const char* entryPointName = stage.second->entryPoint;
+ SingleShaderStage shaderStage = stage.shaderStage;
+ ShaderModuleBase* module = stage.module;
+ const char* entryPointName = stage.entryPoint.c_str();
const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
ASSERT(metadata.stage == shaderStage);
@@ -125,7 +125,7 @@ namespace dawn_native {
return {};
}
- BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
+ BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(groupIndexIn))) {
return BindGroupLayoutBase::MakeError(GetDevice());
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index cb18032555e..008845d62d8 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -28,10 +28,11 @@
namespace dawn_native {
- MaybeError ValidateProgrammableStageDescriptor(DeviceBase* device,
- const ProgrammableStageDescriptor* descriptor,
- const PipelineLayoutBase* layout,
- SingleShaderStage stage);
+ MaybeError ValidateProgrammableStage(DeviceBase* device,
+ const ShaderModuleBase* module,
+ const std::string& entryPoint,
+ const PipelineLayoutBase* layout,
+ SingleShaderStage stage);
struct ProgrammableStage {
Ref<ShaderModuleBase> module;
@@ -49,7 +50,7 @@ namespace dawn_native {
const ProgrammableStage& GetStage(SingleShaderStage stage) const;
const PerStage<ProgrammableStage>& GetAllStages() const;
- BindGroupLayoutBase* GetBindGroupLayout(uint32_t groupIndex);
+ BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
// Helper functions for std::unordered_map-based pipeline caches.
size_t ComputeContentHash() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index 5cb05d31c5c..bc659ee3205 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -75,7 +75,7 @@ namespace dawn_native {
}
// static
- ResultOrError<PipelineLayoutBase*> PipelineLayoutBase::CreateDefault(
+ ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
DeviceBase* device,
std::vector<StageAndDescriptor> stages) {
using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
@@ -177,7 +177,7 @@ namespace dawn_native {
// Loops over all the reflected BindGroupLayoutEntries from shaders.
for (const StageAndDescriptor& stage : stages) {
const EntryPointMetadata::BindingInfoArray& info =
- stage.second->module->GetEntryPoint(stage.second->entryPoint).bindings;
+ stage.module->GetEntryPoint(stage.entryPoint).bindings;
for (BindGroupIndex group(0); group < info.size(); ++group) {
for (const auto& bindingIt : info[group]) {
@@ -187,7 +187,7 @@ namespace dawn_native {
// Create the BindGroupLayoutEntry
BindGroupLayoutEntry entry = ConvertMetadataToEntry(shaderBinding);
entry.binding = static_cast<uint32_t>(bindingNumber);
- entry.visibility = StageBit(stage.first);
+ entry.visibility = StageBit(stage.shaderStage);
// Add it to our map of all entries, if there is an existing entry, then we
// need to merge, if we can.
@@ -213,32 +213,30 @@ namespace dawn_native {
}
// Create the deduced pipeline layout, validating if it is valid.
- PipelineLayoutBase* pipelineLayout = nullptr;
- {
- ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
- for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
- bgls[group] = bindGroupLayouts[group].Get();
- }
+ ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+ for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
+ bgls[group] = bindGroupLayouts[group].Get();
+ }
- PipelineLayoutDescriptor desc = {};
- desc.bindGroupLayouts = bgls.data();
- desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
+ PipelineLayoutDescriptor desc = {};
+ desc.bindGroupLayouts = bgls.data();
+ desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
- DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc));
- DAWN_TRY_ASSIGN(pipelineLayout, device->GetOrCreatePipelineLayout(&desc));
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc));
- ASSERT(!pipelineLayout->IsError());
- }
+ Ref<PipelineLayoutBase> result;
+ DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
+ ASSERT(!result->IsError());
- // Sanity check in debug that the pipeline layout is compatible with the current pipeline.
+ // Sanity check in debug that the pipeline layout is compatible with the current
+ // pipeline.
for (const StageAndDescriptor& stage : stages) {
- const EntryPointMetadata& metadata =
- stage.second->module->GetEntryPoint(stage.second->entryPoint);
- ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, pipelineLayout)
+ const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+ ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
.IsSuccess());
}
- return pipelineLayout;
+ return std::move(result);
}
const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
index 9c302741517..0c1b5d74daa 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
@@ -37,7 +37,11 @@ namespace dawn_native {
ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
- using StageAndDescriptor = std::pair<SingleShaderStage, const ProgrammableStageDescriptor*>;
+ struct StageAndDescriptor {
+ SingleShaderStage shaderStage;
+ ShaderModuleBase* module;
+ std::string entryPoint;
+ };
class PipelineLayoutBase : public CachedObject {
public:
@@ -45,7 +49,7 @@ namespace dawn_native {
~PipelineLayoutBase() override;
static PipelineLayoutBase* MakeError(DeviceBase* device);
- static ResultOrError<PipelineLayoutBase*> CreateDefault(
+ static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
DeviceBase* device,
std::vector<StageAndDescriptor> stages);
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index 362362dc630..c905de1b275 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -111,7 +111,7 @@ namespace dawn_native {
return {};
}
- void ProgrammablePassEncoder::InsertDebugMarker(const char* groupLabel) {
+ void ProgrammablePassEncoder::APIInsertDebugMarker(const char* groupLabel) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
InsertDebugMarkerCmd* cmd =
allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
@@ -124,7 +124,7 @@ namespace dawn_native {
});
}
- void ProgrammablePassEncoder::PopDebugGroup() {
+ void ProgrammablePassEncoder::APIPopDebugGroup() {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
if (mDebugGroupStackSize == 0) {
@@ -138,7 +138,7 @@ namespace dawn_native {
});
}
- void ProgrammablePassEncoder::PushDebugGroup(const char* groupLabel) {
+ void ProgrammablePassEncoder::APIPushDebugGroup(const char* groupLabel) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
PushDebugGroupCmd* cmd =
allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
@@ -153,10 +153,10 @@ namespace dawn_native {
});
}
- void ProgrammablePassEncoder::SetBindGroup(uint32_t groupIndexIn,
- BindGroupBase* group,
- uint32_t dynamicOffsetCountIn,
- const uint32_t* dynamicOffsetsIn) {
+ void ProgrammablePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCountIn,
+ const uint32_t* dynamicOffsetsIn) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
BindGroupIndex groupIndex(groupIndexIn);
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
index 05300e72959..8816914cd72 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
@@ -34,14 +34,14 @@ namespace dawn_native {
EncodingContext* encodingContext,
PassType passType);
- void InsertDebugMarker(const char* groupLabel);
- void PopDebugGroup();
- void PushDebugGroup(const char* groupLabel);
+ void APIInsertDebugMarker(const char* groupLabel);
+ void APIPopDebugGroup();
+ void APIPushDebugGroup(const char* groupLabel);
- void SetBindGroup(uint32_t groupIndex,
- BindGroupBase* group,
- uint32_t dynamicOffsetCount = 0,
- const uint32_t* dynamicOffsets = nullptr);
+ void APISetBindGroup(uint32_t groupIndex,
+ BindGroupBase* group,
+ uint32_t dynamicOffsetCount = 0,
+ const uint32_t* dynamicOffsets = nullptr);
protected:
bool IsValidationEnabled() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp b/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
index 048e279f65f..585e844c740 100644
--- a/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/QueryHelper.cpp
@@ -34,28 +34,28 @@ namespace dawn_native {
static const char sConvertTimestampsToNanoseconds[] = R"(
struct Timestamp {
- [[offset(0)]] low : u32;
- [[offset(4)]] high : u32;
+ low : u32;
+ high : u32;
};
[[block]] struct TimestampArr {
- [[offset(0)]] t : [[stride(8)]] array<Timestamp>;
+ t : array<Timestamp>;
};
[[block]] struct AvailabilityArr {
- [[offset(0)]] v : [[stride(4)]] array<u32>;
+ v : array<u32>;
};
[[block]] struct TimestampParams {
- [[offset(0)]] count : u32;
- [[offset(4)]] offset : u32;
- [[offset(8)]] period : f32;
+ count : u32;
+ offset : u32;
+ period : f32;
};
[[group(0), binding(0)]]
- var<storage_buffer> timestamps : [[access(read_write)]] TimestampArr;
+ var<storage> timestamps : [[access(read_write)]] TimestampArr;
[[group(0), binding(1)]]
- var<storage_buffer> availability : [[access(read)]] AvailabilityArr;
+ var<storage> availability : [[access(read)]] AvailabilityArr;
[[group(0), binding(2)]] var<uniform> params : TimestampParams;
[[builtin(global_invocation_id)]] var<in> GlobalInvocationID : vec3<u32>;
@@ -87,15 +87,15 @@ namespace dawn_native {
if (timestamp.low <= u32(f32(0xFFFFFFFFu) / period)) {
timestamps.t[index].low = u32(round(f32(timestamp.low) * period));
} else {
- var lo : u32 = timestamp.low & 0xFFFF;
- var hi : u32 = timestamp.low >> 16;
+ var lo : u32 = timestamp.low & 0xFFFFu;
+ var hi : u32 = timestamp.low >> 16u;
var t0 : u32 = u32(round(f32(lo) * period));
- var t1 : u32 = u32(round(f32(hi) * period)) + (t0 >> 16);
- w = t1 >> 16;
+ var t1 : u32 = u32(round(f32(hi) * period)) + (t0 >> 16u);
+ w = t1 >> 16u;
- var result : u32 = t1 << 16;
- result = result | (t0 & 0xFFFF);
+ var result : u32 = t1 << 16u;
+ result = result | (t0 & 0xFFFFu);
timestamps.t[index].low = result;
}
@@ -116,7 +116,8 @@ namespace dawn_native {
wgslDesc.source = sConvertTimestampsToNanoseconds;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
- store->timestampCS = AcquireRef(device->CreateShaderModule(&descriptor));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ store->timestampCS = AcquireRef(device->APICreateShaderModule(&descriptor));
}
// Create ComputePipeline.
@@ -126,8 +127,9 @@ namespace dawn_native {
computePipelineDesc.computeStage.module = store->timestampCS.Get();
computePipelineDesc.computeStage.entryPoint = "main";
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
store->timestampComputePipeline =
- AcquireRef(device->CreateComputePipeline(&computePipelineDesc));
+ AcquireRef(device->APICreateComputePipeline(&computePipelineDesc));
}
return store->timestampComputePipeline.Get();
@@ -144,7 +146,8 @@ namespace dawn_native {
ComputePipelineBase* pipeline = GetOrCreateTimestampComputePipeline(device);
// Prepare bind group layout.
- Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->GetBindGroupLayout(0));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BindGroupLayoutBase> layout = AcquireRef(pipeline->APIGetBindGroupLayout(0));
// Prepare bind group descriptor
std::array<BindGroupEntry, 3> bindGroupEntries = {};
@@ -165,15 +168,18 @@ namespace dawn_native {
bindGroupEntries[2].size = params->GetSize();
// Create bind group after all binding entries are set.
- Ref<BindGroupBase> bindGroup = AcquireRef(device->CreateBindGroup(&bgDesc));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<BindGroupBase> bindGroup = AcquireRef(device->APICreateBindGroup(&bgDesc));
// Create compute encoder and issue dispatch.
ComputePassDescriptor passDesc = {};
- Ref<ComputePassEncoder> pass = AcquireRef(encoder->BeginComputePass(&passDesc));
- pass->SetPipeline(pipeline);
- pass->SetBindGroup(0, bindGroup.Get());
- pass->Dispatch(static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
- pass->EndPass();
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<ComputePassEncoder> pass = AcquireRef(encoder->APIBeginComputePass(&passDesc));
+ pass->APISetPipeline(pipeline);
+ pass->APISetBindGroup(0, bindGroup.Get());
+ pass->APIDispatch(
+ static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
+ pass->APIEndPass();
}
} // namespace dawn_native
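
The WGSL above scales a 32-bit tick count by a float period in 16-bit halves so the intermediate products stay within u32 range; a standalone C++ transcription of that low-word step, as a sketch of the same arithmetic:

    #include <cmath>
    #include <cstdint>

    // Scales the low 32 bits of a timestamp by `period` in 16-bit halves and
    // reports the carry that must be added into the high 32 bits, matching the
    // shader's lo/hi/t0/t1 arithmetic.
    std::uint32_t ScaleLowWord(std::uint32_t low, float period, std::uint32_t* carry) {
        std::uint32_t lo = low & 0xFFFFu;
        std::uint32_t hi = low >> 16u;
        std::uint32_t t0 = static_cast<std::uint32_t>(std::round(lo * period));
        std::uint32_t t1 = static_cast<std::uint32_t>(std::round(hi * period)) + (t0 >> 16u);
        *carry = t1 >> 16u;
        return (t1 << 16u) | (t0 & 0xFFFFu);
    }
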
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
index d181e364232..26e9c6d1c94 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
@@ -153,7 +153,7 @@ namespace dawn_native {
return {};
}
- void QuerySetBase::Destroy() {
+ void QuerySetBase::APIDestroy() {
if (GetDevice()->ConsumedError(ValidateDestroy())) {
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.h b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
index df36b2ada4b..32b75c9edcc 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.h
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
@@ -40,7 +40,7 @@ namespace dawn_native {
MaybeError ValidateCanUseInSubmitNow() const;
- void Destroy();
+ void APIDestroy();
protected:
QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 53bf294bf0d..c113c1efe10 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -115,9 +115,9 @@ namespace dawn_native {
uint64_t imageAdditionalStride =
dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
- CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth, alignedRowsPerImage,
- imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
- dataLayout.bytesPerRow);
+ CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
+ alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
+ optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
return uploadHandle;
}
@@ -176,7 +176,7 @@ namespace dawn_native {
return new ErrorQueue(device);
}
- void QueueBase::Submit(uint32_t commandCount, CommandBufferBase* const* commands) {
+ void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
SubmitInternal(commandCount, commands);
for (uint32_t i = 0; i < commandCount; ++i) {
@@ -184,7 +184,7 @@ namespace dawn_native {
}
}
- void QueueBase::Signal(Fence* fence, uint64_t apiSignalValue) {
+ void QueueBase::APISignal(Fence* fence, uint64_t apiSignalValue) {
FenceAPISerial signalValue(apiSignalValue);
DeviceBase* device = GetDevice();
@@ -197,9 +197,9 @@ namespace dawn_native {
fence->UpdateFenceOnComplete(fence, signalValue);
}
- void QueueBase::OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata) {
+ void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata) {
// The error status depends on the type of error so we let the validation function choose it
WGPUQueueWorkDoneStatus status;
if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
@@ -223,10 +223,20 @@ namespace dawn_native {
}
void QueueBase::Tick(ExecutionSerial finishedSerial) {
+ // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
+ // then the device will be ticked, which in turn ticks the queue, causing reentrance here.
+ // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
+ // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
+ // callbacks.
+ std::vector<std::unique_ptr<TaskInFlight>> tasks;
for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
- task->Finish();
+ tasks.push_back(std::move(task));
}
mTasksInFlight.ClearUpTo(finishedSerial);
+
+ for (auto& task : tasks) {
+ task->Finish();
+ }
}
void QueueBase::HandleDeviceLoss() {
@@ -236,7 +246,11 @@ namespace dawn_native {
mTasksInFlight.Clear();
}
- Fence* QueueBase::CreateFence(const FenceDescriptor* descriptor) {
+ Fence* QueueBase::APICreateFence(const FenceDescriptor* descriptor) {
+ // TODO(chromium:1177476): Remove once the deprecation period is finished.
+ GetDevice()->EmitDeprecationWarning(
+ "Fences are deprecated, use Queue::OnSubmittedWorkDone instead.");
+
if (GetDevice()->ConsumedError(ValidateCreateFence(descriptor))) {
return Fence::MakeError(GetDevice());
}
@@ -248,17 +262,17 @@ namespace dawn_native {
return new Fence(this, descriptor);
}
- void QueueBase::WriteBuffer(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- GetDevice()->ConsumedError(WriteBufferInternal(buffer, bufferOffset, data, size));
+ void QueueBase::APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
}
- MaybeError QueueBase::WriteBufferInternal(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
+ MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
DAWN_TRY(ValidateWriteBuffer(buffer, bufferOffset, size));
return WriteBufferImpl(buffer, bufferOffset, data, size);
}
@@ -287,34 +301,38 @@ namespace dawn_native {
buffer, bufferOffset, size);
}
- void QueueBase::WriteTexture(const TextureCopyView* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize) {
+ void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) {
GetDevice()->ConsumedError(
WriteTextureInternal(destination, data, dataSize, dataLayout, writeSize));
}
- MaybeError QueueBase::WriteTextureInternal(const TextureCopyView* destination,
+ MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
const void* data,
size_t dataSize,
const TextureDataLayout* dataLayout,
const Extent3D* writeSize) {
- DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
+ Extent3D fixedWriteSize = *writeSize;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedWriteSize));
+
+ DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, &fixedWriteSize));
- if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depth == 0) {
+ if (fixedWriteSize.width == 0 || fixedWriteSize.height == 0 ||
+ fixedWriteSize.depthOrArrayLayers == 0) {
return {};
}
const TexelBlockInfo& blockInfo =
destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
TextureDataLayout layout = *dataLayout;
- ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
- return WriteTextureImpl(*destination, data, layout, *writeSize);
+ ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, fixedWriteSize);
+ return WriteTextureImpl(*destination, data, layout, fixedWriteSize);
}
- MaybeError QueueBase::WriteTextureImpl(const TextureCopyView& destination,
+ MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
const void* data,
const TextureDataLayout& dataLayout,
const Extent3D& writeSizePixel) {
@@ -358,25 +376,27 @@ namespace dawn_native {
&textureCopy, writeSizePixel);
}
- void QueueBase::CopyTextureForBrowser(const TextureCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options) {
+ void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options) {
GetDevice()->ConsumedError(
CopyTextureForBrowserInternal(source, destination, copySize, options));
}
MaybeError QueueBase::CopyTextureForBrowserInternal(
- const TextureCopyView* source,
- const TextureCopyView* destination,
+ const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options) {
+ Extent3D fixedCopySize = *copySize;
+ DAWN_TRY(FixUpDeprecatedGPUExtent3DDepth(GetDevice(), &fixedCopySize));
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(
- ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options));
+ DAWN_TRY(ValidateCopyTextureForBrowser(GetDevice(), source, destination, &fixedCopySize,
+ options));
}
- return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
+ return DoCopyTextureForBrowser(GetDevice(), source, destination, &fixedCopySize, options);
}
MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
@@ -481,7 +501,7 @@ namespace dawn_native {
return {};
}
- MaybeError QueueBase::ValidateWriteTexture(const TextureCopyView* destination,
+ MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
size_t dataSize,
const TextureDataLayout* dataLayout,
const Extent3D* writeSize) const {
@@ -489,7 +509,7 @@ namespace dawn_native {
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
- DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination, *writeSize));
+ DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
if (dataLayout->offset > dataSize) {
return DAWN_VALIDATION_ERROR("Queue::WriteTexture out of range");
@@ -508,7 +528,7 @@ namespace dawn_native {
// because in the latter we divide copyExtent.width by blockWidth and
// copyExtent.height by blockHeight while the divisibility conditions are
// checked in validating texture copy range.
- DAWN_TRY(ValidateTextureCopyRange(*destination, *writeSize));
+ DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
const TexelBlockInfo& blockInfo =
destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
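
The reentrancy note in QueueBase::Tick above boils down to draining the pending list before invoking any callback; a minimal standalone sketch of that pattern, with std::function standing in for Dawn's TaskInFlight:

    #include <functional>
    #include <vector>

    struct PendingTasks {
        std::vector<std::function<void()>> tasks;

        void Tick() {
            // Detach the finished tasks first so a callback that re-enters
            // Tick() never iterates a container that is being mutated.
            std::vector<std::function<void()>> ready;
            ready.swap(tasks);
            for (auto& task : ready) {
                task();  // may safely enqueue new tasks
            }
        }
    };
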
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index bdc2007d300..e0f59c4fee9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -37,23 +37,30 @@ namespace dawn_native {
~QueueBase() override;
// Dawn API
- void Submit(uint32_t commandCount, CommandBufferBase* const* commands);
- void Signal(Fence* fence, uint64_t signalValue);
- Fence* CreateFence(const FenceDescriptor* descriptor);
- void OnSubmittedWorkDone(uint64_t signalValue,
- WGPUQueueWorkDoneCallback callback,
- void* userdata);
- void WriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
- void WriteTexture(const TextureCopyView* destination,
- const void* data,
- size_t dataSize,
- const TextureDataLayout* dataLayout,
- const Extent3D* writeSize);
- void CopyTextureForBrowser(const TextureCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize,
- const CopyTextureForBrowserOptions* options);
-
+ void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
+ void APISignal(Fence* fence, uint64_t signalValue);
+ Fence* APICreateFence(const FenceDescriptor* descriptor);
+ void APIOnSubmittedWorkDone(uint64_t signalValue,
+ WGPUQueueWorkDoneCallback callback,
+ void* userdata);
+ void APIWriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
+ void APIWriteTexture(const ImageCopyTexture* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize);
+ void APICopyTextureForBrowser(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
+ const Extent3D* copySize,
+ const CopyTextureForBrowserOptions* options);
+
+ MaybeError WriteBuffer(BufferBase* buffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size);
void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
void Tick(ExecutionSerial finishedSerial);
void HandleDeviceLoss();
@@ -63,17 +70,13 @@ namespace dawn_native {
QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
private:
- MaybeError WriteBufferInternal(BufferBase* buffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size);
- MaybeError WriteTextureInternal(const TextureCopyView* destination,
+ MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
const void* data,
size_t dataSize,
const TextureDataLayout* dataLayout,
const Extent3D* writeSize);
- MaybeError CopyTextureForBrowserInternal(const TextureCopyView* source,
- const TextureCopyView* destination,
+ MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+ const ImageCopyTexture* destination,
const Extent3D* copySize,
const CopyTextureForBrowserOptions* options);
@@ -83,7 +86,7 @@ namespace dawn_native {
uint64_t bufferOffset,
const void* data,
size_t size);
- virtual MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
const void* data,
const TextureDataLayout& dataLayout,
const Extent3D& writeSize);
@@ -96,7 +99,7 @@ namespace dawn_native {
MaybeError ValidateWriteBuffer(const BufferBase* buffer,
uint64_t bufferOffset,
size_t size) const;
- MaybeError ValidateWriteTexture(const TextureCopyView* destination,
+ MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
size_t dataSize,
const TextureDataLayout* dataLayout,
const Extent3D* writeSize) const;
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
index b8f3c823556..45647a42cad 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
@@ -91,6 +91,13 @@ namespace dawn_native {
}
// static
+ Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
+ DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor) {
+ return AcquireRef(new RenderBundleEncoder(device, descriptor));
+ }
+
+ // static
RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
return new RenderBundleEncoder(device, ObjectBase::kError);
}
@@ -99,7 +106,7 @@ namespace dawn_native {
return mBundleEncodingContext.AcquireCommands();
}
- RenderBundleBase* RenderBundleEncoder::Finish(const RenderBundleDescriptor* descriptor) {
+ RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
RenderBundleBase* result = nullptr;
if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result)) {
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
index 0bd63fb4680..27d5de26d21 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
@@ -28,15 +28,16 @@ namespace dawn_native {
class RenderBundleEncoder final : public RenderEncoderBase {
public:
- RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
-
+ static Ref<RenderBundleEncoder> Create(DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor);
static RenderBundleEncoder* MakeError(DeviceBase* device);
- RenderBundleBase* Finish(const RenderBundleDescriptor* descriptor);
+ RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
CommandIterator AcquireCommands();
private:
+ RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index a6be71cbc6f..f87bb3b54ee 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -56,10 +56,10 @@ namespace dawn_native {
return std::move(mAttachmentState);
}
- void RenderEncoderBase::Draw(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance) {
+ void RenderEncoderBase::APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(mCommandBufferState.ValidateCanDraw());
@@ -79,11 +79,11 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::DrawIndexed(uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance) {
+ void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
@@ -115,7 +115,7 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::DrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+ void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
@@ -142,8 +142,8 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::DrawIndexedIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
+ void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
+ uint64_t indirectOffset) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
@@ -181,7 +181,7 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::SetPipeline(RenderPipelineBase* pipeline) {
+ void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(pipeline));
@@ -203,18 +203,20 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::SetIndexBufferWithFormat(BufferBase* buffer, wgpu::IndexFormat format,
- uint64_t offset, uint64_t size) {
+ void RenderEncoderBase::APISetIndexBufferWithFormat(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size) {
GetDevice()->EmitDeprecationWarning(
"RenderEncoderBase::SetIndexBufferWithFormat is deprecated. Use "
"RenderEncoderBase::SetIndexBuffer instead.");
- SetIndexBuffer(buffer, format, offset, size);
+ APISetIndexBuffer(buffer, format, offset, size);
}
- void RenderEncoderBase::SetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size) {
+ void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(buffer));
@@ -259,10 +261,10 @@ namespace dawn_native {
});
}
- void RenderEncoderBase::SetVertexBuffer(uint32_t slot,
- BufferBase* buffer,
- uint64_t offset,
- uint64_t size) {
+ void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
+ BufferBase* buffer,
+ uint64_t offset,
+ uint64_t size) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(buffer));
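
SetIndexBufferWithFormat is now only a deprecation shim: it emits a warning and forwards to APISetIndexBuffer, which carries the explicit wgpu::IndexFormat parameter. Taking the signatures from the dawn_native methods above, the caller-side migration looks roughly like this (encoder, buffer and size are assumed to exist; the old spelling keeps working for now but warns):

    // Deprecated spelling:
    pass.SetIndexBufferWithFormat(indexBuffer, wgpu::IndexFormat::Uint16, 0, indexDataSize);
    // Replacement:
    pass.SetIndexBuffer(indexBuffer, wgpu::IndexFormat::Uint16, 0, indexDataSize);
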
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
index 8025f7442ff..4f312707eed 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
@@ -27,28 +27,30 @@ namespace dawn_native {
EncodingContext* encodingContext,
Ref<AttachmentState> attachmentState);
- void Draw(uint32_t vertexCount,
- uint32_t instanceCount = 1,
- uint32_t firstVertex = 0,
- uint32_t firstInstance = 0);
- void DrawIndexed(uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t baseVertex,
- uint32_t firstInstance);
+ void APIDraw(uint32_t vertexCount,
+ uint32_t instanceCount = 1,
+ uint32_t firstVertex = 0,
+ uint32_t firstInstance = 0);
+ void APIDrawIndexed(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t baseVertex,
+ uint32_t firstInstance);
- void DrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void DrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+ void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
- void SetPipeline(RenderPipelineBase* pipeline);
+ void APISetPipeline(RenderPipelineBase* pipeline);
- void SetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
- void SetIndexBuffer(BufferBase* buffer,
- wgpu::IndexFormat format,
- uint64_t offset,
- uint64_t size);
- void SetIndexBufferWithFormat(BufferBase* buffer, wgpu::IndexFormat format, uint64_t offset,
- uint64_t size);
+ void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
+ void APISetIndexBuffer(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size);
+ void APISetIndexBufferWithFormat(BufferBase* buffer,
+ wgpu::IndexFormat format,
+ uint64_t offset,
+ uint64_t size);
const AttachmentState* GetAttachmentState() const;
Ref<AttachmentState> AcquireAttachmentState();
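
Every method reachable from the public webgpu.h surface now carries an API prefix, which keeps the externally callable entry points visually distinct from internal helpers and matches what the code generator dispatches to. A hand-written sketch of the kind of trampoline the generated proc table emits; all names here are illustrative, not the generator's actual output:

    void NativeRenderPassEncoderSetPipeline(WGPURenderPassEncoder cSelf, WGPURenderPipeline cPipeline) {
        auto* self = reinterpret_cast<dawn_native::RenderPassEncoder*>(cSelf);
        auto* pipeline = reinterpret_cast<dawn_native::RenderPipelineBase*>(cPipeline);
        self->APISetPipeline(pipeline);  // helpers without the API prefix stay off the C surface
    }
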
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
index c5c31a301d3..4c48bb43e80 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
@@ -80,20 +80,15 @@ namespace dawn_native {
void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
DAWN_ASSERT(querySet != nullptr);
- // Gets the iterator for that querySet or create a new vector of bool set to false
- // if the querySet wasn't registered.
- auto it = mQueryAvailabilityMap.emplace(querySet, querySet->GetQueryCount()).first;
- it->second[queryIndex] = 1;
+ // Track the query availability with true on the render pass, both for overwrite validation
+ // and for the per-pass query reset done on Vulkan.
+ mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
// Track it again on command encoder for zero-initializing when resolving unused queries.
mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
}
- const QueryAvailabilityMap& RenderPassEncoder::GetQueryAvailabilityMap() const {
- return mQueryAvailabilityMap;
- }
-
- void RenderPassEncoder::EndPass() {
+ void RenderPassEncoder::APIEndPass() {
if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(ValidateProgrammableEncoderEnd());
@@ -110,7 +105,7 @@ namespace dawn_native {
}
}
- void RenderPassEncoder::SetStencilReference(uint32_t reference) {
+ void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
SetStencilReferenceCmd* cmd =
allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
@@ -120,7 +115,7 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::SetBlendColor(const Color* color) {
+ void RenderPassEncoder::APISetBlendColor(const Color* color) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
SetBlendColorCmd* cmd = allocator->Allocate<SetBlendColorCmd>(Command::SetBlendColor);
cmd->color = *color;
@@ -129,12 +124,12 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::SetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
+ void RenderPassEncoder::APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
if ((isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
@@ -170,10 +165,10 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::SetScissorRect(uint32_t x,
- uint32_t y,
- uint32_t width,
- uint32_t height) {
+ void RenderPassEncoder::APISetScissorRect(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
if (width > mRenderTargetWidth || height > mRenderTargetHeight ||
@@ -194,7 +189,8 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::ExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles) {
+ void RenderPassEncoder::APIExecuteBundles(uint32_t count,
+ RenderBundleBase* const* renderBundles) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
for (uint32_t i = 0; i < count; ++i) {
@@ -232,7 +228,7 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::BeginOcclusionQuery(uint32_t queryIndex) {
+ void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
if (mOcclusionQuerySet.Get() == nullptr) {
@@ -253,9 +249,7 @@ namespace dawn_native {
}
DAWN_TRY(ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
- GetQueryAvailabilityMap()));
-
- mCommandEncoder->TrackUsedQuerySet(mOcclusionQuerySet.Get());
+ mUsageTracker.GetQueryAvailabilityMap()));
}
// Record the current query index for endOcclusionQuery.
@@ -271,7 +265,7 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::EndOcclusionQuery() {
+ void RenderPassEncoder::APIEndOcclusionQuery() {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
if (!mOcclusionQueryActive) {
@@ -282,6 +276,7 @@ namespace dawn_native {
}
TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+
mOcclusionQueryActive = false;
EndOcclusionQueryCmd* cmd =
@@ -293,13 +288,13 @@ namespace dawn_native {
});
}
- void RenderPassEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(querySet));
DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
- DAWN_TRY(
- ValidateQueryIndexOverwrite(querySet, queryIndex, GetQueryAvailabilityMap()));
+ DAWN_TRY(ValidateQueryIndexOverwrite(querySet, queryIndex,
+ mUsageTracker.GetQueryAvailabilityMap()));
}
TrackQueryAvailability(querySet, queryIndex);
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
index abae1933294..a8bf460548c 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
@@ -37,26 +37,23 @@ namespace dawn_native {
CommandEncoder* commandEncoder,
EncodingContext* encodingContext);
- void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
- const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
-
- void EndPass();
+ void APIEndPass();
- void SetStencilReference(uint32_t reference);
- void SetBlendColor(const Color* color);
- void SetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth);
- void SetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
- void ExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+ void APISetStencilReference(uint32_t reference);
+ void APISetBlendColor(const Color* color);
+ void APISetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth);
+ void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+ void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
- void BeginOcclusionQuery(uint32_t queryIndex);
- void EndOcclusionQuery();
+ void APIBeginOcclusionQuery(uint32_t queryIndex);
+ void APIEndOcclusionQuery();
- void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+ void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
protected:
RenderPassEncoder(DeviceBase* device,
@@ -65,6 +62,8 @@ namespace dawn_native {
ErrorTag errorTag);
private:
+ void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+
// For render and compute passes, the encoding context is borrowed from the command encoder.
// Keep a reference to the encoder to make sure the context isn't freed.
Ref<CommandEncoder> mCommandEncoder;
@@ -72,11 +71,6 @@ namespace dawn_native {
uint32_t mRenderTargetWidth;
uint32_t mRenderTargetHeight;
- // This map is to indicate the availability of the queries used in render pass. The same
- // query cannot be written twice in same render pass, so each render pass also need to have
- // its own query availability map.
- QueryAvailabilityMap mQueryAvailabilityMap;
-
// The resources for occlusion query
Ref<QuerySetBase> mOcclusionQuerySet;
uint32_t mCurrentOcclusionQueryIndex = 0;
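
The per-pass QueryAvailabilityMap moves out of RenderPassEncoder and into mUsageTracker, but the bookkeeping itself is unchanged: one bit per query index, flipped to true the first time the query is written in the pass so ValidateQueryIndexOverwrite can reject a second write. A minimal stand-alone sketch of that idea, not Dawn's actual tracker class (the real TrackQueryAvailability takes the count from the query set itself; here QuerySetBase is just an opaque key):

    #include <cstdint>
    #include <map>
    #include <vector>

    class QuerySetBase;  // opaque handle, stands in for Dawn's query set object

    class QueryAvailabilityTracker {
      public:
        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex, uint32_t queryCount) {
            // Create the per-set bit vector on first use, then flip the written index to true.
            auto it = mAvailability.emplace(querySet, std::vector<bool>(queryCount, false)).first;
            it->second[queryIndex] = true;
        }

        bool IsQueryWritten(QuerySetBase* querySet, uint32_t queryIndex) const {
            auto it = mAvailability.find(querySet);
            return it != mAvailability.end() && it->second[queryIndex];
        }

      private:
        std::map<QuerySetBase*, std::vector<bool>> mAvailability;
    };
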
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index 3840cee7760..eb306238ddb 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/RenderPipeline.h"
#include "common/BitSetIterator.h"
+#include "common/VertexFormatUtils.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
@@ -26,20 +27,26 @@ namespace dawn_native {
// Helper functions
namespace {
- MaybeError ValidateVertexAttributeDescriptor(
- const VertexAttributeDescriptor* attribute,
- uint64_t vertexBufferStride,
- std::bitset<kMaxVertexAttributes>* attributesSetMask) {
+ MaybeError ValidateVertexAttribute(DeviceBase* device,
+ const VertexAttribute* attribute,
+ uint64_t vertexBufferStride,
+ std::bitset<kMaxVertexAttributes>* attributesSetMask) {
DAWN_TRY(ValidateVertexFormat(attribute->format));
+ if (dawn::IsDeprecatedVertexFormat(attribute->format)) {
+ device->EmitDeprecationWarning(
+ "Vertex formats have changed and the old types will be removed soon.");
+ }
+
if (attribute->shaderLocation >= kMaxVertexAttributes) {
return DAWN_VALIDATION_ERROR("Setting attribute out of bounds");
}
// No underflow is possible because the max vertex format size is smaller than
// kMaxVertexBufferStride.
- ASSERT(kMaxVertexBufferStride >= VertexFormatSize(attribute->format));
- if (attribute->offset > kMaxVertexBufferStride - VertexFormatSize(attribute->format)) {
+ ASSERT(kMaxVertexBufferStride >= dawn::VertexFormatSize(attribute->format));
+ if (attribute->offset >
+ kMaxVertexBufferStride - dawn::VertexFormatSize(attribute->format)) {
return DAWN_VALIDATION_ERROR("Setting attribute offset out of bounds");
}
@@ -47,12 +54,14 @@ namespace dawn_native {
// than kMaxVertexBufferStride.
ASSERT(attribute->offset < kMaxVertexBufferStride);
if (vertexBufferStride > 0 &&
- attribute->offset + VertexFormatSize(attribute->format) > vertexBufferStride) {
+ attribute->offset + dawn::VertexFormatSize(attribute->format) >
+ vertexBufferStride) {
return DAWN_VALIDATION_ERROR("Setting attribute offset out of bounds");
}
- if (attribute->offset % 4 != 0) {
- return DAWN_VALIDATION_ERROR("Attribute offset needs to be a multiple of 4 bytes");
+ if (attribute->offset % dawn::VertexFormatComponentSize(attribute->format) != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Attribute offset needs to be a multiple of the format's component size");
}
if ((*attributesSetMask)[attribute->shaderLocation]) {
@@ -63,8 +72,9 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateVertexBufferLayoutDescriptor(
- const VertexBufferLayoutDescriptor* buffer,
+ MaybeError ValidateVertexBufferLayout(
+ DeviceBase* device,
+ const VertexBufferLayout* buffer,
std::bitset<kMaxVertexAttributes>* attributesSetMask) {
DAWN_TRY(ValidateInputStepMode(buffer->stepMode));
if (buffer->arrayStride > kMaxVertexBufferStride) {
@@ -77,44 +87,30 @@ namespace dawn_native {
}
for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
- DAWN_TRY(ValidateVertexAttributeDescriptor(&buffer->attributes[i],
- buffer->arrayStride, attributesSetMask));
+ DAWN_TRY(ValidateVertexAttribute(device, &buffer->attributes[i],
+ buffer->arrayStride, attributesSetMask));
}
return {};
}
- MaybeError ValidateVertexStateDescriptor(
- DeviceBase* device,
- const VertexStateDescriptor* descriptor,
- wgpu::PrimitiveTopology primitiveTopology,
- std::bitset<kMaxVertexAttributes>* attributesSetMask) {
+ MaybeError ValidateVertexState(DeviceBase* device,
+ const VertexState* descriptor,
+ const PipelineLayoutBase* layout) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(ValidateIndexFormat(descriptor->indexFormat));
- // Pipeline descriptors must have indexFormat != undefined IFF they are using strip
- // topologies.
- if (IsStripPrimitiveTopology(primitiveTopology)) {
- if (descriptor->indexFormat == wgpu::IndexFormat::Undefined) {
- return DAWN_VALIDATION_ERROR(
- "indexFormat must not be undefined when using strip primitive topologies");
- }
- } else if (descriptor->indexFormat != wgpu::IndexFormat::Undefined) {
- return DAWN_VALIDATION_ERROR(
- "indexFormat must be undefined when using non-strip primitive topologies");
- }
-
- if (descriptor->vertexBufferCount > kMaxVertexBuffers) {
+ if (descriptor->bufferCount > kMaxVertexBuffers) {
return DAWN_VALIDATION_ERROR("Vertex buffer count exceeds maximum");
}
+ std::bitset<kMaxVertexAttributes> attributesSetMask;
uint32_t totalAttributesNum = 0;
- for (uint32_t i = 0; i < descriptor->vertexBufferCount; ++i) {
- DAWN_TRY(ValidateVertexBufferLayoutDescriptor(&descriptor->vertexBuffers[i],
- attributesSetMask));
- totalAttributesNum += descriptor->vertexBuffers[i].attributeCount;
+ for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
+ DAWN_TRY(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
+ &attributesSetMask));
+ totalAttributesNum += descriptor->buffers[i].attributeCount;
}
// Every vertex attribute has a member called shaderLocation, and there are some
@@ -123,18 +119,74 @@ namespace dawn_native {
// attribute number never exceed kMaxVertexAttributes.
ASSERT(totalAttributesNum <= kMaxVertexAttributes);
+ DAWN_TRY(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ layout, SingleShaderStage::Vertex));
+ const EntryPointMetadata& vertexMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+ if (!IsSubset(vertexMetadata.usedVertexAttributes, attributesSetMask)) {
+ return DAWN_VALIDATION_ERROR(
+ "Pipeline vertex stage uses vertex buffers not in the vertex state");
+ }
+
return {};
}
- MaybeError ValidateRasterizationStateDescriptor(
- const RasterizationStateDescriptor* descriptor) {
- if (descriptor->nextInChain != nullptr) {
- return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ MaybeError ValidatePrimitiveState(const DeviceBase* device,
+ const PrimitiveState* descriptor) {
+ const ChainedStruct* chained = descriptor->nextInChain;
+ if (chained != nullptr) {
+ if (chained->sType != wgpu::SType::PrimitiveDepthClampingState) {
+ return DAWN_VALIDATION_ERROR("Unsupported sType");
+ }
+ if (!device->IsExtensionEnabled(Extension::DepthClamping)) {
+ return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
+ }
}
+ DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
+ DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+ // Pipeline descriptors must have stripIndexFormat != undefined IFF they are using strip
+ // topologies.
+ if (IsStripPrimitiveTopology(descriptor->topology)) {
+ if (descriptor->stripIndexFormat == wgpu::IndexFormat::Undefined) {
+ return DAWN_VALIDATION_ERROR(
+ "stripIndexFormat must not be undefined when using strip primitive "
+ "topologies");
+ }
+ } else if (descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined) {
+ return DAWN_VALIDATION_ERROR(
+ "stripIndexFormat must be undefined when using non-strip primitive topologies");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateDepthStencilState(const DeviceBase* device,
+ const DepthStencilState* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
+ DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
+ DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
+
+ const Format* format;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+ if (!format->HasDepthOrStencil() || !format->isRenderable) {
+ return DAWN_VALIDATION_ERROR(
+ "Depth stencil format must be depth-stencil renderable");
+ }
+
if (std::isnan(descriptor->depthBiasSlopeScale) ||
std::isnan(descriptor->depthBiasClamp)) {
return DAWN_VALIDATION_ERROR("Depth bias parameters must not be NaN.");
@@ -143,23 +195,49 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateColorStateDescriptor(const DeviceBase* device,
- const ColorStateDescriptor& descriptor,
- bool fragmentWritten,
- wgpu::TextureComponentType fragmentOutputBaseType) {
- if (descriptor.nextInChain != nullptr) {
+ MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
+ if (descriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+ }
+
+ if (!IsValidSampleCount(descriptor->count)) {
+ return DAWN_VALIDATION_ERROR("Multisample count is not supported");
+ }
+
+ if (descriptor->alphaToCoverageEnabled && descriptor->count <= 1) {
+ return DAWN_VALIDATION_ERROR("Enabling alphaToCoverage requires sample count > 1");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateBlendState(const BlendState* descriptor) {
+ DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
+ DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
+ DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+
+ return {};
+ }
+
+ MaybeError ValidateColorTargetState(const DeviceBase* device,
+ const ColorTargetState* descriptor,
+ bool fragmentWritten,
+ wgpu::TextureComponentType fragmentOutputBaseType) {
+ if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(ValidateBlendOperation(descriptor.alphaBlend.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor.alphaBlend.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor.alphaBlend.dstFactor));
- DAWN_TRY(ValidateBlendOperation(descriptor.colorBlend.operation));
- DAWN_TRY(ValidateBlendFactor(descriptor.colorBlend.srcFactor));
- DAWN_TRY(ValidateBlendFactor(descriptor.colorBlend.dstFactor));
- DAWN_TRY(ValidateColorWriteMask(descriptor.writeMask));
+
+ if (descriptor->blend) {
+ DAWN_TRY(ValidateBlendState(descriptor->blend));
+ }
+
+ DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor.format));
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
if (!format->IsColor() || !format->isRenderable) {
return DAWN_VALIDATION_ERROR("Color format must be color renderable");
}
@@ -172,32 +250,32 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateDepthStencilStateDescriptor(
- const DeviceBase* device,
- const DepthStencilStateDescriptor* descriptor) {
+ MaybeError ValidateFragmentState(DeviceBase* device,
+ const FragmentState* descriptor,
+ const PipelineLayoutBase* layout) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
- DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
- DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
- const Format* format;
- DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
- if (!format->HasDepthOrStencil() || !format->isRenderable) {
- return DAWN_VALIDATION_ERROR(
- "Depth stencil format must be depth-stencil renderable");
+ DAWN_TRY(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+ layout, SingleShaderStage::Fragment));
+
+ if (descriptor->targetCount > kMaxColorAttachments) {
+ return DAWN_VALIDATION_ERROR("Number of color targets exceeds maximum");
+ }
+
+ const EntryPointMetadata& fragmentMetadata =
+ descriptor->module->GetEntryPoint(descriptor->entryPoint);
+ for (ColorAttachmentIndex i(uint8_t(0));
+ i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
+ DAWN_TRY(
+ ValidateColorTargetState(device, &descriptor->targets[static_cast<uint8_t>(i)],
+ fragmentMetadata.fragmentOutputsWritten[i],
+ fragmentMetadata.fragmentOutputFormatBaseTypes[i]));
}
return {};
}
-
} // anonymous namespace
// Helper functions
@@ -212,95 +290,13 @@ namespace dawn_native {
}
}
- uint32_t VertexFormatNumComponents(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::UChar4:
- case wgpu::VertexFormat::Char4:
- case wgpu::VertexFormat::UChar4Norm:
- case wgpu::VertexFormat::Char4Norm:
- case wgpu::VertexFormat::UShort4:
- case wgpu::VertexFormat::Short4:
- case wgpu::VertexFormat::UShort4Norm:
- case wgpu::VertexFormat::Short4Norm:
- case wgpu::VertexFormat::Half4:
- case wgpu::VertexFormat::Float4:
- case wgpu::VertexFormat::UInt4:
- case wgpu::VertexFormat::Int4:
- return 4;
- case wgpu::VertexFormat::Float3:
- case wgpu::VertexFormat::UInt3:
- case wgpu::VertexFormat::Int3:
- return 3;
- case wgpu::VertexFormat::UChar2:
- case wgpu::VertexFormat::Char2:
- case wgpu::VertexFormat::UChar2Norm:
- case wgpu::VertexFormat::Char2Norm:
- case wgpu::VertexFormat::UShort2:
- case wgpu::VertexFormat::Short2:
- case wgpu::VertexFormat::UShort2Norm:
- case wgpu::VertexFormat::Short2Norm:
- case wgpu::VertexFormat::Half2:
- case wgpu::VertexFormat::Float2:
- case wgpu::VertexFormat::UInt2:
- case wgpu::VertexFormat::Int2:
- return 2;
- case wgpu::VertexFormat::Float:
- case wgpu::VertexFormat::UInt:
- case wgpu::VertexFormat::Int:
- return 1;
- }
- }
-
- size_t VertexFormatComponentSize(wgpu::VertexFormat format) {
- switch (format) {
- case wgpu::VertexFormat::UChar2:
- case wgpu::VertexFormat::UChar4:
- case wgpu::VertexFormat::Char2:
- case wgpu::VertexFormat::Char4:
- case wgpu::VertexFormat::UChar2Norm:
- case wgpu::VertexFormat::UChar4Norm:
- case wgpu::VertexFormat::Char2Norm:
- case wgpu::VertexFormat::Char4Norm:
- return sizeof(char);
- case wgpu::VertexFormat::UShort2:
- case wgpu::VertexFormat::UShort4:
- case wgpu::VertexFormat::UShort2Norm:
- case wgpu::VertexFormat::UShort4Norm:
- case wgpu::VertexFormat::Short2:
- case wgpu::VertexFormat::Short4:
- case wgpu::VertexFormat::Short2Norm:
- case wgpu::VertexFormat::Short4Norm:
- case wgpu::VertexFormat::Half2:
- case wgpu::VertexFormat::Half4:
- return sizeof(uint16_t);
- case wgpu::VertexFormat::Float:
- case wgpu::VertexFormat::Float2:
- case wgpu::VertexFormat::Float3:
- case wgpu::VertexFormat::Float4:
- return sizeof(float);
- case wgpu::VertexFormat::UInt:
- case wgpu::VertexFormat::UInt2:
- case wgpu::VertexFormat::UInt3:
- case wgpu::VertexFormat::UInt4:
- case wgpu::VertexFormat::Int:
- case wgpu::VertexFormat::Int2:
- case wgpu::VertexFormat::Int3:
- case wgpu::VertexFormat::Int4:
- return sizeof(int32_t);
- }
- }
-
- size_t VertexFormatSize(wgpu::VertexFormat format) {
- return VertexFormatNumComponents(format) * VertexFormatComponentSize(format);
- }
-
bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
}
MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor) {
+ const RenderPipelineDescriptor2* descriptor) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
@@ -310,78 +306,50 @@ namespace dawn_native {
}
// TODO(crbug.com/dawn/136): Support vertex-only pipelines.
- if (descriptor->fragmentStage == nullptr) {
+ if (descriptor->fragment == nullptr) {
return DAWN_VALIDATION_ERROR("Null fragment stage is not supported (yet)");
}
- DAWN_TRY(ValidatePrimitiveTopology(descriptor->primitiveTopology));
-
- std::bitset<kMaxVertexAttributes> attributesSetMask;
- if (descriptor->vertexState) {
- DAWN_TRY(ValidateVertexStateDescriptor(device,
- descriptor->vertexState, descriptor->primitiveTopology, &attributesSetMask));
- }
-
- DAWN_TRY(ValidateProgrammableStageDescriptor(
- device, &descriptor->vertexStage, descriptor->layout, SingleShaderStage::Vertex));
- DAWN_TRY(ValidateProgrammableStageDescriptor(
- device, descriptor->fragmentStage, descriptor->layout, SingleShaderStage::Fragment));
-
- if (descriptor->rasterizationState) {
- DAWN_TRY(ValidateRasterizationStateDescriptor(descriptor->rasterizationState));
- }
+ DAWN_TRY(ValidateVertexState(device, &descriptor->vertex, descriptor->layout));
- const EntryPointMetadata& vertexMetadata =
- descriptor->vertexStage.module->GetEntryPoint(descriptor->vertexStage.entryPoint);
- if (!IsSubset(vertexMetadata.usedVertexAttributes, attributesSetMask)) {
- return DAWN_VALIDATION_ERROR(
- "Pipeline vertex stage uses vertex buffers not in the vertex state");
- }
+ DAWN_TRY(ValidatePrimitiveState(device, &descriptor->primitive));
- if (!IsValidSampleCount(descriptor->sampleCount)) {
- return DAWN_VALIDATION_ERROR("Sample count is not supported");
+ if (descriptor->depthStencil) {
+ DAWN_TRY(ValidateDepthStencilState(device, descriptor->depthStencil));
}
- if (descriptor->colorStateCount > kMaxColorAttachments) {
- return DAWN_VALIDATION_ERROR("Color States number exceeds maximum");
- }
+ DAWN_TRY(ValidateMultisampleState(&descriptor->multisample));
- if (descriptor->colorStateCount == 0 && !descriptor->depthStencilState) {
- return DAWN_VALIDATION_ERROR(
- "Should have at least one colorState or a depthStencilState");
- }
+ ASSERT(descriptor->fragment != nullptr);
+ DAWN_TRY(ValidateFragmentState(device, descriptor->fragment, descriptor->layout));
- ASSERT(descriptor->fragmentStage != nullptr);
- const EntryPointMetadata& fragmentMetadata =
- descriptor->fragmentStage->module->GetEntryPoint(descriptor->fragmentStage->entryPoint);
- for (ColorAttachmentIndex i(uint8_t(0));
- i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorStateCount)); ++i) {
- DAWN_TRY(ValidateColorStateDescriptor(
- device, descriptor->colorStates[static_cast<uint8_t>(i)],
- fragmentMetadata.fragmentOutputsWritten[i],
- fragmentMetadata.fragmentOutputFormatBaseTypes[i]));
+ if (descriptor->fragment->targetCount == 0 && !descriptor->depthStencil) {
+ return DAWN_VALIDATION_ERROR("Should have at least one color target or a depthStencil");
}
- if (descriptor->depthStencilState) {
- DAWN_TRY(ValidateDepthStencilStateDescriptor(device, descriptor->depthStencilState));
- }
+ return {};
+ }
- if (descriptor->alphaToCoverageEnabled && descriptor->sampleCount <= 1) {
- return DAWN_VALIDATION_ERROR("Enabling alphaToCoverage requires sampleCount > 1");
+ std::vector<StageAndDescriptor> GetStages(const RenderPipelineDescriptor2* descriptor) {
+ std::vector<StageAndDescriptor> stages;
+ stages.push_back(
+ {SingleShaderStage::Vertex, descriptor->vertex.module, descriptor->vertex.entryPoint});
+ if (descriptor->fragment != nullptr) {
+ stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
+ descriptor->fragment->entryPoint});
}
-
- return {};
+ return stages;
}
- bool StencilTestEnabled(const DepthStencilStateDescriptor* mDepthStencilState) {
- return mDepthStencilState->stencilBack.compare != wgpu::CompareFunction::Always ||
- mDepthStencilState->stencilBack.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencilState->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencilState->stencilBack.passOp != wgpu::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.compare != wgpu::CompareFunction::Always ||
- mDepthStencilState->stencilFront.failOp != wgpu::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.passOp != wgpu::StencilOperation::Keep;
+ bool StencilTestEnabled(const DepthStencilState* mDepthStencil) {
+ return mDepthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
+ mDepthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
+ mDepthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
}
bool BlendEnabled(const ColorStateDescriptor* mColorState) {
@@ -396,75 +364,83 @@ namespace dawn_native {
// RenderPipelineBase
RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor)
+ const RenderPipelineDescriptor2* descriptor)
: PipelineBase(device,
descriptor->layout,
- {{SingleShaderStage::Vertex, &descriptor->vertexStage},
- {SingleShaderStage::Fragment, descriptor->fragmentStage}}),
- mAttachmentState(device->GetOrCreateAttachmentState(descriptor)),
- mPrimitiveTopology(descriptor->primitiveTopology),
- mSampleMask(descriptor->sampleMask),
- mAlphaToCoverageEnabled(descriptor->alphaToCoverageEnabled) {
- if (descriptor->vertexState != nullptr) {
- mVertexState = *descriptor->vertexState;
- } else {
- mVertexState = VertexStateDescriptor();
- }
-
- for (uint8_t slot = 0; slot < mVertexState.vertexBufferCount; ++slot) {
- if (mVertexState.vertexBuffers[slot].attributeCount == 0) {
+ {{SingleShaderStage::Vertex, descriptor->vertex.module,
+ descriptor->vertex.entryPoint},
+ {SingleShaderStage::Fragment, descriptor->fragment->module,
+ descriptor->fragment->entryPoint}}),
+ mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
+ mVertexBufferCount = descriptor->vertex.bufferCount;
+ const VertexBufferLayout* buffers = descriptor->vertex.buffers;
+ for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
+ if (buffers[slot].attributeCount == 0) {
continue;
}
VertexBufferSlot typedSlot(slot);
mVertexBufferSlotsUsed.set(typedSlot);
- mVertexBufferInfos[typedSlot].arrayStride =
- mVertexState.vertexBuffers[slot].arrayStride;
- mVertexBufferInfos[typedSlot].stepMode = mVertexState.vertexBuffers[slot].stepMode;
+ mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
+ mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
- for (uint32_t i = 0; i < mVertexState.vertexBuffers[slot].attributeCount; ++i) {
- VertexAttributeLocation location = VertexAttributeLocation(static_cast<uint8_t>(
- mVertexState.vertexBuffers[slot].attributes[i].shaderLocation));
+ for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
+ VertexAttributeLocation location = VertexAttributeLocation(
+ static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
mAttributeLocationsUsed.set(location);
mAttributeInfos[location].shaderLocation = location;
mAttributeInfos[location].vertexBufferSlot = typedSlot;
- mAttributeInfos[location].offset =
- mVertexState.vertexBuffers[slot].attributes[i].offset;
+ mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
+
mAttributeInfos[location].format =
- mVertexState.vertexBuffers[slot].attributes[i].format;
+ dawn::NormalizeVertexFormat(buffers[slot].attributes[i].format);
}
}
- if (descriptor->rasterizationState != nullptr) {
- mRasterizationState = *descriptor->rasterizationState;
- } else {
- mRasterizationState = RasterizationStateDescriptor();
+ mPrimitive = descriptor->primitive;
+ const ChainedStruct* chained = mPrimitive.nextInChain;
+ if (chained != nullptr) {
+ ASSERT(chained->sType == wgpu::SType::PrimitiveDepthClampingState);
+ const auto* clampState = static_cast<const PrimitiveDepthClampingState*>(chained);
+ mClampDepth = clampState->clampDepth;
}
+ mMultisample = descriptor->multisample;
if (mAttachmentState->HasDepthStencilAttachment()) {
- mDepthStencilState = *descriptor->depthStencilState;
+ mDepthStencil = *descriptor->depthStencil;
} else {
// These default values below are useful for backends to fill information.
// The values indicate that depth and stencil test are disabled when backends
// set their own depth stencil states/descriptors according to the values in
- // mDepthStencilState.
- mDepthStencilState.depthCompare = wgpu::CompareFunction::Always;
- mDepthStencilState.depthWriteEnabled = false;
- mDepthStencilState.stencilBack.compare = wgpu::CompareFunction::Always;
- mDepthStencilState.stencilBack.failOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilBack.passOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilFront.compare = wgpu::CompareFunction::Always;
- mDepthStencilState.stencilFront.failOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilFront.passOp = wgpu::StencilOperation::Keep;
- mDepthStencilState.stencilReadMask = 0xff;
- mDepthStencilState.stencilWriteMask = 0xff;
+ // mDepthStencil.
+ mDepthStencil.format = wgpu::TextureFormat::Undefined;
+ mDepthStencil.depthWriteEnabled = false;
+ mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
+ mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencil.stencilReadMask = 0xff;
+ mDepthStencil.stencilWriteMask = 0xff;
+ mDepthStencil.depthBias = 0;
+ mDepthStencil.depthBiasSlopeScale = 0.0f;
+ mDepthStencil.depthBiasClamp = 0.0f;
}
for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- mColorStates[i] = descriptor->colorStates[static_cast<uint8_t>(i)];
+ const ColorTargetState* target =
+ &descriptor->fragment->targets[static_cast<uint8_t>(i)];
+ mTargets[i] = *target;
+
+ if (target->blend != nullptr) {
+ mTargetBlend[i] = *target->blend;
+ mTargets[i].blend = &mTargetBlend[i];
+ }
}
// TODO(cwallez@chromium.org): Check against the shader module that the correct color
@@ -486,11 +462,6 @@ namespace dawn_native {
}
}
- const VertexStateDescriptor* RenderPipelineBase::GetVertexStateDescriptor() const {
- ASSERT(!IsError());
- return &mVertexState;
- }
-
const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
RenderPipelineBase::GetAttributeLocationsUsed() const {
ASSERT(!IsError());
@@ -516,51 +487,66 @@ namespace dawn_native {
return mVertexBufferInfos[slot];
}
- const ColorStateDescriptor* RenderPipelineBase::GetColorStateDescriptor(
+ uint32_t RenderPipelineBase::GetVertexBufferCount() const {
+ ASSERT(!IsError());
+ return mVertexBufferCount;
+ }
+
+ const ColorTargetState* RenderPipelineBase::GetColorTargetState(
ColorAttachmentIndex attachmentSlot) const {
ASSERT(!IsError());
- ASSERT(attachmentSlot < mColorStates.size());
- return &mColorStates[attachmentSlot];
+ ASSERT(attachmentSlot < mTargets.size());
+ return &mTargets[attachmentSlot];
}
- const DepthStencilStateDescriptor* RenderPipelineBase::GetDepthStencilStateDescriptor() const {
+ const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
ASSERT(!IsError());
- return &mDepthStencilState;
+ return &mDepthStencil;
}
wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
ASSERT(!IsError());
- return mPrimitiveTopology;
+ return mPrimitive.topology;
+ }
+
+ wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
+ ASSERT(!IsError());
+ return mPrimitive.stripIndexFormat;
}
wgpu::CullMode RenderPipelineBase::GetCullMode() const {
ASSERT(!IsError());
- return mRasterizationState.cullMode;
+ return mPrimitive.cullMode;
}
wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
ASSERT(!IsError());
- return mRasterizationState.frontFace;
+ return mPrimitive.frontFace;
}
bool RenderPipelineBase::IsDepthBiasEnabled() const {
ASSERT(!IsError());
- return mRasterizationState.depthBias != 0 || mRasterizationState.depthBiasSlopeScale != 0;
+ return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
}
int32_t RenderPipelineBase::GetDepthBias() const {
ASSERT(!IsError());
- return mRasterizationState.depthBias;
+ return mDepthStencil.depthBias;
}
float RenderPipelineBase::GetDepthBiasSlopeScale() const {
ASSERT(!IsError());
- return mRasterizationState.depthBiasSlopeScale;
+ return mDepthStencil.depthBiasSlopeScale;
}
float RenderPipelineBase::GetDepthBiasClamp() const {
ASSERT(!IsError());
- return mRasterizationState.depthBiasClamp;
+ return mDepthStencil.depthBiasClamp;
+ }
+
+ bool RenderPipelineBase::ShouldClampDepth() const {
+ ASSERT(!IsError());
+ return mClampDepth;
}
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
@@ -577,13 +563,13 @@ namespace dawn_native {
wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
ColorAttachmentIndex attachment) const {
ASSERT(!IsError());
- return mColorStates[attachment].format;
+ return mTargets[attachment].format;
}
wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
ASSERT(!IsError());
ASSERT(mAttachmentState->HasDepthStencilAttachment());
- return mDepthStencilState.format;
+ return mDepthStencil.format;
}
uint32_t RenderPipelineBase::GetSampleCount() const {
@@ -593,12 +579,12 @@ namespace dawn_native {
uint32_t RenderPipelineBase::GetSampleMask() const {
ASSERT(!IsError());
- return mSampleMask;
+ return mMultisample.mask;
}
bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
ASSERT(!IsError());
- return mAlphaToCoverageEnabled;
+ return mMultisample.alphaToCoverageEnabled;
}
const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
@@ -619,22 +605,25 @@ namespace dawn_native {
// Record attachments
for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
- const ColorStateDescriptor& desc = *GetColorStateDescriptor(i);
+ const ColorTargetState& desc = *GetColorTargetState(i);
recorder.Record(desc.writeMask);
- recorder.Record(desc.colorBlend.operation, desc.colorBlend.srcFactor,
- desc.colorBlend.dstFactor);
- recorder.Record(desc.alphaBlend.operation, desc.alphaBlend.srcFactor,
- desc.alphaBlend.dstFactor);
+ if (desc.blend != nullptr) {
+ recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
+ desc.blend->color.dstFactor);
+ recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
+ desc.blend->alpha.dstFactor);
+ }
}
if (mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilStateDescriptor& desc = mDepthStencilState;
+ const DepthStencilState& desc = mDepthStencil;
recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
+ recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
}
// Record vertex state
@@ -650,17 +639,13 @@ namespace dawn_native {
recorder.Record(desc.arrayStride, desc.stepMode);
}
- recorder.Record(mVertexState.indexFormat);
+ // Record primitive state
+ recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
+ mPrimitive.cullMode, mClampDepth);
- // Record rasterization state
- {
- const RasterizationStateDescriptor& desc = mRasterizationState;
- recorder.Record(desc.frontFace, desc.cullMode);
- recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
- }
-
- // Record other state
- recorder.Record(mPrimitiveTopology, mSampleMask, mAlphaToCoverageEnabled);
+ // Record multisample state
+ // The sample count is already hashed as part of the attachment state.
+ recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
return recorder.GetContentHash();
}
@@ -680,44 +665,59 @@ namespace dawn_native {
for (ColorAttachmentIndex i :
IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
- const ColorStateDescriptor& descA = *a->GetColorStateDescriptor(i);
- const ColorStateDescriptor& descB = *b->GetColorStateDescriptor(i);
+ const ColorTargetState& descA = *a->GetColorTargetState(i);
+ const ColorTargetState& descB = *b->GetColorTargetState(i);
if (descA.writeMask != descB.writeMask) {
return false;
}
- if (descA.colorBlend.operation != descB.colorBlend.operation ||
- descA.colorBlend.srcFactor != descB.colorBlend.srcFactor ||
- descA.colorBlend.dstFactor != descB.colorBlend.dstFactor) {
+ if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
return false;
}
- if (descA.alphaBlend.operation != descB.alphaBlend.operation ||
- descA.alphaBlend.srcFactor != descB.alphaBlend.srcFactor ||
- descA.alphaBlend.dstFactor != descB.alphaBlend.dstFactor) {
- return false;
+ if (descA.blend != nullptr) {
+ if (descA.blend->color.operation != descB.blend->color.operation ||
+ descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
+ descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
+ return false;
+ }
+ if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
+ descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
+ descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
+ return false;
+ }
}
}
+ // Check depth/stencil state
if (a->mAttachmentState->HasDepthStencilAttachment()) {
- const DepthStencilStateDescriptor& descA = a->mDepthStencilState;
- const DepthStencilStateDescriptor& descB = b->mDepthStencilState;
- if (descA.depthWriteEnabled != descB.depthWriteEnabled ||
- descA.depthCompare != descB.depthCompare) {
+ const DepthStencilState& stateA = a->mDepthStencil;
+ const DepthStencilState& stateB = b->mDepthStencil;
+
+ ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
+ ASSERT(!std::isnan(stateA.depthBiasClamp));
+ ASSERT(!std::isnan(stateB.depthBiasClamp));
+
+ if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
+ stateA.depthCompare != stateB.depthCompare ||
+ stateA.depthBias != stateB.depthBias ||
+ stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
+ stateA.depthBiasClamp != stateB.depthBiasClamp) {
return false;
}
- if (descA.stencilReadMask != descB.stencilReadMask ||
- descA.stencilWriteMask != descB.stencilWriteMask) {
+ if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
+ stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
+ stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
+ stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
return false;
}
- if (descA.stencilFront.compare != descB.stencilFront.compare ||
- descA.stencilFront.failOp != descB.stencilFront.failOp ||
- descA.stencilFront.depthFailOp != descB.stencilFront.depthFailOp ||
- descA.stencilFront.passOp != descB.stencilFront.passOp) {
+ if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
+ stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
+ stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
+ stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
return false;
}
- if (descA.stencilBack.compare != descB.stencilBack.compare ||
- descA.stencilBack.failOp != descB.stencilBack.failOp ||
- descA.stencilBack.depthFailOp != descB.stencilBack.depthFailOp ||
- descA.stencilBack.passOp != descB.stencilBack.passOp) {
+ if (stateA.stencilReadMask != stateB.stencilReadMask ||
+ stateA.stencilWriteMask != stateB.stencilWriteMask) {
return false;
}
}
@@ -749,36 +749,29 @@ namespace dawn_native {
}
}
- if (a->mVertexState.indexFormat != b->mVertexState.indexFormat) {
- return false;
- }
-
- // Check rasterization state
+ // Check primitive state
{
- const RasterizationStateDescriptor& descA = a->mRasterizationState;
- const RasterizationStateDescriptor& descB = b->mRasterizationState;
- if (descA.frontFace != descB.frontFace || descA.cullMode != descB.cullMode) {
+ const PrimitiveState& stateA = a->mPrimitive;
+ const PrimitiveState& stateB = b->mPrimitive;
+ if (stateA.topology != stateB.topology ||
+ stateA.stripIndexFormat != stateB.stripIndexFormat ||
+ stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
+ a->mClampDepth != b->mClampDepth) {
return false;
}
+ }
- ASSERT(!std::isnan(descA.depthBiasSlopeScale));
- ASSERT(!std::isnan(descB.depthBiasSlopeScale));
- ASSERT(!std::isnan(descA.depthBiasClamp));
- ASSERT(!std::isnan(descB.depthBiasClamp));
-
- if (descA.depthBias != descB.depthBias ||
- descA.depthBiasSlopeScale != descB.depthBiasSlopeScale ||
- descA.depthBiasClamp != descB.depthBiasClamp) {
+ // Check multisample state
+ {
+ const MultisampleState& stateA = a->mMultisample;
+ const MultisampleState& stateB = b->mMultisample;
+ // Sample count already checked as part of the attachment state.
+ if (stateA.mask != stateB.mask ||
+ stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
return false;
}
}
- // Check other state
- if (a->mPrimitiveTopology != b->mPrimitiveTopology || a->mSampleMask != b->mSampleMask ||
- a->mAlphaToCoverageEnabled != b->mAlphaToCoverageEnabled) {
- return false;
- }
-
return true;
}
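
Validation and pipeline construction now consume RenderPipelineDescriptor2, whose per-topic sub-structs (vertex, primitive, depthStencil, multisample, fragment) replace the old flat vertexState/rasterizationState/colorStates layout. A sketch of assembling the new descriptor through the C++ bindings of this snapshot, using only fields exercised by the validation code above; the shader module, pipeline layout, device and entry point names are assumed to exist, and CreateRenderPipeline2 is the entry point paired with this descriptor in the same snapshot:

    wgpu::VertexAttribute attribute = {};
    attribute.format = wgpu::VertexFormat::Float32x4;
    attribute.offset = 0;
    attribute.shaderLocation = 0;

    wgpu::VertexBufferLayout vertexBuffer = {};
    vertexBuffer.arrayStride = 16;
    vertexBuffer.stepMode = wgpu::InputStepMode::Vertex;
    vertexBuffer.attributeCount = 1;
    vertexBuffer.attributes = &attribute;

    wgpu::BlendState blend = {};                      // optional; a null blend pointer disables blending
    wgpu::ColorTargetState target = {};
    target.format = wgpu::TextureFormat::BGRA8Unorm;
    target.blend = &blend;
    target.writeMask = wgpu::ColorWriteMask::All;

    wgpu::FragmentState fragment = {};
    fragment.module = shaderModule;
    fragment.entryPoint = "fs_main";
    fragment.targetCount = 1;
    fragment.targets = &target;

    wgpu::RenderPipelineDescriptor2 desc = {};
    desc.layout = pipelineLayout;
    desc.vertex.module = shaderModule;
    desc.vertex.entryPoint = "vs_main";
    desc.vertex.bufferCount = 1;
    desc.vertex.buffers = &vertexBuffer;
    desc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
    desc.primitive.stripIndexFormat = wgpu::IndexFormat::Undefined;  // only strip topologies set this
    desc.multisample.count = 1;
    desc.depthStencil = nullptr;                      // no depth/stencil attachment in this sketch
    desc.fragment = &fragment;

    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline2(&desc);
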
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
index 812f7ec8235..bf36af5f2bd 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
@@ -30,14 +30,16 @@ namespace dawn_native {
class DeviceBase;
MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor);
+ const RenderPipelineDescriptor2* descriptor);
+
+ std::vector<StageAndDescriptor> GetStages(const RenderPipelineDescriptor2* descriptor);
+
size_t IndexFormatSize(wgpu::IndexFormat format);
- uint32_t VertexFormatNumComponents(wgpu::VertexFormat format);
- size_t VertexFormatComponentSize(wgpu::VertexFormat format);
- size_t VertexFormatSize(wgpu::VertexFormat format);
+
bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
- bool StencilTestEnabled(const DepthStencilStateDescriptor* mDepthStencilState);
+ bool StencilTestEnabled(const DepthStencilState* mDepthStencil);
+
bool BlendEnabled(const ColorStateDescriptor* mColorState);
struct VertexAttributeInfo {
@@ -54,28 +56,29 @@ namespace dawn_native {
class RenderPipelineBase : public PipelineBase {
public:
- RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+ RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor2* descriptor);
~RenderPipelineBase() override;
static RenderPipelineBase* MakeError(DeviceBase* device);
- const VertexStateDescriptor* GetVertexStateDescriptor() const;
const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
GetAttributeLocationsUsed() const;
const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
+ uint32_t GetVertexBufferCount() const;
- const ColorStateDescriptor* GetColorStateDescriptor(
- ColorAttachmentIndex attachmentSlot) const;
- const DepthStencilStateDescriptor* GetDepthStencilStateDescriptor() const;
+ const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
+ const DepthStencilState* GetDepthStencilState() const;
wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+ wgpu::IndexFormat GetStripIndexFormat() const;
wgpu::CullMode GetCullMode() const;
wgpu::FrontFace GetFrontFace() const;
bool IsDepthBiasEnabled() const;
int32_t GetDepthBias() const;
float GetDepthBiasSlopeScale() const;
float GetDepthBiasClamp() const;
+ bool ShouldClampDepth() const;
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
bool HasDepthStencilAttachment() const;
@@ -98,7 +101,7 @@ namespace dawn_native {
RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
// Vertex state
- VertexStateDescriptor mVertexState;
+ uint32_t mVertexBufferCount;
ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
mAttributeInfos;
@@ -107,14 +110,14 @@ namespace dawn_native {
// Attachments
Ref<AttachmentState> mAttachmentState;
- DepthStencilStateDescriptor mDepthStencilState;
- ityp::array<ColorAttachmentIndex, ColorStateDescriptor, kMaxColorAttachments> mColorStates;
+ ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
+ ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
// Other state
- wgpu::PrimitiveTopology mPrimitiveTopology;
- RasterizationStateDescriptor mRasterizationState;
- uint32_t mSampleMask;
- bool mAlphaToCoverageEnabled;
+ PrimitiveState mPrimitive;
+ DepthStencilState mDepthStencil;
+ MultisampleState mMultisample;
+ bool mClampDepth = false;
};
} // namespace dawn_native
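
The vertex-format helpers deleted from RenderPipeline.cpp now live in common/VertexFormatUtils.h under the dawn namespace; the call sites above use dawn::VertexFormatSize, dawn::VertexFormatComponentSize, dawn::NormalizeVertexFormat and dawn::IsDeprecatedVertexFormat to bridge the old and new format names. A small sketch of the stride/alignment arithmetic they support; the byte values follow from the format names, everything else is illustrative:

    // Two attributes packed into one vertex buffer: a position followed by a packed color.
    constexpr wgpu::VertexFormat kPosition = wgpu::VertexFormat::Float32x3;   // 3 components * 4 bytes
    constexpr wgpu::VertexFormat kColor = wgpu::VertexFormat::Unorm8x4;       // 4 components * 1 byte

    uint64_t colorOffset = dawn::VertexFormatSize(kPosition);                 // 12
    uint64_t arrayStride = colorOffset + dawn::VertexFormatSize(kColor);      // 16
    bool colorOffsetAligned =
        colorOffset % dawn::VertexFormatComponentSize(kColor) == 0;           // 12 % 1 == 0
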
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index b04d8d144db..af6b2d7b1e1 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -14,24 +14,26 @@
#include "dawn_native/ShaderModule.h"
+#include "common/VertexFormatUtils.h"
#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/CompilationMessages.h"
#include "dawn_native/Device.h"
#include "dawn_native/ObjectContentHasher.h"
#include "dawn_native/Pipeline.h"
#include "dawn_native/PipelineLayout.h"
+#include "dawn_native/RenderPipeline.h"
#include "dawn_native/SpirvUtils.h"
+#include "dawn_native/TintUtils.h"
#include <spirv-tools/libspirv.hpp>
#include <spirv-tools/optimizer.hpp>
#include <spirv_cross.hpp>
-#ifdef DAWN_ENABLE_WGSL
// Tint include must be after spirv_cross.hpp, because spirv-cross has its own
// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
// is at 3 while spirv-headers is at 4.
-# undef SPV_REVISION
-# include <tint/tint.h>
-#endif // DAWN_ENABLE_WGSL
+#undef SPV_REVISION
+#include <tint/tint.h>
#include <sstream>
@@ -46,70 +48,107 @@ namespace dawn_native {
return ostream.str();
}
-#ifdef DAWN_ENABLE_WGSL
tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+ format = dawn::NormalizeVertexFormat(format);
switch (format) {
- case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::Uint8x2:
return tint::transform::VertexFormat::kVec2U8;
- case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Uint8x4:
return tint::transform::VertexFormat::kVec4U8;
- case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Sint8x2:
return tint::transform::VertexFormat::kVec2I8;
- case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::Sint8x4:
return tint::transform::VertexFormat::kVec4I8;
- case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::Unorm8x2:
return tint::transform::VertexFormat::kVec2U8Norm;
- case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Unorm8x4:
return tint::transform::VertexFormat::kVec4U8Norm;
- case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Snorm8x2:
return tint::transform::VertexFormat::kVec2I8Norm;
- case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Snorm8x4:
return tint::transform::VertexFormat::kVec4I8Norm;
- case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::Uint16x2:
return tint::transform::VertexFormat::kVec2U16;
- case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Uint16x4:
return tint::transform::VertexFormat::kVec4U16;
- case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Sint16x2:
return tint::transform::VertexFormat::kVec2I16;
- case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Sint16x4:
return tint::transform::VertexFormat::kVec4I16;
- case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::Unorm16x2:
return tint::transform::VertexFormat::kVec2U16Norm;
- case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Unorm16x4:
return tint::transform::VertexFormat::kVec4U16Norm;
- case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Snorm16x2:
return tint::transform::VertexFormat::kVec2I16Norm;
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Snorm16x4:
return tint::transform::VertexFormat::kVec4I16Norm;
- case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Float16x2:
return tint::transform::VertexFormat::kVec2F16;
- case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float16x4:
return tint::transform::VertexFormat::kVec4F16;
- case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float32:
return tint::transform::VertexFormat::kF32;
- case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float32x2:
return tint::transform::VertexFormat::kVec2F32;
- case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float32x3:
return tint::transform::VertexFormat::kVec3F32;
- case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float32x4:
return tint::transform::VertexFormat::kVec4F32;
- case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::Uint32:
return tint::transform::VertexFormat::kU32;
- case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::Uint32x2:
return tint::transform::VertexFormat::kVec2U32;
- case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::Uint32x3:
return tint::transform::VertexFormat::kVec3U32;
- case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Uint32x4:
return tint::transform::VertexFormat::kVec4U32;
- case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Sint32:
return tint::transform::VertexFormat::kI32;
- case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Sint32x2:
return tint::transform::VertexFormat::kVec2I32;
- case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Sint32x3:
return tint::transform::VertexFormat::kVec3I32;
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Sint32x4:
return tint::transform::VertexFormat::kVec4I32;
+
+ case wgpu::VertexFormat::Undefined:
+ break;
+
+ // Deprecated formats (should be unreachable after NormalizeVertexFormat call)
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
+ break;
}
+ UNREACHABLE();
}
tint::transform::InputStepMode ToTintInputStepMode(wgpu::InputStepMode mode) {
@@ -121,7 +160,8 @@ namespace dawn_native {
}
}
- SingleShaderStage PipelineStateToShaderStage(tint::ast::PipelineStage stage) {
+ ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
+ tint::ast::PipelineStage stage) {
switch (stage) {
case tint::ast::PipelineStage::kVertex:
return SingleShaderStage::Vertex;
@@ -129,12 +169,184 @@ namespace dawn_native {
return SingleShaderStage::Fragment;
case tint::ast::PipelineStage::kCompute:
return SingleShaderStage::Compute;
- default:
+ case tint::ast::PipelineStage::kNone:
UNREACHABLE();
}
}
-#endif // DAWN_ENABLE_WGSL
+ BindingInfoType TintResourceTypeToBindingInfoType(
+ tint::inspector::ResourceBinding::ResourceType type) {
+ switch (type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return BindingInfoType::Buffer;
+ case tint::inspector::ResourceBinding::ResourceType::kSampler:
+ case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+ return BindingInfoType::Sampler;
+ case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+ return BindingInfoType::Texture;
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageTexture:
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return BindingInfoType::StorageTexture;
+ }
+ }
+
+ wgpu::TextureFormat TintImageFormatToTextureFormat(
+ tint::inspector::ResourceBinding::ImageFormat format) {
+ switch (format) {
+ case tint::inspector::ResourceBinding::ImageFormat::kR8Unorm:
+ return wgpu::TextureFormat::R8Unorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kR8Snorm:
+ return wgpu::TextureFormat::R8Snorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kR8Uint:
+ return wgpu::TextureFormat::R8Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR8Sint:
+ return wgpu::TextureFormat::R8Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR16Uint:
+ return wgpu::TextureFormat::R16Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR16Sint:
+ return wgpu::TextureFormat::R16Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR16Float:
+ return wgpu::TextureFormat::R16Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg8Unorm:
+ return wgpu::TextureFormat::RG8Unorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg8Snorm:
+ return wgpu::TextureFormat::RG8Snorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg8Uint:
+ return wgpu::TextureFormat::RG8Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg8Sint:
+ return wgpu::TextureFormat::RG8Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR32Uint:
+ return wgpu::TextureFormat::R32Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR32Sint:
+ return wgpu::TextureFormat::R32Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kR32Float:
+ return wgpu::TextureFormat::R32Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg16Uint:
+ return wgpu::TextureFormat::RG16Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg16Sint:
+ return wgpu::TextureFormat::RG16Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg16Float:
+ return wgpu::TextureFormat::RG16Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba8Unorm:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba8UnormSrgb:
+ return wgpu::TextureFormat::RGBA8UnormSrgb;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba8Snorm:
+ return wgpu::TextureFormat::RGBA8Snorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba8Uint:
+ return wgpu::TextureFormat::RGBA8Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba8Sint:
+ return wgpu::TextureFormat::RGBA8Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kBgra8Unorm:
+ return wgpu::TextureFormat::BGRA8Unorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kBgra8UnormSrgb:
+ return wgpu::TextureFormat::BGRA8UnormSrgb;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgb10A2Unorm:
+ return wgpu::TextureFormat::RGB10A2Unorm;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg11B10Float:
+ return wgpu::TextureFormat::RG11B10Ufloat;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg32Uint:
+ return wgpu::TextureFormat::RG32Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg32Sint:
+ return wgpu::TextureFormat::RG32Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRg32Float:
+ return wgpu::TextureFormat::RG32Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba16Uint:
+ return wgpu::TextureFormat::RGBA16Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba16Sint:
+ return wgpu::TextureFormat::RGBA16Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba16Float:
+ return wgpu::TextureFormat::RGBA16Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba32Uint:
+ return wgpu::TextureFormat::RGBA32Uint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba32Sint:
+ return wgpu::TextureFormat::RGBA32Sint;
+ case tint::inspector::ResourceBinding::ImageFormat::kRgba32Float:
+ return wgpu::TextureFormat::RGBA32Float;
+ case tint::inspector::ResourceBinding::ImageFormat::kNone:
+ return wgpu::TextureFormat::Undefined;
+ }
+ }
+
+ wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
+ tint::inspector::ResourceBinding::TextureDimension dim) {
+ switch (dim) {
+ case tint::inspector::ResourceBinding::TextureDimension::k1d:
+ return wgpu::TextureViewDimension::e1D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2d:
+ return wgpu::TextureViewDimension::e2D;
+ case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+ return wgpu::TextureViewDimension::e2DArray;
+ case tint::inspector::ResourceBinding::TextureDimension::k3d:
+ return wgpu::TextureViewDimension::e3D;
+ case tint::inspector::ResourceBinding::TextureDimension::kCube:
+ return wgpu::TextureViewDimension::Cube;
+ case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+ return wgpu::TextureViewDimension::CubeArray;
+ case tint::inspector::ResourceBinding::TextureDimension::kNone:
+ return wgpu::TextureViewDimension::Undefined;
+ }
+ }
+
+ wgpu::TextureSampleType TintSampledKindToTextureSampleType(
+ tint::inspector::ResourceBinding::SampledKind s) {
+ switch (s) {
+ case tint::inspector::ResourceBinding::SampledKind::kSInt:
+ return wgpu::TextureSampleType::Sint;
+ case tint::inspector::ResourceBinding::SampledKind::kUInt:
+ return wgpu::TextureSampleType::Uint;
+ case tint::inspector::ResourceBinding::SampledKind::kFloat:
+ return wgpu::TextureSampleType::Float;
+ case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+ return wgpu::TextureSampleType::Undefined;
+ }
+ }
+
+ ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
+ tint::inspector::ComponentType type) {
+ switch (type) {
+ case tint::inspector::ComponentType::kFloat:
+ return wgpu::TextureComponentType::Float;
+ case tint::inspector::ComponentType::kSInt:
+ return wgpu::TextureComponentType::Sint;
+ case tint::inspector::ComponentType::kUInt:
+ return wgpu::TextureComponentType::Uint;
+ case tint::inspector::ComponentType::kUnknown:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert 'Unknown' component type from Tint");
+ }
+ }
+
+ ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+ return wgpu::BufferBindingType::Uniform;
+ case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+ return wgpu::BufferBindingType::Storage;
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+ return wgpu::BufferBindingType::ReadOnlyStorage;
+ default:
+ return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
+ }
+ }
+
+ ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
+ tint::inspector::ResourceBinding::ResourceType resource_type) {
+ switch (resource_type) {
+ case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageTexture:
+ return wgpu::StorageTextureAccess::ReadOnly;
+ case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+ return wgpu::StorageTextureAccess::WriteOnly;
+ default:
+ return DAWN_VALIDATION_ERROR(
+ "Attempted to convert non-storage texture resource type");
+ }
+ }
MaybeError ValidateSpirv(const uint32_t* code, uint32_t codeSize) {
spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
@@ -178,14 +390,17 @@ namespace dawn_native {
return {};
}
-#ifdef DAWN_ENABLE_WGSL
- ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file) {
+ ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
+ OwnedCompilationMessages* outMessages) {
std::ostringstream errorStream;
errorStream << "Tint WGSL reader failure:" << std::endl;
tint::Program program = tint::reader::wgsl::Parse(file);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
if (!program.IsValid()) {
- auto err = tint::diag::Formatter{}.format(program.Diagnostics());
+ auto err = program.Diagnostics().str();
errorStream << "Parser: " << err << std::endl
<< "Shader: " << std::endl
<< file->content << std::endl;
@@ -195,13 +410,17 @@ namespace dawn_native {
return std::move(program);
}
- ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv) {
+ ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
+ OwnedCompilationMessages* outMessages) {
std::ostringstream errorStream;
errorStream << "Tint SPIRV reader failure:" << std::endl;
tint::Program program = tint::reader::spirv::Parse(spirv);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(program.Diagnostics());
+ }
if (!program.IsValid()) {
- auto err = tint::diag::Formatter{}.format(program.Diagnostics());
+ auto err = program.Diagnostics().str();
errorStream << "Parser: " << err << std::endl;
return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
@@ -209,13 +428,18 @@ namespace dawn_native {
return std::move(program);
}
- MaybeError ValidateModule(tint::Program* program) {
+ MaybeError ValidateModule(const tint::Program* program,
+ OwnedCompilationMessages* outMessages) {
std::ostringstream errorStream;
errorStream << "Tint program validation" << std::endl;
tint::Validator validator;
- if (!validator.Validate(program)) {
- auto err = tint::diag::Formatter{}.format(validator.diagnostics());
+ bool isValid = validator.Validate(program);
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(validator.diagnostics());
+ }
+ if (!isValid) {
+ auto err = validator.diagnostics().str();
errorStream << "Validation: " << err << std::endl;
return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
@@ -236,7 +460,6 @@ namespace dawn_native {
std::vector<uint32_t> spirv = generator.result();
return std::move(spirv);
}
-#endif // DAWN_ENABLE_WGSL
std::vector<uint64_t> GetBindGroupMinBufferSizes(
const EntryPointMetadata::BindingGroupInfoMap& shaderBindings,
@@ -509,6 +732,10 @@ namespace dawn_native {
}
info->texture.sampleType = wgpu::TextureSampleType::Depth;
}
+ if (imageType.ms && imageType.arrayed) {
+ return DAWN_VALIDATION_ERROR(
+ "Multisampled array textures aren't supported");
+ }
break;
}
case BindingInfoType::Buffer: {
@@ -661,21 +888,16 @@ namespace dawn_native {
return {std::move(metadata)};
}
-#ifdef DAWN_ENABLE_WGSL
- // Currently only partially populated the reflection data, needs to be
- // completed using PopulateMetadataUsingSPIRVCross. In the future, once
- // this function is complete, ReflectShaderUsingSPIRVCross and
- // PopulateMetadataUsingSPIRVCross will be removed.
ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
DeviceBase*,
- const tint::Program& program) {
- ASSERT(program.IsValid());
+ const tint::Program* program) {
+ ASSERT(program->IsValid());
EntryPointMetadataTable result;
std::ostringstream errorStream;
errorStream << "Tint Reflection failure:" << std::endl;
- tint::inspector::Inspector inspector(&program);
+ tint::inspector::Inspector inspector(program);
auto entryPoints = inspector.GetEntryPoints();
if (inspector.has_error()) {
errorStream << "Inspector: " << inspector.error() << std::endl;
@@ -687,8 +909,7 @@ namespace dawn_native {
auto metadata = std::make_unique<EntryPointMetadata>();
- metadata->stage = PipelineStateToShaderStage(entryPoint.stage);
-
+ DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
if (metadata->stage == SingleShaderStage::Vertex) {
for (auto& stage_input : entryPoint.input_variables) {
if (!stage_input.has_location_decoration) {
@@ -716,96 +937,124 @@ namespace dawn_native {
metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
}
- result[entryPoint.name] = std::move(metadata);
- }
- return std::move(result);
- }
-#endif // DAWN_ENABLE_WGSL
-
- // Uses SPIRV-Cross, which is planned for removal, but until
- // ReflectShaderUsingTint is completed, will be kept as a
- // fallback/source of truth.
- ResultOrError<EntryPointMetadataTable> ReflectShaderUsingSPIRVCross(
- DeviceBase* device,
- std::vector<uint32_t> spirv) {
- EntryPointMetadataTable result;
- spirv_cross::Compiler compiler(spirv);
- for (const spirv_cross::EntryPoint& entryPoint :
- compiler.get_entry_points_and_stages()) {
- ASSERT(result.count(entryPoint.name) == 0);
+ if (metadata->stage == SingleShaderStage::Vertex) {
+ for (const auto& input_var : entryPoint.input_variables) {
+ uint32_t location = 0;
+ if (input_var.has_location_decoration) {
+ location = input_var.location_decoration;
+ }
- SingleShaderStage stage = ExecutionModelToShaderStage(entryPoint.execution_model);
- compiler.set_entry_point(entryPoint.name, entryPoint.execution_model);
+ if (DAWN_UNLIKELY(location >= kMaxVertexAttributes)) {
+ std::stringstream ss;
+ ss << "Attribute location (" << location << ") over limits";
+ return DAWN_VALIDATION_ERROR(ss.str());
+ }
+ metadata->usedVertexAttributes.set(location);
+ }
- std::unique_ptr<EntryPointMetadata> metadata;
- DAWN_TRY_ASSIGN(metadata,
- ExtractSpirvInfo(device, compiler, entryPoint.name, stage));
- result[entryPoint.name] = std::move(metadata);
- }
- return std::move(result);
- }
+ for (const auto& output_var : entryPoint.output_variables) {
+ if (DAWN_UNLIKELY(!output_var.has_location_decoration)) {
+ std::stringstream ss;
+ ss << "Missing location qualifier on vertex output, "
+ << output_var.name;
+ return DAWN_VALIDATION_ERROR(ss.str());
+ }
+ }
+ }
-#ifdef DAWN_ENABLE_WGSL
- // Temporary utility method that allows for polyfilling like behaviour,
- // specifically data missing from the Tint implementation is filled in
- // using the SPIRV-Cross implementation. Once the Tint implementation is
- // completed, this function will be removed.
- MaybeError PopulateMetadataUsingSPIRVCross(DeviceBase* device,
- std::vector<uint32_t> spirv,
- EntryPointMetadataTable* tintTable) {
- EntryPointMetadataTable crossTable;
- DAWN_TRY_ASSIGN(crossTable, ReflectShaderUsingSPIRVCross(device, spirv));
- if (tintTable->size() != crossTable.size()) {
- return DAWN_VALIDATION_ERROR(
- "Tint and SPIRV-Cross returned different number of entry points");
- }
- for (auto& crossMember : crossTable) {
- auto& name = crossMember.first;
- auto& crossEntry = crossMember.second;
+ if (metadata->stage == SingleShaderStage::Fragment) {
+ for (const auto& input_var : entryPoint.input_variables) {
+ if (!input_var.has_location_decoration) {
+ return DAWN_VALIDATION_ERROR(
+ "Need location decoration on fragment input");
+ }
+ }
- auto tintMember = tintTable->find(name);
- if (tintMember == tintTable->end()) {
- return DAWN_VALIDATION_ERROR(
- "Tint and SPIRV-Cross returned different entry point names");
- }
+ for (const auto& output_var : entryPoint.output_variables) {
+ if (!output_var.has_location_decoration) {
+ return DAWN_VALIDATION_ERROR(
+ "Need location decoration on fragment output");
+ }
- auto& tintEntry = tintMember->second;
- if (tintEntry->stage != crossEntry->stage) {
- return DAWN_VALIDATION_ERROR(
- "Tint and SPIRV-Cross returned different stages for entry point");
+ uint32_t unsanitizedAttachment = output_var.location_decoration;
+ if (unsanitizedAttachment >= kMaxColorAttachments) {
+ return DAWN_VALIDATION_ERROR(
+ "Fragment output index must be less than max number of color "
+ "attachments");
+ }
+ ColorAttachmentIndex attachment(
+ static_cast<uint8_t>(unsanitizedAttachment));
+ DAWN_TRY_ASSIGN(
+ metadata->fragmentOutputFormatBaseTypes[attachment],
+ TintComponentTypeToTextureComponentType(output_var.component_type));
+ metadata->fragmentOutputsWritten.set(attachment);
+ }
}
- if (tintEntry->stage == SingleShaderStage::Vertex) {
- if (tintEntry->usedVertexAttributes != crossEntry->usedVertexAttributes) {
- return DAWN_VALIDATION_ERROR(
- "Tint and SPIRV-Cross returned different used vertex attributes for "
- "entry point");
+ for (auto& resource : inspector.GetResourceBindings(entryPoint.name)) {
+ BindingNumber bindingNumber(resource.binding);
+ BindGroupIndex bindGroupIndex(resource.bind_group);
+ if (bindGroupIndex >= kMaxBindGroupsTyped) {
+ return DAWN_VALIDATION_ERROR("Shader has bind group index over limits");
}
- }
- if (tintEntry->stage == SingleShaderStage::Compute) {
- if (tintEntry->localWorkgroupSize.x != crossEntry->localWorkgroupSize.x ||
- tintEntry->localWorkgroupSize.y != crossEntry->localWorkgroupSize.y ||
- tintEntry->localWorkgroupSize.z != crossEntry->localWorkgroupSize.z) {
- return DAWN_VALIDATION_ERROR(
- "Tint and SPIRV-Cross returned different values for local workgroup "
- "size");
+ const auto& it = metadata->bindings[bindGroupIndex].emplace(
+ bindingNumber, EntryPointMetadata::ShaderBindingInfo{});
+ if (!it.second) {
+ return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
+ }
+
+ EntryPointMetadata::ShaderBindingInfo* info = &it.first->second;
+ info->bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+
+ switch (info->bindingType) {
+ case BindingInfoType::Buffer:
+ info->buffer.minBindingSize = resource.size_no_padding;
+ DAWN_TRY_ASSIGN(info->buffer.type, TintResourceTypeToBufferBindingType(
+ resource.resource_type));
+ break;
+ case BindingInfoType::Sampler:
+ info->sampler.type = wgpu::SamplerBindingType::Filtering;
+ break;
+ case BindingInfoType::Texture:
+ info->texture.viewDimension =
+ TintTextureDimensionToTextureViewDimension(resource.dim);
+ if (resource.resource_type ==
+ tint::inspector::ResourceBinding::ResourceType::kDepthTexture) {
+ info->texture.sampleType = wgpu::TextureSampleType::Depth;
+ } else {
+ info->texture.sampleType =
+ TintSampledKindToTextureSampleType(resource.sampled_kind);
+ }
+ info->texture.multisampled = resource.resource_type ==
+ tint::inspector::ResourceBinding::
+ ResourceType::kMultisampledTexture;
+
+ break;
+ case BindingInfoType::StorageTexture:
+ DAWN_TRY_ASSIGN(
+ info->storageTexture.access,
+ TintResourceTypeToStorageTextureAccess(resource.resource_type));
+ info->storageTexture.format =
+ TintImageFormatToTextureFormat(resource.image_format);
+ info->storageTexture.viewDimension =
+ TintTextureDimensionToTextureViewDimension(resource.dim);
+
+ break;
+ default:
+ return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
}
}
- // TODO(rharrison): Use the Inspector to get this data.
- tintEntry->bindings = crossEntry->bindings;
- tintEntry->fragmentOutputFormatBaseTypes =
- crossEntry->fragmentOutputFormatBaseTypes;
- tintEntry->fragmentOutputsWritten = crossEntry->fragmentOutputsWritten;
+ result[entryPoint.name] = std::move(metadata);
}
- return {};
+ return std::move(result);
}
-#endif // DAWN_ENABLE_WGSL
-
} // anonymous namespace
- ShaderModuleParseResult::ShaderModuleParseResult() = default;
+ ShaderModuleParseResult::ShaderModuleParseResult()
+ : compilationMessages(new OwnedCompilationMessages()) {
+ }
ShaderModuleParseResult::~ShaderModuleParseResult() = default;
ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
@@ -813,9 +1062,15 @@ namespace dawn_native {
ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
default;
- ResultOrError<ShaderModuleParseResult> ValidateShaderModuleDescriptor(
- DeviceBase* device,
- const ShaderModuleDescriptor* descriptor) {
+ bool ShaderModuleParseResult::HasParsedShader() const {
+ return tintProgram != nullptr || spirv.size() > 0;
+ }
+
+ MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
+ ASSERT(parseResult != nullptr);
+
const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
if (chainedDescriptor == nullptr) {
return DAWN_VALIDATION_ERROR("Shader module descriptor missing chained descriptor");
@@ -826,75 +1081,70 @@ namespace dawn_native {
"Shader module descriptor chained nextInChain must be nullptr");
}
- ShaderModuleParseResult parseResult = {};
+ OwnedCompilationMessages* outMessages = parseResult->compilationMessages.get();
+
+ ScopedTintICEHandler scopedICEHandler(device);
+
switch (chainedDescriptor->sType) {
case wgpu::SType::ShaderModuleSPIRVDescriptor: {
const auto* spirvDesc =
static_cast<const ShaderModuleSPIRVDescriptor*>(chainedDescriptor);
std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
-#ifdef DAWN_ENABLE_WGSL
tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv));
+ DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program));
+ DAWN_TRY(ValidateModule(&program, outMessages));
}
- parseResult.tintProgram = std::make_unique<tint::Program>(std::move(program));
-#else
- return DAWN_VALIDATION_ERROR("Using Tint is not enabled in this build.");
-#endif // DAWN_ENABLE_WGSL
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
} else {
if (device->IsValidationEnabled()) {
DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
}
- parseResult.spirv = std::move(spirv);
+ parseResult->spirv = std::move(spirv);
}
break;
}
case wgpu::SType::ShaderModuleWGSLDescriptor: {
-#ifdef DAWN_ENABLE_WGSL
const auto* wgslDesc =
static_cast<const ShaderModuleWGSLDescriptor*>(chainedDescriptor);
tint::Source::File file("", wgslDesc->source);
tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseWGSL(&file));
+ DAWN_TRY_ASSIGN(program, ParseWGSL(&file, outMessages));
if (device->IsToggleEnabled(Toggle::UseTintGenerator)) {
if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program));
+ DAWN_TRY(ValidateModule(&program, outMessages));
}
+ parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
} else {
tint::transform::Manager transformManager;
transformManager.append(
std::make_unique<tint::transform::EmitVertexPointSize>());
transformManager.append(std::make_unique<tint::transform::Spirv>());
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, &program));
+ DAWN_TRY_ASSIGN(program,
+ RunTransforms(&transformManager, &program, outMessages));
if (device->IsValidationEnabled()) {
- DAWN_TRY(ValidateModule(&program));
+ DAWN_TRY(ValidateModule(&program, outMessages));
}
std::vector<uint32_t> spirv;
DAWN_TRY_ASSIGN(spirv, ModuleToSPIRV(&program));
DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
- parseResult.spirv = std::move(spirv);
+ parseResult->spirv = std::move(spirv);
}
-
- parseResult.tintProgram = std::make_unique<tint::Program>(std::move(program));
break;
-#else
- return DAWN_VALIDATION_ERROR("Using Tint is not enabled in this build.");
-#endif // DAWN_ENABLE_WGSL
}
default:
return DAWN_VALIDATION_ERROR("Unsupported sType");
}
- return std::move(parseResult);
+ return {};
}
RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
@@ -908,34 +1158,29 @@ namespace dawn_native {
return bufferSizes;
}
-#ifdef DAWN_ENABLE_WGSL
ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- tint::Program* program) {
+ const tint::Program* program,
+ OwnedCompilationMessages* outMessages) {
tint::transform::Transform::Output output = transform->Run(program);
- if (output.diagnostics.contains_errors()) {
- // TODO(bclayton): Remove Transform::Output::diagnostics - just put diagnostics into
- // output.program.
- std::string err =
- "Tint transform failure: " + tint::diag::Formatter{}.format(output.diagnostics);
- return DAWN_VALIDATION_ERROR(err.c_str());
+ if (outMessages != nullptr) {
+ outMessages->AddMessages(output.program.Diagnostics());
}
-
if (!output.program.IsValid()) {
- std::string err = "Tint program failure: " +
- tint::diag::Formatter{}.format(output.program.Diagnostics());
+ std::string err = "Tint program failure: " + output.program.Diagnostics().str();
return DAWN_VALIDATION_ERROR(err.c_str());
}
return std::move(output.program);
}
std::unique_ptr<tint::transform::VertexPulling> MakeVertexPullingTransform(
- const VertexStateDescriptor& vertexState,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) {
- auto transform = std::make_unique<tint::transform::VertexPulling>();
- tint::transform::VertexStateDescriptor state;
- for (uint32_t i = 0; i < vertexState.vertexBufferCount; ++i) {
- const auto& vertexBuffer = vertexState.vertexBuffers[i];
+ tint::transform::VertexPulling::Config cfg;
+ cfg.entry_point_name = entryPoint;
+ cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
+ for (uint32_t i = 0; i < vertexState.bufferCount; ++i) {
+ const auto& vertexBuffer = vertexState.buffers[i];
tint::transform::VertexBufferLayoutDescriptor layout;
layout.array_stride = vertexBuffer.arrayStride;
layout.step_mode = ToTintInputStepMode(vertexBuffer.stepMode);
@@ -950,14 +1195,10 @@ namespace dawn_native {
layout.attributes.push_back(std::move(attr));
}
- state.push_back(std::move(layout));
+ cfg.vertex_state.push_back(std::move(layout));
}
- transform->SetVertexState(std::move(state));
- transform->SetEntryPoint(entryPoint);
- transform->SetPullingBufferBindingSet(static_cast<uint32_t>(pullingBufferBindingSet));
- return transform;
+ return std::make_unique<tint::transform::VertexPulling>(cfg);
}
-#endif
MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
const EntryPointMetadata& entryPoint,
@@ -1004,8 +1245,13 @@ namespace dawn_native {
}
}
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : CachedObject(device, tag), mType(Type::Undefined) {
+ ShaderModuleBase::ShaderModuleBase(
+ DeviceBase* device,
+ ObjectBase::ErrorTag tag,
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages)
+ : CachedObject(device, tag),
+ mType(Type::Undefined),
+ mCompilationMessages(std::move(compilationMessages)) {
}
ShaderModuleBase::~ShaderModuleBase() {
@@ -1015,8 +1261,10 @@ namespace dawn_native {
}
// static
- ShaderModuleBase* ShaderModuleBase::MakeError(DeviceBase* device) {
- return new ShaderModuleBase(device, ObjectBase::kError);
+ ShaderModuleBase* ShaderModuleBase::MakeError(
+ DeviceBase* device,
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
+ return new ShaderModuleBase(device, ObjectBase::kError, std::move(compilationMessages));
}
bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
@@ -1047,21 +1295,35 @@ namespace dawn_native {
return mSpirv;
}
-#ifdef DAWN_ENABLE_WGSL
+ const tint::Program* ShaderModuleBase::GetTintProgram() const {
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator));
+ return mTintProgram.get();
+ }
+
+ void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
+ void* userdata) {
+ if (callback == nullptr) {
+ return;
+ }
+
+ callback(WGPUCompilationInfoRequestStatus_Success,
+ mCompilationMessages->GetCompilationInfo(), userdata);
+ }
+
ResultOrError<std::vector<uint32_t>> ShaderModuleBase::GeneratePullingSpirv(
const std::vector<uint32_t>& spirv,
- const VertexStateDescriptor& vertexState,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) const {
tint::Program program;
- DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv));
+ DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, nullptr));
return GeneratePullingSpirv(&program, vertexState, entryPoint, pullingBufferBindingSet);
}
ResultOrError<std::vector<uint32_t>> ShaderModuleBase::GeneratePullingSpirv(
- tint::Program* programIn,
- const VertexStateDescriptor& vertexState,
+ const tint::Program* programIn,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) const {
std::ostringstream errorStream;
@@ -1073,13 +1335,14 @@ namespace dawn_native {
transformManager.append(std::make_unique<tint::transform::EmitVertexPointSize>());
transformManager.append(std::make_unique<tint::transform::Spirv>());
if (GetDevice()->IsRobustnessEnabled()) {
- // TODO(enga): Run the Tint BoundArrayAccessors transform instead of the SPIRV Tools
- // one, but it appears to crash after running VertexPulling.
- // transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
+ transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
}
+ // A nullptr is passed in for the CompilationMessages here since this method is called
+ // during RenderPipeline creation, by which point the shader module's CompilationInfo may
+ // have already been queried.
tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, programIn));
+ DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, programIn, nullptr));
tint::writer::spirv::Generator generator(&program);
if (!generator.Generate()) {
@@ -1088,67 +1351,46 @@ namespace dawn_native {
}
std::vector<uint32_t> spirv = generator.result();
- if (GetDevice()->IsRobustnessEnabled()) {
- DAWN_TRY_ASSIGN(spirv, RunRobustBufferAccessPass(spirv));
- }
DAWN_TRY(ValidateSpirv(spirv.data(), spirv.size()));
return std::move(spirv);
}
-#endif
MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
-#ifdef DAWN_ENABLE_WGSL
- tint::Program* program = parseResult->tintProgram.get();
-#endif
+ mTintProgram = std::move(parseResult->tintProgram);
mSpirv = std::move(parseResult->spirv);
+ mCompilationMessages = std::move(parseResult->compilationMessages);
- // If not using Tint to generate backend code, run the robust buffer access pass now since
- // all backends will use this SPIR-V. If Tint is used, the robustness pass should be run
- // per-backend.
- if (!GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator) &&
- GetDevice()->IsRobustnessEnabled()) {
- DAWN_TRY_ASSIGN(mSpirv, RunRobustBufferAccessPass(mSpirv));
- }
-
- // We still need the spirv for reflection. Remove this when we use the Tint inspector
- // completely.
- std::vector<uint32_t>* spirvPtr = &mSpirv;
- std::vector<uint32_t> localSpirv;
if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
-#ifdef DAWN_ENABLE_WGSL
- ASSERT(program != nullptr);
-
- DAWN_TRY_ASSIGN(localSpirv, ModuleToSPIRV(program));
- DAWN_TRY(ValidateSpirv(localSpirv.data(), localSpirv.size()));
- spirvPtr = &localSpirv;
-#else
- UNREACHABLE();
-#endif
- }
-
- if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
-#ifdef DAWN_ENABLE_WGSL
- tint::Program localProgram;
-
- tint::Program* programPtr = program;
- if (!GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
- // We have mSpirv, but no Tint program
- DAWN_TRY_ASSIGN(localProgram, ParseSPIRV(mSpirv));
- DAWN_TRY(ValidateModule(&localProgram));
- programPtr = &localProgram;
- }
-
- EntryPointMetadataTable table;
- DAWN_TRY_ASSIGN(table, ReflectShaderUsingTint(GetDevice(), *programPtr));
- DAWN_TRY(PopulateMetadataUsingSPIRVCross(GetDevice(), *spirvPtr, &table));
- mEntryPoints = std::move(table);
-#else
- return DAWN_VALIDATION_ERROR("Using Tint is not enabled in this build.");
-#endif
+ DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
} else {
- DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingSPIRVCross(GetDevice(), *spirvPtr));
+ // If not using Tint to generate backend code, run the robust buffer access pass now
+ // since all backends will use this SPIR-V. If Tint is used, the robustness pass should
+ // be run per-backend.
+ if (GetDevice()->IsRobustnessEnabled()) {
+ DAWN_TRY_ASSIGN(mSpirv, RunRobustBufferAccessPass(mSpirv));
+ }
+ DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingSPIRVCross(GetDevice(), mSpirv));
}
return {};
}
+
+ ResultOrError<EntryPointMetadataTable> ShaderModuleBase::ReflectShaderUsingSPIRVCross(
+ DeviceBase* device,
+ const std::vector<uint32_t>& spirv) {
+ EntryPointMetadataTable result;
+ spirv_cross::Compiler compiler(spirv);
+ for (const spirv_cross::EntryPoint& entryPoint : compiler.get_entry_points_and_stages()) {
+ ASSERT(result.count(entryPoint.name) == 0);
+
+ SingleShaderStage stage = ExecutionModelToShaderStage(entryPoint.execution_model);
+ compiler.set_entry_point(entryPoint.name, entryPoint.execution_model);
+
+ std::unique_ptr<EntryPointMetadata> metadata;
+ DAWN_TRY_ASSIGN(metadata, ExtractSpirvInfo(device, compiler, entryPoint.name, stage));
+ result[entryPoint.name] = std::move(metadata);
+ }
+ return std::move(result);
+ }
+
} // namespace dawn_native
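[Editor's note] The reworked ToTintVertexFormat above normalizes the incoming enum with dawn::NormalizeVertexFormat before switching on it, so the deprecated names only appear in the switch to keep it exhaustive and then fall through to UNREACHABLE(). A minimal sketch of that normalize-then-exhaustive-switch pattern, using hypothetical enum values rather than Dawn's:

    #include <cstdlib>

    enum class Fmt { Float32, Float /* deprecated alias for Float32 */ };

    // Map deprecated spellings onto the current ones, mirroring the role that
    // NormalizeVertexFormat plays for ToTintVertexFormat above.
    Fmt Normalize(Fmt f) {
        return f == Fmt::Float ? Fmt::Float32 : f;
    }

    int Convert(Fmt f) {
        switch (Normalize(f)) {
            case Fmt::Float32:
                return 32;
            case Fmt::Float:
                break;  // unreachable once Normalize() has run; listed only for exhaustiveness
        }
        std::abort();  // stand-in for Dawn's UNREACHABLE()
    }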
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index 61b6973f0f3..2e8c3af9499 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -19,6 +19,7 @@
#include "common/ityp_array.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
+#include "dawn_native/CompilationMessages.h"
#include "dawn_native/Error.h"
#include "dawn_native/Format.h"
#include "dawn_native/Forward.h"
@@ -60,30 +61,30 @@ namespace dawn_native {
ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
-#ifdef DAWN_ENABLE_WGSL
+ bool HasParsedShader() const;
+
std::unique_ptr<tint::Program> tintProgram;
-#endif
std::vector<uint32_t> spirv;
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages;
};
- ResultOrError<ShaderModuleParseResult> ValidateShaderModuleDescriptor(
- DeviceBase* device,
- const ShaderModuleDescriptor* descriptor);
+ MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
const EntryPointMetadata& entryPoint,
const PipelineLayoutBase* layout);
RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
const PipelineLayoutBase* layout);
-#ifdef DAWN_ENABLE_WGSL
ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
- tint::Program* program);
+ const tint::Program* program,
+ OwnedCompilationMessages* messages);
std::unique_ptr<tint::transform::VertexPulling> MakeVertexPullingTransform(
- const VertexStateDescriptor& vertexState,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet);
-#endif
// Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
// stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
@@ -128,7 +129,9 @@ namespace dawn_native {
ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
~ShaderModuleBase() override;
- static ShaderModuleBase* MakeError(DeviceBase* device);
+ static ShaderModuleBase* MakeError(
+ DeviceBase* device,
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages);
// Return true iff the program has an entrypoint called `entryPoint`.
bool HasEntryPoint(const std::string& entryPoint) const;
@@ -145,34 +148,50 @@ namespace dawn_native {
};
const std::vector<uint32_t>& GetSpirv() const;
+ const tint::Program* GetTintProgram() const;
+
+ void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
-#ifdef DAWN_ENABLE_WGSL
ResultOrError<std::vector<uint32_t>> GeneratePullingSpirv(
const std::vector<uint32_t>& spirv,
- const VertexStateDescriptor& vertexState,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) const;
ResultOrError<std::vector<uint32_t>> GeneratePullingSpirv(
- tint::Program* program,
- const VertexStateDescriptor& vertexState,
+ const tint::Program* program,
+ const VertexState& vertexState,
const std::string& entryPoint,
BindGroupIndex pullingBufferBindingSet) const;
-#endif
+
+ OwnedCompilationMessages* CompilationMessages() {
+ return mCompilationMessages.get();
+ }
protected:
MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
+ static ResultOrError<EntryPointMetadataTable> ReflectShaderUsingSPIRVCross(
+ DeviceBase* device,
+ const std::vector<uint32_t>& spirv);
private:
- ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ ShaderModuleBase(DeviceBase* device,
+ ObjectBase::ErrorTag tag,
+ std::unique_ptr<OwnedCompilationMessages> compilationMessages);
+ // The original data in the descriptor for caching.
enum class Type { Undefined, Spirv, Wgsl };
Type mType;
std::vector<uint32_t> mOriginalSpirv;
- std::vector<uint32_t> mSpirv;
std::string mWgsl;
+ // Data computed from what is in the descriptor. mSpirv is set iff !UseTintGenerator while
+ // mTintProgram is set iff UseTintGenerator.
EntryPointMetadataTable mEntryPoints;
+ std::vector<uint32_t> mSpirv;
+ std::unique_ptr<tint::Program> mTintProgram;
+
+ std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Subresource.h b/chromium/third_party/dawn/src/dawn_native/Subresource.h
index ce9b3f3f09c..643b7bc0229 100644
--- a/chromium/third_party/dawn/src/dawn_native/Subresource.h
+++ b/chromium/third_party/dawn/src/dawn_native/Subresource.h
@@ -94,6 +94,10 @@ namespace dawn_native {
uint8_t GetAspectIndex(Aspect aspect);
uint8_t GetAspectCount(Aspect aspects);
+ // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
+ // the per plane index does not exceed the known maximum plane count.
+ static constexpr uint32_t kMaxPlanesPerFormat = 3;
+
} // namespace dawn_native
namespace wgpu {
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index 84f5a568aef..aa5f8942a3b 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -31,19 +31,19 @@ namespace dawn_native {
}
private:
- void Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override {
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override {
GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
}
- TextureViewBase* GetCurrentTextureView() override {
+ TextureViewBase* APIGetCurrentTextureView() override {
GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
return TextureViewBase::MakeError(GetDevice());
}
- void Present() override {
+ void APIPresent() override {
GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
}
};
@@ -142,10 +142,10 @@ namespace dawn_native {
}
}
- void OldSwapChainBase::Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
+ void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
return;
}
@@ -161,7 +161,7 @@ namespace dawn_native {
static_cast<WGPUTextureUsage>(allowedUsage), width, height);
}
- TextureViewBase* OldSwapChainBase::GetCurrentTextureView() {
+ TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
return TextureViewBase::MakeError(GetDevice());
}
@@ -180,7 +180,7 @@ namespace dawn_native {
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = mWidth;
descriptor.size.height = mHeight;
- descriptor.size.depth = 1;
+ descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = mFormat;
descriptor.mipLevelCount = 1;
@@ -190,11 +190,11 @@ namespace dawn_native {
// of dawn_native
mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
- mCurrentTextureView = mCurrentTexture->CreateView();
+ mCurrentTextureView = mCurrentTexture->APICreateView();
return mCurrentTextureView.Get();
}
- void OldSwapChainBase::Present() {
+ void OldSwapChainBase::APIPresent() {
if (GetDevice()->ConsumedError(ValidatePresent())) {
return;
}
@@ -292,15 +292,15 @@ namespace dawn_native {
mAttached = true;
}
- void NewSwapChainBase::Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
+ void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
GetDevice()->ConsumedError(
DAWN_VALIDATION_ERROR("Configure is invalid for surface-based swapchains"));
}
- TextureViewBase* NewSwapChainBase::GetCurrentTextureView() {
+ TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
return TextureViewBase::MakeError(GetDevice());
}
@@ -331,7 +331,7 @@ namespace dawn_native {
return view;
}
- void NewSwapChainBase::Present() {
+ void NewSwapChainBase::APIPresent() {
if (GetDevice()->ConsumedError(ValidatePresent())) {
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index efa8a5411f0..2c69fe9c77f 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -37,12 +37,12 @@ namespace dawn_native {
static SwapChainBase* MakeError(DeviceBase* device);
// Dawn API
- virtual void Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) = 0;
- virtual TextureViewBase* GetCurrentTextureView() = 0;
- virtual void Present() = 0;
+ virtual void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) = 0;
+ virtual TextureViewBase* APIGetCurrentTextureView() = 0;
+ virtual void APIPresent() = 0;
protected:
SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
@@ -57,12 +57,12 @@ namespace dawn_native {
static SwapChainBase* MakeError(DeviceBase* device);
// Dawn API
- void Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* GetCurrentTextureView() override;
- void Present() override;
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
protected:
~OldSwapChainBase() override;
@@ -114,12 +114,12 @@ namespace dawn_native {
void SetIsAttached();
// Dawn API
- void Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) override;
- TextureViewBase* GetCurrentTextureView() override;
- void Present() override;
+ void APIConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* APIGetCurrentTextureView() override;
+ void APIPresent() override;
uint32_t GetWidth() const;
uint32_t GetHeight() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index 21ae34b61b1..55eb68fb1f1 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -118,14 +118,18 @@ namespace dawn_native {
// Multisampled 2D array texture is not supported because on Metal it requires the
// version of macOS be greater than 10.14.
if (descriptor->dimension != wgpu::TextureDimension::e2D ||
- descriptor->size.depth > 1) {
+ descriptor->size.depthOrArrayLayers > 1) {
return DAWN_VALIDATION_ERROR("Multisampled texture must be 2D with depth=1");
}
- if (format->isCompressed) {
- return DAWN_VALIDATION_ERROR(
- "The sample counts of the textures in BC formats must be 1.");
+ // If a format can support multisample, it must be renderable. Because Vulkan
+ // requires that if the format is not color-renderable or depth/stencil renderable,
+ // sampleCount must be 1.
+ if (!format->isRenderable) {
+ return DAWN_VALIDATION_ERROR("This format cannot support multisample.");
}
+ // Compressed formats are not renderable. They cannot support multisample.
+ ASSERT(!format->isCompressed);
if (descriptor->usage & wgpu::TextureUsage::Storage) {
return DAWN_VALIDATION_ERROR(
@@ -164,7 +168,7 @@ namespace dawn_native {
MaybeError ValidateTextureSize(const TextureDescriptor* descriptor, const Format* format) {
ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
- descriptor->size.depth != 0);
+ descriptor->size.depthOrArrayLayers != 0);
Extent3D maxExtent;
switch (descriptor->dimension) {
@@ -182,7 +186,7 @@ namespace dawn_native {
}
if (descriptor->size.width > maxExtent.width ||
descriptor->size.height > maxExtent.height ||
- descriptor->size.depth > maxExtent.depth) {
+ descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers) {
return DAWN_VALIDATION_ERROR("Texture dimension (width, height or depth) exceeded");
}
@@ -191,7 +195,8 @@ namespace dawn_native {
maxMippedDimension = std::max(maxMippedDimension, descriptor->size.height);
}
if (descriptor->dimension == wgpu::TextureDimension::e3D) {
- maxMippedDimension = std::max(maxMippedDimension, descriptor->size.depth);
+ maxMippedDimension =
+ std::max(maxMippedDimension, descriptor->size.depthOrArrayLayers);
}
if (Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount) {
return DAWN_VALIDATION_ERROR("Texture has too many mip levels");
@@ -243,11 +248,28 @@ namespace dawn_native {
} // anonymous namespace
+ MaybeError FixUpDeprecatedGPUExtent3DDepth(DeviceBase* device, Extent3D* extent) {
+ if (extent->depth != 1) {
+ // deprecated depth is assigned
+ if (extent->depthOrArrayLayers != 1) {
+ // both deprecated and updated API is used
+ return DAWN_VALIDATION_ERROR(
+                    "Deprecated GPUExtent3D.depth and updated GPUExtent3D.depthOrArrayLayers are "
+ "both assigned.");
+ }
+
+ extent->depthOrArrayLayers = extent->depth;
+
+ device->EmitDeprecationWarning(
+ "GPUExtent3D.depth is deprecated. Please use GPUExtent3D.depthOrArrayLayers "
+ "instead.");
+ }
+
+ return {};
+ }
+
MaybeError ValidateTextureDescriptor(const DeviceBase* device,
const TextureDescriptor* descriptor) {
- if (descriptor == nullptr) {
- return DAWN_VALIDATION_ERROR("Texture descriptor is nullptr");
- }
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
@@ -264,7 +286,7 @@ namespace dawn_native {
// TODO(jiawei.shao@intel.com): check stuff based on the dimension
if (descriptor->size.width == 0 || descriptor->size.height == 0 ||
- descriptor->size.depth == 0 || descriptor->mipLevelCount == 0) {
+ descriptor->size.depthOrArrayLayers == 0 || descriptor->mipLevelCount == 0) {
return DAWN_VALIDATION_ERROR("Cannot create an empty texture");
}
@@ -272,7 +294,7 @@ namespace dawn_native {
if (descriptor->dimension != wgpu::TextureDimension::e2D &&
device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
return DAWN_VALIDATION_ERROR(
- "1D and 3D textures are disallowed because they are not fully implemented ");
+ "1D and 3D textures are disallowed because they are not fully implemented");
}
if (descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed) {
@@ -399,7 +421,8 @@ namespace dawn_native {
mSampleCount(descriptor->sampleCount),
mUsage(descriptor->usage),
mState(state) {
- uint32_t subresourceCount = mMipLevelCount * mSize.depth * GetAspectCount(mFormat.aspects);
+ uint32_t subresourceCount =
+ mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
// Add readonly storage usage if the texture has a storage usage. The validation rules in
@@ -446,7 +469,7 @@ namespace dawn_native {
uint32_t TextureBase::GetDepth() const {
ASSERT(!IsError());
ASSERT(mDimension == wgpu::TextureDimension::e3D);
- return mSize.depth;
+ return mSize.depthOrArrayLayers;
}
uint32_t TextureBase::GetArrayLayers() const {
ASSERT(!IsError());
@@ -455,7 +478,7 @@ namespace dawn_native {
if (mDimension == wgpu::TextureDimension::e3D) {
return 1;
}
- return mSize.depth;
+ return mSize.depthOrArrayLayers;
}
uint32_t TextureBase::GetNumMipLevels() const {
ASSERT(!IsError());
@@ -554,7 +577,7 @@ namespace dawn_native {
return extent;
}
- extent.depth = std::max(mSize.depth >> level, 1u);
+ extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
return extent;
}
@@ -584,14 +607,14 @@ namespace dawn_native {
uint32_t clampedCopyExtentHeight = (origin.y + extent.height > virtualSizeAtLevel.height)
? (virtualSizeAtLevel.height - origin.y)
: extent.height;
- return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depth};
+ return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
}
- TextureViewBase* TextureBase::CreateView(const TextureViewDescriptor* descriptor) {
+ TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
return GetDevice()->CreateTextureView(this, descriptor);
}
- void TextureBase::Destroy() {
+ void TextureBase::APIDestroy() {
if (GetDevice()->ConsumedError(ValidateDestroy())) {
return;
}
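[Editor's note] FixUpDeprecatedGPUExtent3DDepth above rejects descriptors that set both the deprecated and the new field, and otherwise forwards the deprecated value with a warning. A self-contained sketch of that rule, with stand-in types instead of Dawn's Extent3D and deprecation-warning plumbing:

    #include <cstdio>

    struct Extent {
        unsigned depth = 1;               // deprecated field
        unsigned depthOrArrayLayers = 1;  // replacement field
    };

    // Returns false on the "both fields assigned" validation error, true otherwise.
    bool FixUpDepth(Extent* e) {
        if (e->depth == 1) {
            return true;  // deprecated field left at its default: nothing to do
        }
        if (e->depthOrArrayLayers != 1) {
            return false;  // both the deprecated and the new field were assigned
        }
        e->depthOrArrayLayers = e->depth;  // forward the deprecated value
        std::puts("warning: Extent.depth is deprecated, use depthOrArrayLayers");
        return true;
    }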
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index 52cfa06c5ab..7ae39463f65 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -38,6 +38,8 @@ namespace dawn_native {
bool IsValidSampleCount(uint32_t sampleCount);
+ MaybeError FixUpDeprecatedGPUExtent3DDepth(DeviceBase* device, Extent3D* extent);
+
static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::Sampled | kReadOnlyStorageTexture;
@@ -86,8 +88,8 @@ namespace dawn_native {
const Extent3D& extent) const;
// Dawn API
- TextureViewBase* CreateView(const TextureViewDescriptor* descriptor = nullptr);
- void Destroy();
+ TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
+ void APIDestroy();
protected:
void DestroyInternal();
diff --git a/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp b/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp
new file mode 100644
index 00000000000..7315904fef6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/TintUtils.cpp
@@ -0,0 +1,55 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/TintUtils.h"
+#include "dawn_native/Device.h"
+
+#include <tint/tint.h>
+
+namespace dawn_native {
+
+ namespace {
+
+ thread_local DeviceBase* tlDevice = nullptr;
+
+ void TintICEReporter(const tint::diag::List& diagnostics) {
+ if (tlDevice) {
+ tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
+ }
+ }
+
+ bool InitializeTintErrorReporter() {
+ tint::SetInternalCompilerErrorReporter(&TintICEReporter);
+ return true;
+ }
+
+ } // namespace
+
+ ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
+        // Call tint::SetInternalCompilerErrorReporter() the first time
+        // this constructor is called. Static initialization is
+        // guaranteed to be thread-safe and to occur only once.
+ static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
+ (void)init_once_tint_error_reporter;
+
+ // Shouldn't have overlapping instances of this handler.
+ ASSERT(tlDevice == nullptr);
+ tlDevice = device;
+ }
+
+ ScopedTintICEHandler::~ScopedTintICEHandler() {
+ tlDevice = nullptr;
+ }
+
+} // namespace dawn_native
\ No newline at end of file
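
For context, the new ScopedTintICEHandler brackets Tint work so that internal compiler errors surface on the device that requested the compilation. A minimal usage sketch, assuming a hypothetical compile helper inside dawn_native (only the scoped-handler usage mirrors what TintUtils.cpp actually provides):

// Sketch only: RunTintPassesSketch and its arguments are hypothetical.
MaybeError RunTintPassesSketch(DeviceBase* device, tint::Program* program) {
    // While this object is alive, Tint internal compiler errors are routed to
    // device->HandleError() through the thread_local pointer installed by TintUtils.cpp.
    ScopedTintICEHandler scopedICEHandler(device);

    // ... run Tint transforms / writers on |program| here ...

    return {};
}
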
diff --git a/chromium/third_party/dawn/src/dawn_native/TintUtils.h b/chromium/third_party/dawn/src/dawn_native/TintUtils.h
new file mode 100644
index 00000000000..c3761f69ff3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/TintUtils.h
@@ -0,0 +1,37 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_TINTUTILS_H_
+#define DAWNNATIVE_TINTUTILS_H_
+
+#include "common/NonCopyable.h"
+
+namespace dawn_native {
+
+ class DeviceBase;
+
+ // Indicates that for the lifetime of this object tint internal compiler errors should be
+ // reported to the given device.
+ class ScopedTintICEHandler : public NonCopyable {
+ public:
+ ScopedTintICEHandler(DeviceBase* device);
+ ~ScopedTintICEHandler();
+
+ private:
+ ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
+ };
+
+} // namespace dawn_native
+
+#endif  // DAWNNATIVE_TINTUTILS_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index 41ac54992a1..a4561c1178b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -167,12 +167,16 @@ namespace dawn_native {
{"flush_before_client_wait_sync",
"Call glFlush before glClientWaitSync to work around bugs in the latter",
"https://crbug.com/dawn/633"}},
- {Toggle::ConvertTimestampsToNanoseconds,
- {"convert_timestamps_to_nanoseconds",
- "If needed, use a compute shader to transform timestamp queries from ticks to "
- "nanoseconds. This is temporarily needed to avoid requiring Tint to use timestamp "
- "queries",
- "https://crbug.com/dawn/686"}}
+ {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
+ "level",
+ "Split texture-to-texture copy into two copies: copy from source texture into a "
+ "temporary buffer, and copy from the temporary buffer into the destination texture "
+ "under specific situations. This workaround is by default enabled on some Intel "
+ "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
+ "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
+ "level to a smaller mip level on D3D12 backends.",
+ "https://crbug.com/1161355"}}
// Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
}};
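
A minimal sketch of how the renamed toggle is consumed elsewhere in this patch: the D3D12 device force-enables it for affected Intel adapters (see the DeviceD3D12.cpp hunk further down) and the copy path queries it before choosing the temp-buffer route. The wrapper function here is hypothetical; the toggle name and IsToggleEnabled() call match the hunks below:

// Sketch only: ShouldUseTempBufferCopySketch is hypothetical.
bool ShouldUseTempBufferCopySketch(dawn_native::DeviceBase* device) {
    return device->IsToggleEnabled(
        dawn_native::Toggle::
            UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel);
}
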
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index df56b4f53fa..258d8179504 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -50,7 +50,7 @@ namespace dawn_native {
DisallowUnsafeAPIs,
UseTintGenerator,
FlushBeforeClientWaitSync,
- ConvertTimestampsToNanoseconds,
+ UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
EnumCount,
InvalidEnum = EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index 775e32da18d..1455d129383 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -15,28 +15,17 @@
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "common/Constants.h"
+#include "common/WindowsUtils.h"
#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/BackendD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
-#include <locale>
#include <sstream>
namespace dawn_native { namespace d3d12 {
- // utility wrapper to adapt locale-bound facets for wstring/wbuffer convert
- template <class Facet>
- struct DeletableFacet : Facet {
- template <class... Args>
- DeletableFacet(Args&&... args) : Facet(std::forward<Args>(args)...) {
- }
-
- ~DeletableFacet() {
- }
- };
-
Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
: AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
mHardwareAdapter(hardwareAdapter),
@@ -80,6 +69,7 @@ namespace dawn_native { namespace d3d12 {
mPCIInfo.deviceId = adapterDesc.DeviceId;
mPCIInfo.vendorId = adapterDesc.VendorId;
+ mPCIInfo.name = WCharToUTF8(adapterDesc.Description);
DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
@@ -90,11 +80,6 @@ namespace dawn_native { namespace d3d12 {
: wgpu::AdapterType::DiscreteGPU;
}
- // Get the adapter's name as a UTF8 string.
- std::wstring_convert<DeletableFacet<std::codecvt<wchar_t, char, std::mbstate_t>>> converter(
- "Error converting");
- mPCIInfo.name = converter.to_bytes(adapterDesc.Description);
-
// Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
LARGE_INTEGER umdVersion;
if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
@@ -206,8 +191,13 @@ namespace dawn_native { namespace d3d12 {
if (!GetInstance()->IsBackendValidationEnabled()) {
return;
}
+
+ // If the debug layer is not installed, return immediately to avoid crashing the process.
ComPtr<ID3D12InfoQueue> infoQueue;
- ASSERT_SUCCESS(mD3d12Device.As(&infoQueue));
+ if (FAILED(mD3d12Device.As(&infoQueue))) {
+ return;
+ }
+
infoQueue->PopRetrievalFilter();
infoQueue->PopStorageFilter();
}
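
The hunks above swap the deprecated std::wstring_convert path for a WCharToUTF8() helper from common/WindowsUtils. A minimal sketch of such a conversion built on the Win32 API; the real helper may differ in signature and error handling:

#include <windows.h>

#include <string>

// Sketch only: the usual two-pass WideCharToMultiByte conversion to UTF-8.
std::string WCharToUTF8Sketch(const wchar_t* input) {
    // First pass: ask for the required buffer size, including the terminating null.
    int requiredSize = WideCharToMultiByte(CP_UTF8, 0, input, -1, nullptr, 0, nullptr, nullptr);
    if (requiredSize <= 0) {
        return "";
    }
    std::string result(static_cast<size_t>(requiredSize), '\0');
    // Second pass: perform the conversion into the sized buffer.
    WideCharToMultiByte(CP_UTF8, 0, input, -1, &result[0], requiredSize, nullptr, nullptr);
    result.resize(static_cast<size_t>(requiredSize) - 1);  // drop the embedded null
    return result;
}
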
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
index a162dbe0e55..57548c7ef81 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
@@ -48,13 +48,6 @@ namespace dawn_native { namespace d3d12 {
// Enable additional debug layers.
dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
}
-
- ComPtr<IDXGIDebug1> dxgiDebug;
- if (SUCCEEDED(functions->dxgiGetDebugInterface1(0, IID_PPV_ARGS(&dxgiDebug)))) {
- ASSERT(dxgiDebug != nullptr);
- dxgiDebug->ReportLiveObjects(DXGI_DEBUG_ALL,
- DXGI_DEBUG_RLO_FLAGS(DXGI_DEBUG_RLO_ALL));
- }
}
if (beginCaptureOnStartup) {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 88a54b8f052..75eb734c695 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -25,8 +25,8 @@
namespace dawn_native { namespace d3d12 {
// static
- ResultOrError<BindGroup*> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
+ ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
index a48fc14225d..43b54c14dc0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
@@ -28,8 +28,8 @@ namespace dawn_native { namespace d3d12 {
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
- static ResultOrError<BindGroup*> Create(Device* device,
- const BindGroupDescriptor* descriptor);
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
BindGroup(Device* device,
const BindGroupDescriptor* descriptor,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index 449a07a5ff2..c5151664ee0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -56,6 +56,12 @@ namespace dawn_native { namespace d3d12 {
}
} // anonymous namespace
+ // static
+ Ref<BindGroupLayout> BindGroupLayout::Create(Device* device,
+ const BindGroupLayoutDescriptor* descriptor) {
+ return AcquireRef(new BindGroupLayout(device, descriptor));
+ }
+
BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
: BindGroupLayoutBase(device, descriptor),
mBindingOffsets(GetBindingCount()),
@@ -138,7 +144,7 @@ namespace dawn_native { namespace d3d12 {
device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
}
- ResultOrError<BindGroup*> BindGroupLayout::AllocateBindGroup(
+ ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
Device* device,
const BindGroupDescriptor* descriptor) {
uint32_t viewSizeIncrement = 0;
@@ -158,7 +164,7 @@ namespace dawn_native { namespace d3d12 {
bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
}
- return bindGroup.Detach();
+ return bindGroup;
}
void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
index 6ee462348fc..503566643d5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -30,10 +30,11 @@ namespace dawn_native { namespace d3d12 {
class BindGroupLayout final : public BindGroupLayoutBase {
public:
- BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
+ static Ref<BindGroupLayout> Create(Device* device,
+ const BindGroupLayoutDescriptor* descriptor);
- ResultOrError<BindGroup*> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
enum DescriptorType {
@@ -53,6 +54,7 @@ namespace dawn_native { namespace d3d12 {
const D3D12_DESCRIPTOR_RANGE* GetSamplerDescriptorRanges() const;
private:
+ BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
~BindGroupLayout() override = default;
ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mBindingOffsets;
std::array<uint32_t, DescriptorType::Count> mDescriptorCounts;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index 2d445be8725..a73eb9b57c4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -94,6 +94,13 @@ namespace dawn_native { namespace d3d12 {
}
} // namespace
+ // static
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+ DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+ return buffer;
+ }
+
Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
: BufferBase(device, descriptor) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 613508fb308..4cd3d512caa 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -27,9 +27,8 @@ namespace dawn_native { namespace d3d12 {
class Buffer final : public BufferBase {
public:
- Buffer(Device* device, const BufferDescriptor* descriptor);
-
- MaybeError Initialize(bool mappedAtCreation);
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
ID3D12Resource* GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
@@ -51,7 +50,10 @@ namespace dawn_native { namespace d3d12 {
const CopyTextureToBufferCmd* copy);
private:
+ Buffer(Device* device, const BufferDescriptor* descriptor);
~Buffer() override;
+
+ MaybeError Initialize(bool mappedAtCreation);
MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
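
Several backend objects in this patch (BindGroupLayout, Buffer, CommandBuffer, and more below) move to the same shape: the constructor becomes private and a static Create() returns ResultOrError<Ref<T>>, so callers never see a raw, half-initialized pointer. A minimal sketch of that pattern with a hypothetical Widget type; ResultOrError, Ref, AcquireRef, and DAWN_TRY are the existing Dawn utilities the patch relies on:

// Sketch only: Widget, WidgetBase, and WidgetDescriptor are hypothetical.
class Widget final : public WidgetBase {
  public:
    static ResultOrError<Ref<Widget>> Create(Device* device, const WidgetDescriptor* descriptor) {
        Ref<Widget> widget = AcquireRef(new Widget(device, descriptor));
        DAWN_TRY(widget->Initialize());  // failures propagate instead of leaking a raw pointer
        return widget;                   // the Ref keeps the object alive for the caller
    }

  private:
    Widget(Device* device, const WidgetDescriptor* descriptor);
    MaybeError Initialize();
};
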
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 4a9051d458c..8d0056adf6b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -81,41 +81,8 @@ namespace dawn_native { namespace d3d12 {
copySize.width == srcSize.width && //
copySize.height == dstSize.height && //
copySize.height == srcSize.height && //
- copySize.depth == dstSize.depth && //
- copySize.depth == srcSize.depth;
- }
-
- void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
- const Texture2DCopySplit& baseCopySplit,
- Buffer* buffer,
- uint64_t baseOffset,
- uint64_t bufferBytesPerRow,
- Texture* texture,
- uint32_t textureMiplevel,
- uint32_t textureSlice,
- Aspect aspect) {
- const D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice,
- aspect);
-
- const uint64_t offset = baseCopySplit.offset + baseOffset;
-
- for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
- const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
-
- // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
- // members in Texture2DCopySplit::CopyInfo.
- const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
- info.bufferSize, offset,
- bufferBytesPerRow, aspect);
- const D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
-
- commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
- info.bufferOffset.y, info.bufferOffset.z,
- &textureLocation, &sourceRegion);
- }
+ copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers && //
+ copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
}
void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
@@ -148,6 +115,82 @@ namespace dawn_native { namespace d3d12 {
commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
count, offsets.data(), 0);
}
+
+ bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy) {
+ // Currently we only need the workaround for an Intel D3D12 driver issue.
+ if (device->IsToggleEnabled(
+ Toggle::
+ UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
+ bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
+ ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+
+            // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, and the texel block
+            // sizes of depth-stencil formats are always at least 4 bytes.
+ bool isSmallColorFormat =
+ HasOneBit(srcCopy.aspect) &&
+ srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
+ if (copyToLesserLevel && isSmallColorFormat) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ void RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+ const TextureCopy& srcCopy,
+ const TextureCopy& dstCopy,
+ const Extent3D& copySize) {
+ ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+ ASSERT(srcCopy.aspect == dstCopy.aspect);
+ dawn_native::Format format = srcCopy.texture->GetFormat();
+ const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+ ASSERT(copySize.width % blockInfo.width == 0);
+ uint32_t widthInBlocks = copySize.width / blockInfo.width;
+ ASSERT(copySize.height % blockInfo.height == 0);
+ uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+ // Create tempBuffer
+ uint32_t bytesPerRow =
+ Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
+ uint32_t rowsPerImage = heightInBlocks;
+
+            // The size of the temporary buffer does not need to be a multiple of 4 because we
+            // do not set mappedAtCreation to true.
+ auto tempBufferSize =
+ ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
+
+ BufferDescriptor tempBufferDescriptor;
+ tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+ tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
+ Device* device = ToBackend(srcCopy.texture->GetDevice());
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<Buffer> tempBuffer =
+ AcquireRef(ToBackend(device->APICreateBuffer(&tempBufferDescriptor)));
+
+ // Copy from source texture into tempBuffer
+ Texture* srcTexture = ToBackend(srcCopy.texture).Get();
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ BufferCopy bufferCopy;
+ bufferCopy.buffer = tempBuffer;
+ bufferCopy.offset = 0;
+ bufferCopy.bytesPerRow = bytesPerRow;
+ bufferCopy.rowsPerImage = rowsPerImage;
+ RecordCopyTextureToBuffer(recordingContext->GetCommandList(), srcCopy, bufferCopy,
+ srcTexture, tempBuffer.Get(), copySize);
+
+ // Copy from tempBuffer into destination texture
+ tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ Texture* dstTexture = ToBackend(dstCopy.texture).Get();
+ RecordCopyBufferToTexture(recordingContext, dstCopy, tempBuffer->GetD3D12Resource(), 0,
+ bytesPerRow, rowsPerImage, copySize, dstTexture,
+ dstCopy.aspect);
+
+ // Save tempBuffer into recordingContext
+ recordingContext->AddToTempBuffers(std::move(tempBuffer));
+ }
} // anonymous namespace
class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
@@ -545,6 +588,12 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
+ // static
+ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
+ }
+
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor) {
}
@@ -692,7 +741,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange subresources =
GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
@@ -707,11 +756,10 @@ namespace dawn_native { namespace d3d12 {
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
subresources);
- // compute the copySplits and record the CopyTextureRegion commands
- CopyBufferToTextureWithCopySplit(
- commandContext, copy->destination, copy->copySize, texture,
- buffer->GetD3D12Resource(), copy->source.offset, copy->source.bytesPerRow,
- copy->source.rowsPerImage, subresources.aspects);
+ RecordCopyBufferToTexture(commandContext, copy->destination,
+ buffer->GetD3D12Resource(), copy->source.offset,
+ copy->source.bytesPerRow, copy->source.rowsPerImage,
+ copy->copySize, texture, subresources.aspects);
break;
}
@@ -723,7 +771,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange subresources =
GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
@@ -733,43 +781,8 @@ namespace dawn_native { namespace d3d12 {
subresources);
buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- const TexelBlockInfo& blockInfo =
- texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
-
- // See comments around ComputeTextureCopySplits() for more details.
- const TextureCopySplits copySplits = ComputeTextureCopySplits(
- copy->source.origin, copy->copySize, blockInfo, copy->destination.offset,
- copy->destination.bytesPerRow, copy->destination.rowsPerImage);
-
- const uint64_t bytesPerSlice =
- copy->destination.bytesPerRow * copy->destination.rowsPerImage;
-
- // copySplits.copies2D[1] is always calculated for the second copy slice with
- // extra "bytesPerSlice" copy offset compared with the first copy slice. So
- // here we use an array bufferOffsetsForNextSlice to record the extra offsets
- // for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
- // the next copy slice that uses copySplits.copies2D[0], and
- // bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
- // that uses copySplits.copies2D[1].
- std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits>
- bufferOffsetsForNextSlice = {{0u, 0u}};
- for (uint32_t copySlice = 0; copySlice < copy->copySize.depth; ++copySlice) {
- const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
-
- const Texture2DCopySplit& copySplitPerLayerBase =
- copySplits.copies2D[splitIndex];
- const uint64_t bufferOffsetForNextSlice =
- bufferOffsetsForNextSlice[splitIndex];
- const uint32_t copyTextureLayer = copySlice + copy->source.origin.z;
-
- RecordCopyTextureToBufferFromTextureCopySplit(
- commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextSlice,
- copy->destination.bytesPerRow, texture, copy->source.mipLevel,
- copyTextureLayer, subresources.aspects);
-
- bufferOffsetsForNextSlice[splitIndex] +=
- bytesPerSlice * copySplits.copies2D.size();
- }
+ RecordCopyTextureToBuffer(commandList, copy->source, copy->destination, texture,
+ buffer, copy->copySize);
break;
}
@@ -801,7 +814,7 @@ namespace dawn_native { namespace d3d12 {
// it is not allowed to copy with overlapped subresources, but we still
// add the ASSERT here as a reminder for this possible misuse.
ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
- copy->copySize.depth));
+ copy->copySize.depthOrArrayLayers));
}
source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
srcRange);
@@ -809,6 +822,13 @@ namespace dawn_native { namespace d3d12 {
wgpu::TextureUsage::CopyDst, dstRange);
ASSERT(srcRange.aspects == dstRange.aspects);
+ if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
+ copy->destination)) {
+ RecordCopyTextureWithTemporaryBuffer(commandContext, copy->source,
+ copy->destination, copy->copySize);
+ break;
+ }
+
if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
commandList->CopyResource(destination->GetD3D12Resource(),
source->GetD3D12Resource());
@@ -820,7 +840,8 @@ namespace dawn_native { namespace d3d12 {
copy->copySize.width, copy->copySize.height, 1u};
for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
- for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
+ for (uint32_t slice = 0; slice < copy->copySize.depthOrArrayLayers;
+ ++slice) {
D3D12_TEXTURE_COPY_LOCATION srcLocation =
ComputeTextureCopyLocationForTexture(
source, copy->source.mipLevel,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index cf4c4517c25..342f851a998 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -30,11 +30,14 @@ namespace dawn_native { namespace d3d12 {
class CommandBuffer final : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
MaybeError RecordCommands(CommandRecordingContext* commandContext);
private:
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
MaybeError RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker);
MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
index cae9e2a1c1a..8faa46e0369 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
@@ -112,10 +112,15 @@ namespace dawn_native { namespace d3d12 {
mIsOpen = false;
mSharedTextures.clear();
mHeapsPendingUsage.clear();
+ mTempBuffers.clear();
}
bool CommandRecordingContext::IsOpen() const {
return mIsOpen;
}
+ void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
+ mTempBuffers.emplace_back(tempBuffer);
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
index 4fc089d424c..6c6dc37dd0f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
@@ -16,6 +16,7 @@
#include "dawn_native/Error.h"
#include "dawn_native/IntegerTypes.h"
+#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
#include <set>
@@ -41,12 +42,16 @@ namespace dawn_native { namespace d3d12 {
void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
+ void AddToTempBuffers(Ref<Buffer> tempBuffer);
+
private:
ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
bool mIsOpen = false;
std::set<Texture*> mSharedTextures;
std::vector<Heap*> mHeapsPendingUsage;
+
+ std::vector<Ref<Buffer>> mTempBuffers;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
index 88aa240ead4..948722af539 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/d3d12/ComputePipelineD3D12.h"
#include "common/Assert.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
@@ -23,12 +24,12 @@
namespace dawn_native { namespace d3d12 {
- ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ ResultOrError<Ref<ComputePipeline>> ComputePipeline::Create(
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
@@ -51,8 +52,10 @@ namespace dawn_native { namespace d3d12 {
SingleShaderStage::Compute,
ToBackend(GetLayout()), compileFlags));
d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
- device->GetD3D12Device()->CreateComputePipelineState(&d3dDesc,
- IID_PPV_ARGS(&mPipelineState));
+ auto* d3d12Device = device->GetD3D12Device();
+ DAWN_TRY(CheckHRESULT(
+ d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
+ "D3D12 creating pipeline state"));
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
index 7d05f0b3c1c..b71ae1a4691 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
@@ -25,8 +25,9 @@ namespace dawn_native { namespace d3d12 {
class ComputePipeline final : public ComputePipelineBase {
public:
- static ResultOrError<ComputePipeline*> Create(Device* device,
- const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<Ref<ComputePipeline>> Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor);
ComputePipeline() = delete;
ID3D12PipelineState* GetPipelineState() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index aac8968c315..5fc2e24ec4c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -17,6 +17,8 @@
#include "dawn_native/D3D12Backend.h"
+#include "common/Log.h"
+#include "common/Math.h"
#include "common/SwapChainUtils.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
@@ -51,6 +53,89 @@ namespace dawn_native { namespace d3d12 {
: ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
}
+ ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
+ const WGPUTextureDescriptor* descriptor)
+ : mD3D12Resource(std::move(d3d12Resource)),
+ mUsage(descriptor->usage),
+ mDimension(descriptor->dimension),
+ mSize(descriptor->size),
+ mFormat(descriptor->format),
+ mMipLevelCount(descriptor->mipLevelCount),
+ mSampleCount(descriptor->sampleCount) {
+ ASSERT(descriptor->nextInChain == nullptr);
+ }
+
+ WGPUTexture ExternalImageDXGI::ProduceTexture(
+ WGPUDevice device,
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+
+ // Ensure the texture usage is allowed
+ if (!IsSubset(descriptor->usage, mUsage)) {
+ dawn::ErrorLog() << "Texture usage is not valid for external image";
+ return nullptr;
+ }
+
+ TextureDescriptor textureDescriptor = {};
+ textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
+ textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
+ textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
+ textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
+ textureDescriptor.mipLevelCount = mMipLevelCount;
+ textureDescriptor.sampleCount = mSampleCount;
+
+ Ref<TextureBase> texture = backendDevice->CreateExternalTexture(
+ &textureDescriptor, mD3D12Resource, ExternalMutexSerial(descriptor->acquireMutexKey),
+ descriptor->isSwapChainTexture, descriptor->isInitialized);
+ return reinterpret_cast<WGPUTexture>(texture.Detach());
+ }
+
+ // static
+ std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
+ WGPUDevice device,
+ const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+
+ Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
+ if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
+ descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
+ return nullptr;
+ }
+
+ const TextureDescriptor* textureDescriptor =
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+
+ if (backendDevice->ConsumedError(
+ ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
+ return nullptr;
+ }
+
+ if (backendDevice->ConsumedError(
+ ValidateTextureDescriptorCanBeWrapped(textureDescriptor))) {
+ return nullptr;
+ }
+
+ if (backendDevice->ConsumedError(
+ ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
+ return nullptr;
+ }
+
+        // The shared handle is assumed to support resource sharing; the resource sharing
+        // capability tier must allow sharing resources between D3D devices.
+ const Format* format =
+ backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
+ if (format->IsMultiPlanar()) {
+ if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
+ backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
+ return nullptr;
+ }
+ }
+
+ std::unique_ptr<ExternalImageDXGI> result(
+ new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
+ return result;
+ }
+
uint64_t SetExternalMemoryReservation(WGPUDevice device,
uint64_t requestedReservationSize,
MemorySegment memorySegment) {
@@ -60,15 +145,6 @@ namespace dawn_native { namespace d3d12 {
memorySegment, requestedReservationSize);
}
- WGPUTexture WrapSharedHandle(WGPUDevice device,
- const ExternalImageDescriptorDXGISharedHandle* descriptor) {
- Device* backendDevice = reinterpret_cast<Device*>(device);
- Ref<TextureBase> texture = backendDevice->WrapSharedHandle(
- descriptor, descriptor->sharedHandle, ExternalMutexSerial(descriptor->acquireMutexKey),
- descriptor->isSwapChainTexture);
- return reinterpret_cast<WGPUTexture>(texture.Detach());
- }
-
AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
: AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
}
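
WrapSharedHandle() is replaced by the two-step ExternalImageDXGI API above: Create() opens and validates the shared handle once, and ProduceTexture() can then mint textures against it per access. A minimal client-side sketch using only the descriptor fields visible in this hunk; it assumes an existing WGPUDevice device, a HANDLE sharedHandle from another D3D device, and a WGPUTextureDescriptor textureDesc describing the shared resource:

// Sketch only: error handling and mutex key management are elided.
using namespace dawn_native::d3d12;

ExternalImageDescriptorDXGISharedHandle imageDesc;
imageDesc.sharedHandle = sharedHandle;
imageDesc.cTextureDescriptor = &textureDesc;

std::unique_ptr<ExternalImageDXGI> image = ExternalImageDXGI::Create(device, &imageDesc);
if (image != nullptr) {
    ExternalImageAccessDescriptorDXGIKeyedMutex accessDesc;
    accessDesc.usage = WGPUTextureUsage_Sampled;  // must be a subset of the creation usage
    accessDesc.acquireMutexKey = 0;
    accessDesc.isSwapChainTexture = false;
    accessDesc.isInitialized = true;
    WGPUTexture texture = image->ProduceTexture(device, &accessDesc);
}
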
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index 9d5041f9693..4de76548b71 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "common/GPUInfo.h"
#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
@@ -268,17 +269,17 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Device::WaitForSerial(ExecutionSerial serial) {
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
if (GetCompletedCommandSerial() < serial) {
DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
"D3D12 set event on completion"));
WaitForSingleObject(mFenceEvent, INFINITE);
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
}
return {};
}
- ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
ExecutionSerial completeSerial = ExecutionSerial(mFence->GetCompletedValue());
if (completeSerial <= GetCompletedCommandSerial()) {
@@ -296,51 +297,51 @@ namespace dawn_native { namespace d3d12 {
return mPendingCommands.ExecuteCommandList(this);
}
- ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
return BindGroup::Create(this, descriptor);
}
- ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
- return new BindGroupLayout(this, descriptor);
+ return BindGroupLayout::Create(this, descriptor);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- Ref<Buffer> buffer = AcquireRef(new Buffer(this, descriptor));
- DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
- return std::move(buffer);
+ return Buffer::Create(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
}
- ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
return ComputePipeline::Create(this, descriptor);
}
- ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
return PipelineLayout::Create(this, descriptor);
}
- ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
+ ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) {
return RenderPipeline::Create(this, descriptor);
}
- ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return new Sampler(this, descriptor);
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return Sampler::Create(this, descriptor);
}
- ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
return ShaderModule::Create(this, descriptor, parseResult);
}
- ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new SwapChain(this, descriptor);
+ return SwapChain::Create(this, descriptor);
}
- ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
@@ -349,10 +350,10 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return Texture::Create(this, descriptor);
}
- ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- return new TextureView(texture, descriptor);
+ return TextureView::Create(texture, descriptor);
}
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
@@ -404,7 +405,7 @@ namespace dawn_native { namespace d3d12 {
CommandRecordingContext* commandContext;
DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
Texture* texture = ToBackend(dst->texture.Get());
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
@@ -416,10 +417,9 @@ namespace dawn_native { namespace d3d12 {
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
- // compute the copySplits and record the CopyTextureRegion commands
- CopyBufferToTextureWithCopySplit(commandContext, *dst, copySizePixels, texture,
- ToBackend(source)->GetResource(), src.offset,
- src.bytesPerRow, src.rowsPerImage, range.aspects);
+ RecordCopyBufferToTexture(commandContext, *dst, ToBackend(source)->GetResource(),
+ src.offset, src.bytesPerRow, src.rowsPerImage, copySizePixels,
+ texture, range.aspects);
return {};
}
@@ -436,14 +436,16 @@ namespace dawn_native { namespace d3d12 {
initialUsage);
}
- Ref<TextureBase> Device::WrapSharedHandle(const ExternalImageDescriptor* descriptor,
- HANDLE sharedHandle,
- ExternalMutexSerial acquireMutexKey,
- bool isSwapChainTexture) {
+ Ref<TextureBase> Device::CreateExternalTexture(const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ ExternalMutexSerial acquireMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized) {
Ref<Texture> dawnTexture;
- if (ConsumedError(Texture::Create(this, descriptor, sharedHandle, acquireMutexKey,
- isSwapChainTexture),
- &dawnTexture)) {
+ if (ConsumedError(
+ Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+ acquireMutexKey, isSwapChainTexture, isInitialized),
+ &dawnTexture)) {
return nullptr;
}
return {dawnTexture};
@@ -533,6 +535,20 @@ namespace dawn_native { namespace d3d12 {
// By default use the maximum shader-visible heap size allowed.
SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+
+ PCIInfo pciInfo = GetAdapter()->GetPCIInfo();
+
+ // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
+ // See http://crbug.com/1161355 for more information.
+ // TODO(jiawei.shao@intel.com): disable this workaround on the newer drivers when the driver
+ // bug is fixed.
+ if (gpu_info::IsIntel(pciInfo.vendorId) &&
+ (gpu_info::IsSkylake(pciInfo.deviceId) || gpu_info::IsKabylake(pciInfo.deviceId) ||
+ gpu_info::IsCoffeelake(pciInfo.deviceId))) {
+ SetToggle(
+ Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+ true);
+ }
}
MaybeError Device::WaitForIdleForDestruction() {
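
The Device hunks above change fence polling from returning a bare ExecutionSerial to ResultOrError<ExecutionSerial>, so callers such as WaitForSerial() now wrap every step in DAWN_TRY. A minimal sketch of that propagation pattern; both functions are hypothetical stand-ins, while DAWN_TRY and DAWN_TRY_ASSIGN are Dawn's existing error macros:

// Sketch only: illustrates unwrap-or-return error propagation.
ResultOrError<ExecutionSerial> QueryCompletedSerialSketch();
MaybeError CheckPassedSerialsSketch();

MaybeError TickSketch() {
    ExecutionSerial completed;
    DAWN_TRY_ASSIGN(completed, QueryCompletedSerialSketch());  // unwraps or returns the error
    DAWN_TRY(CheckPassedSerialsSketch());                      // propagates MaybeError failures
    return {};
}
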
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 113f8197468..4819dd4f921 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -46,8 +46,9 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
- CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
MaybeError TickImpl() override;
@@ -121,10 +122,11 @@ namespace dawn_native { namespace d3d12 {
StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
- Ref<TextureBase> WrapSharedHandle(const ExternalImageDescriptor* descriptor,
- HANDLE sharedHandle,
- ExternalMutexSerial acquireMutexKey,
- bool isSwapChainTexture);
+ Ref<TextureBase> CreateExternalTexture(const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ ExternalMutexSerial acquireMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized);
ResultOrError<ComPtr<IDXGIKeyedMutex>> CreateKeyedMutexForTexture(
ID3D12Resource* d3d12Resource);
void ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex);
@@ -139,33 +141,34 @@ namespace dawn_native { namespace d3d12 {
private:
using DeviceBase::DeviceBase;
- ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
- ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
- ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
- ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) override;
- ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
- ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) override;
ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) override;
- ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
@@ -178,7 +181,7 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Fence> mFence;
HANDLE mFenceEvent = nullptr;
- ExecutionSerial CheckAndUpdateCompletedSerials() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
ComPtr<ID3D12CommandQueue> mCommandQueue;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 93e8bc0d644..913c845ff83 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -54,12 +54,12 @@ namespace dawn_native { namespace d3d12 {
}
} // anonymous namespace
- ResultOrError<PipelineLayout*> PipelineLayout::Create(
+ ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
Device* device,
const PipelineLayoutDescriptor* descriptor) {
Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
DAWN_TRY(layout->Initialize());
- return layout.Detach();
+ return layout;
}
MaybeError PipelineLayout::Initialize() {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index 45c47797074..f20923543bf 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -26,8 +26,9 @@ namespace dawn_native { namespace d3d12 {
class PipelineLayout final : public PipelineLayoutBase {
public:
- static ResultOrError<PipelineLayout*> Create(Device* device,
- const PipelineLayoutDescriptor* descriptor);
+ static ResultOrError<Ref<PipelineLayout>> Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor);
uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
index 726e6a201aa..420c8a3c751 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
@@ -33,11 +33,11 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
// static
- ResultOrError<QuerySet*> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
DAWN_TRY(querySet->Initialize());
- return querySet.Detach();
+ return querySet;
}
MaybeError QuerySet::Initialize() {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
index 7b24cceb67d..16c49d199e6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
@@ -24,8 +24,8 @@ namespace dawn_native { namespace d3d12 {
class QuerySet : public QuerySetBase {
public:
- static ResultOrError<QuerySet*> Create(Device* device,
- const QuerySetDescriptor* descriptor);
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
ID3D12QueryHeap* GetQueryHeap() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
index 49a0e7597a1..ca6064de511 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
@@ -32,7 +32,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
- device->Tick();
+ DAWN_TRY(device->Tick());
CommandRecordingContext* commandContext;
DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index eb3f2d1d43b..3aea032deb1 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -31,66 +31,68 @@ namespace dawn_native { namespace d3d12 {
namespace {
DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::Uint8x2:
return DXGI_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Uint8x4:
return DXGI_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Sint8x2:
return DXGI_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::Sint8x4:
return DXGI_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::Unorm8x2:
return DXGI_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Unorm8x4:
return DXGI_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Snorm8x2:
return DXGI_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Snorm8x4:
return DXGI_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::Uint16x2:
return DXGI_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Uint16x4:
return DXGI_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Sint16x2:
return DXGI_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Sint16x4:
return DXGI_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::Unorm16x2:
return DXGI_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Unorm16x4:
return DXGI_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Snorm16x2:
return DXGI_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Snorm16x4:
return DXGI_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Float16x2:
return DXGI_FORMAT_R16G16_FLOAT;
- case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float16x4:
return DXGI_FORMAT_R16G16B16A16_FLOAT;
- case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float32:
return DXGI_FORMAT_R32_FLOAT;
- case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float32x2:
return DXGI_FORMAT_R32G32_FLOAT;
- case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float32x3:
return DXGI_FORMAT_R32G32B32_FLOAT;
- case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float32x4:
return DXGI_FORMAT_R32G32B32A32_FLOAT;
- case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::Uint32:
return DXGI_FORMAT_R32_UINT;
- case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::Uint32x2:
return DXGI_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::Uint32x3:
return DXGI_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Uint32x4:
return DXGI_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Sint32:
return DXGI_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Sint32x2:
return DXGI_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Sint32x3:
return DXGI_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Sint32x4:
return DXGI_FORMAT_R32G32B32A32_SINT;
+ default:
+ UNREACHABLE();
}
}
@@ -205,16 +207,18 @@ namespace dawn_native { namespace d3d12 {
return static_cast<uint8_t>(writeMask);
}
- D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorStateDescriptor* descriptor) {
+ D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
- blendDesc.BlendEnable = BlendEnabled(descriptor);
- blendDesc.SrcBlend = D3D12Blend(descriptor->colorBlend.srcFactor);
- blendDesc.DestBlend = D3D12Blend(descriptor->colorBlend.dstFactor);
- blendDesc.BlendOp = D3D12BlendOperation(descriptor->colorBlend.operation);
- blendDesc.SrcBlendAlpha = D3D12Blend(descriptor->alphaBlend.srcFactor);
- blendDesc.DestBlendAlpha = D3D12Blend(descriptor->alphaBlend.dstFactor);
- blendDesc.BlendOpAlpha = D3D12BlendOperation(descriptor->alphaBlend.operation);
- blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(descriptor->writeMask);
+ blendDesc.BlendEnable = state->blend != nullptr;
+ if (blendDesc.BlendEnable) {
+ blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
+ blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
+ blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
+ blendDesc.SrcBlendAlpha = D3D12Blend(state->blend->alpha.srcFactor);
+ blendDesc.DestBlendAlpha = D3D12Blend(state->blend->alpha.dstFactor);
+ blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
+ }
+ blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
blendDesc.LogicOpEnable = false;
blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
return blendDesc;
@@ -252,8 +256,7 @@ namespace dawn_native { namespace d3d12 {
return desc;
}
- D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(
- const DepthStencilStateDescriptor* descriptor) {
+ D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
mDepthStencilDescriptor.DepthEnable = TRUE;
mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
@@ -292,15 +295,15 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
- ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
Device* device,
- const RenderPipelineDescriptor* descriptor) {
+ const RenderPipelineDescriptor2* descriptor) {
Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor2* descriptor) {
Device* device = ToBackend(GetDevice());
uint32_t compileFlags = 0;
#if defined(_DEBUG)
@@ -313,12 +316,12 @@ namespace dawn_native { namespace d3d12 {
D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
PerStage<const char*> entryPoints;
- entryPoints[SingleShaderStage::Vertex] = descriptor->vertexStage.entryPoint;
- entryPoints[SingleShaderStage::Fragment] = descriptor->fragmentStage->entryPoint;
+ entryPoints[SingleShaderStage::Vertex] = descriptor->vertex.entryPoint;
+ entryPoints[SingleShaderStage::Fragment] = descriptor->fragment->entryPoint;
PerStage<ShaderModule*> modules;
- modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertexStage.module);
- modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragmentStage->module);
+ modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertex.module);
+ modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragment->module);
PerStage<D3D12_SHADER_BYTECODE*> shaders;
shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
@@ -345,8 +348,8 @@ namespace dawn_native { namespace d3d12 {
descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
}
- descriptorD3D12.IBStripCutValue = ComputeIndexBufferStripCutValue(
- GetPrimitiveTopology(), GetVertexStateDescriptor()->indexFormat);
+ descriptorD3D12.IBStripCutValue =
+ ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
@@ -370,15 +373,14 @@ namespace dawn_native { namespace d3d12 {
descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
D3D12TextureFormat(GetColorAttachmentFormat(i));
descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
- ComputeColorDesc(GetColorStateDescriptor(i));
+ ComputeColorDesc(GetColorTargetState(i));
}
descriptorD3D12.NumRenderTargets = static_cast<uint32_t>(GetColorAttachmentsMask().count());
- descriptorD3D12.BlendState.AlphaToCoverageEnable = descriptor->alphaToCoverageEnabled;
+ descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
- descriptorD3D12.DepthStencilState =
- ComputeDepthStencilDesc(GetDepthStencilStateDescriptor());
+ descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
descriptorD3D12.SampleMask = GetSampleMask();
descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
index 7e6e70340a2..bb82b8def8e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
@@ -26,8 +26,9 @@ namespace dawn_native { namespace d3d12 {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<RenderPipeline*> Create(Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<Ref<RenderPipeline>> Create(
+ Device* device,
+ const RenderPipelineDescriptor2* descriptor);
RenderPipeline() = delete;
D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
@@ -38,7 +39,7 @@ namespace dawn_native { namespace d3d12 {
private:
~RenderPipeline() override;
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+ MaybeError Initialize(const RenderPipelineDescriptor2* descriptor);
D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
index 7d63e0d2979..0671cc428dc 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
@@ -32,6 +32,11 @@ namespace dawn_native { namespace d3d12 {
}
} // namespace
+ // static
+ Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(device, descriptor));
+ }
+
Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
: SamplerBase(device, descriptor) {
D3D12_FILTER_TYPE minFilter;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h
index 59a7d8370a4..ede374b94b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.h
@@ -25,11 +25,12 @@ namespace dawn_native { namespace d3d12 {
class Sampler final : public SamplerBase {
public:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
+ static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
private:
+ Sampler(Device* device, const SamplerDescriptor* descriptor);
~Sampler() override = default;
D3D12_SAMPLER_DESC mSamplerDesc = {};
};
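Note: several backend objects in this patch (Sampler, SwapChain, TextureView, BindGroupLayout, CommandBuffer, ...) switch to a private constructor plus a static Create() that returns a Ref<T>, so callers can no longer new the object or hold an unmanaged pointer. A toy sketch of the shape of that pattern, using std::shared_ptr as a stand-in for Dawn's Ref/AcquireRef:

    #include <memory>

    // Toy stand-in: Dawn's Ref<T>/AcquireRef manage an intrusive refcount;
    // std::shared_ptr is used here only to show the ownership shape.
    template <typename T>
    using Ref = std::shared_ptr<T>;

    class Sampler {
      public:
        static Ref<Sampler> Create(int descriptor) {
            return Ref<Sampler>(new Sampler(descriptor));  // the only way to construct one
        }

      private:
        explicit Sampler(int descriptor) : mDescriptor(descriptor) {}
        int mDescriptor;
    };

    // Usage: auto sampler = Sampler::Create(42);  // 'new Sampler(...)' outside the class would not compile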
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 1c9bf1c2f20..33ea80b4564 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -18,6 +18,7 @@
#include "common/BitSetIterator.h"
#include "common/Log.h"
#include "dawn_native/SpirvUtils.h"
+#include "dawn_native/TintUtils.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
@@ -29,13 +30,11 @@
#include <spirv_hlsl.hpp>
-#ifdef DAWN_ENABLE_WGSL
// Tint include must be after spirv_hlsl.hpp, because spirv-cross has its own
// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
// is at 3 while spirv-headers is at 4.
-# undef SPV_REVISION
-# include <tint/tint.h>
-#endif // DAWN_ENABLE_WGSL
+#undef SPV_REVISION
+#include <tint/tint.h>
namespace dawn_native { namespace d3d12 {
@@ -173,12 +172,12 @@ namespace dawn_native { namespace d3d12 {
}
// static
- ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
DAWN_TRY(module->Initialize(parseResult));
- return module.Detach();
+ return module;
}
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
@@ -186,11 +185,8 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- DAWN_TRY(InitializeBase(parseResult));
-#ifdef DAWN_ENABLE_WGSL
- mTintProgram = std::move(parseResult->tintProgram);
-#endif
- return {};
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+ return InitializeBase(parseResult);
}
ResultOrError<std::string> ShaderModule::TranslateToHLSLWithTint(
@@ -201,42 +197,97 @@ namespace dawn_native { namespace d3d12 {
FirstOffsetInfo* firstOffsetInfo) const {
ASSERT(!IsError());
-#ifdef DAWN_ENABLE_WGSL
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+ using BindingRemapper = tint::transform::BindingRemapper;
+ using BindingPoint = tint::transform::BindingPoint;
+ BindingRemapper::BindingPoints bindingPoints;
+ BindingRemapper::AccessControls accessControls;
+
+ const EntryPointMetadata::BindingInfoArray& moduleBindingInfo =
+ GetEntryPoint(entryPointName).bindings;
+
+ // d3d12::BindGroupLayout packs the bindings per HLSL register-space.
+ // We modify the Tint AST to make the "bindings" decoration match the
+ // offset chosen by d3d12::BindGroupLayout so that Tint produces HLSL
+ // with the correct registers assigned to each interface variable.
+ for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& bindingOffsets = bgl->GetBindingOffsets();
+ const auto& groupBindingInfo = moduleBindingInfo[group];
+ for (const auto& it : groupBindingInfo) {
+ BindingNumber binding = it.first;
+ auto const& bindingInfo = it.second;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+ uint32_t bindingOffset = bindingOffsets[bindingIndex];
+ BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+ static_cast<uint32_t>(binding)};
+ BindingPoint dstBindingPoint{static_cast<uint32_t>(group), bindingOffset};
+ if (srcBindingPoint != dstBindingPoint) {
+ bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+ }
+
+ // Declaring a read-only storage buffer in HLSL but specifying a
+ // storage buffer in the BGL produces the wrong output.
+ // Force read-only storage buffer bindings to be treated as UAV
+ // instead of SRV.
+ const bool forceStorageBufferAsUAV =
+ (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
+ bgl->GetBindingInfo(bindingIndex).buffer.type ==
+ wgpu::BufferBindingType::Storage);
+ if (forceStorageBufferAsUAV) {
+ accessControls.emplace(srcBindingPoint, tint::ast::AccessControl::kReadWrite);
+ }
+ }
+ }
+
std::ostringstream errorStream;
errorStream << "Tint HLSL failure:" << std::endl;
tint::transform::Manager transformManager;
transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
-
- tint::transform::FirstIndexOffset* firstOffsetTransform = nullptr;
if (stage == SingleShaderStage::Vertex) {
- auto transformer = std::make_unique<tint::transform::FirstIndexOffset>(
+ transformManager.append(std::make_unique<tint::transform::FirstIndexOffset>(
layout->GetFirstIndexOffsetShaderRegister(),
- layout->GetFirstIndexOffsetRegisterSpace());
- firstOffsetTransform = transformer.get();
- transformManager.append(std::move(transformer));
+ layout->GetFirstIndexOffsetRegisterSpace()));
+ }
+ transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+ transformManager.append(std::make_unique<tint::transform::Renamer>());
+ transformManager.append(std::make_unique<tint::transform::Hlsl>());
+
+ tint::transform::DataMap transformInputs;
+ transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+ std::move(accessControls));
+ tint::transform::Transform::Output output =
+ transformManager.Run(GetTintProgram(), transformInputs);
+
+ tint::Program& program = output.program;
+ if (!program.IsValid()) {
+ errorStream << "Tint program transform error: " << program.Diagnostics().str()
+ << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, mTintProgram.get()));
-
- if (firstOffsetTransform != nullptr) {
- // Functions are only available after transform has been performed
- firstOffsetInfo->usesVertexIndex = firstOffsetTransform->HasVertexIndex();
+ if (auto* data = output.data.Get<tint::transform::FirstIndexOffset::Data>()) {
+ firstOffsetInfo->usesVertexIndex = data->has_vertex_index;
if (firstOffsetInfo->usesVertexIndex) {
- firstOffsetInfo->vertexIndexOffset = firstOffsetTransform->GetFirstVertexOffset();
+ firstOffsetInfo->vertexIndexOffset = data->first_vertex_offset;
}
-
- firstOffsetInfo->usesInstanceIndex = firstOffsetTransform->HasInstanceIndex();
+ firstOffsetInfo->usesInstanceIndex = data->has_instance_index;
if (firstOffsetInfo->usesInstanceIndex) {
- firstOffsetInfo->instanceIndexOffset =
- firstOffsetTransform->GetFirstInstanceOffset();
+ firstOffsetInfo->instanceIndexOffset = data->first_instance_offset;
}
}
- ASSERT(remappedEntryPointName != nullptr);
- tint::inspector::Inspector inspector(&program);
- *remappedEntryPointName = inspector.GetRemappedNameForEntryPoint(entryPointName);
+ if (auto* data = output.data.Get<tint::transform::Renamer::Data>()) {
+ auto it = data->remappings.find(entryPointName);
+ if (it == data->remappings.end()) {
+ return DAWN_VALIDATION_ERROR("Could not find remapped name for entry point.");
+ }
+ *remappedEntryPointName = it->second;
+ } else {
+ return DAWN_VALIDATION_ERROR("Transform output missing renamer data.");
+ }
tint::writer::hlsl::Generator generator(&program);
// TODO: Switch to GenerateEntryPoint once HLSL writer supports it.
@@ -246,9 +297,6 @@ namespace dawn_native { namespace d3d12 {
}
return generator.result();
-#else
- return DAWN_VALIDATION_ERROR("Using Tint to generate HLSL is not supported.");
-#endif // DAWN_ENABLE_WGSL
}
ResultOrError<std::string> ShaderModule::TranslateToHLSLWithSPIRVCross(
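Note on the TranslateToHLSLWithTint() changes above: they build the inputs for Tint's BindingRemapper by mapping each (group, binding) pair seen in the shader to the register offset chosen by d3d12::BindGroupLayout, and force read-only storage buffers that the BGL declares as Storage to read-write access. A standalone sketch of just the remapping bookkeeping, with plain std types standing in for the Tint/Dawn ones:

    #include <cstdint>
    #include <map>
    #include <utility>

    // (group, binding) as written in the shader -> (group, shader register offset)
    // chosen by the bind group layout. Plain pairs stand in for
    // tint::transform::BindingPoint in this sketch.
    using BindingPoint = std::pair<uint32_t, uint32_t>;

    std::map<BindingPoint, BindingPoint> BuildRemappings(
        const std::map<BindingPoint, uint32_t>& bglRegisterOffsets) {
        std::map<BindingPoint, BindingPoint> remappings;
        for (const auto& entry : bglRegisterOffsets) {
            BindingPoint src = entry.first;                        // e.g. {group 0, binding 3}
            BindingPoint dst = {entry.first.first, entry.second};  // e.g. {group 0, register 1}
            if (src != dst) {
                remappings.emplace(src, dst);  // identity mappings are skipped, as in the patch
            }
        }
        return remappings;
    }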
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
index b3850bca254..98eed9e5a16 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
@@ -45,9 +45,9 @@ namespace dawn_native { namespace d3d12 {
class ShaderModule final : public ShaderModuleBase {
public:
- static ResultOrError<ShaderModule*> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
ResultOrError<CompiledShader> Compile(const char* entryPointName,
SingleShaderStage stage,
@@ -76,10 +76,6 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<uint64_t> GetDXCompilerVersion() const;
uint64_t GetD3DCompilerVersion() const;
-
-#ifdef DAWN_ENABLE_WGSL
- std::unique_ptr<tint::Program> mTintProgram;
-#endif
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index 4d890bfa109..8fd4554e7fd 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -21,6 +21,11 @@
namespace dawn_native { namespace d3d12 {
+ // static
+ Ref<SwapChain> SwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new SwapChain(device, descriptor));
+ }
+
SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
: OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index 6938e20adad..4083b04a144 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -23,9 +23,10 @@ namespace dawn_native { namespace d3d12 {
class SwapChain final : public OldSwapChainBase {
public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ static Ref<SwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
protected:
+ SwapChain(Device* device, const SwapChainDescriptor* descriptor);
~SwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
MaybeError OnBeforePresent(TextureViewBase* view) override;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
index d326489a56d..e5307cd7deb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
@@ -23,17 +23,13 @@ namespace dawn_native { namespace d3d12 {
namespace {
Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
uint32_t offset,
- uint32_t bytesPerRow,
- uint32_t slicePitch) {
+ uint32_t bytesPerRow) {
ASSERT(bytesPerRow != 0);
- ASSERT(slicePitch != 0);
uint32_t byteOffsetX = offset % bytesPerRow;
- offset -= byteOffsetX;
- uint32_t byteOffsetY = offset % slicePitch;
- uint32_t byteOffsetZ = offset - byteOffsetY;
+ uint32_t byteOffsetY = offset - byteOffsetX;
return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
- byteOffsetY / bytesPerRow * blockInfo.height, byteOffsetZ / slicePitch};
+ byteOffsetY / bytesPerRow * blockInfo.height, 0};
}
} // namespace
@@ -47,10 +43,15 @@ namespace dawn_native { namespace d3d12 {
ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+ // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
+ // preceding our data.
uint64_t alignedOffset =
offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
copy.offset = alignedOffset;
+
+ // If the provided offset to the data was already 512-aligned, we can simply copy the data
+ // without further translation.
if (offset == alignedOffset) {
copy.count = 1;
@@ -63,17 +64,36 @@ namespace dawn_native { namespace d3d12 {
copy.copies[0].bufferOffset.z = 0;
copy.copies[0].bufferSize = copySize;
- // Return early. There is only one copy needed because the offset is already 512-byte
- // aligned
return copy;
}
ASSERT(alignedOffset < offset);
ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
- uint32_t slicePitch = bytesPerRow * rowsPerImage;
+ // We must reinterpret our aligned offset into X and Y offsets with respect to the row
+ // pitch.
+ //
+ // You can visualize the data in the buffer like this:
+ // |-----------------------++++++++++++++++++++++++++++++++|
+ // ^ 512-aligned address ^ Aligned offset ^ End of copy data
+ //
+ // Now when you consider the row pitch, you can visualize the data like this:
+ // |~~~~~~~~~~~~~~~~|
+ // |~~~~~+++++++++++|
+ // |++++++++++++++++|
+ // |+++++~~~~~~~~~~~|
+ // |<---row pitch-->|
+ //
+ // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
+ // |YYYYYYYYYYYYYYYY|
+ // |XXXXXX++++++++++|
+ // |++++++++++++++++|
+ // |++++++~~~~~~~~~~|
+ // |<---row pitch-->|
Origin3D texelOffset = ComputeTexelOffsets(
- blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow, slicePitch);
+ blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
+
+ ASSERT(texelOffset.z == 0);
uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
@@ -111,7 +131,7 @@ namespace dawn_native { namespace d3d12 {
copy.copies[0].bufferOffset = texelOffset;
copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
copy.copies[0].bufferSize.height = rowsPerImageInTexels + texelOffset.y;
- copy.copies[0].bufferSize.depth = copySize.depth + texelOffset.z;
+ copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
return copy;
}
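Note on the hunks above: the rewritten ComputeTexelOffsets drops the slice pitch entirely. The delta between the requested offset and the previous 512-byte-aligned address is split only into an X byte offset (delta % bytesPerRow) and a Y byte offset (the remainder), with Z fixed at 0. A small standalone version with a concrete worked example (the numbers are chosen here for illustration, not taken from Dawn tests):

    #include <cassert>
    #include <cstdint>

    struct Offsets { uint32_t x, y, z; };

    // Mirrors the shape of the patched ComputeTexelOffsets for an uncompressed
    // 4-byte-per-texel format (block width = block height = 1).
    Offsets SplitUnalignedOffset(uint64_t offset, uint32_t bytesPerRow, uint32_t texelByteSize) {
        const uint64_t aligned = offset & ~static_cast<uint64_t>(512 - 1);  // D3D12 placement alignment
        const uint32_t delta = static_cast<uint32_t>(offset - aligned);
        const uint32_t byteOffsetX = delta % bytesPerRow;
        const uint32_t byteOffsetY = delta - byteOffsetX;
        return {byteOffsetX / texelByteSize, byteOffsetY / bytesPerRow, 0};
    }

    int main() {
        // offset 1300 -> previous 512-aligned address is 1024, delta = 276.
        // With 256 bytes per row and 4-byte texels: X = 276 % 256 = 20 bytes (5 texels),
        // Y = 276 - 20 = 256 bytes (1 row), Z = 0.
        Offsets o = SplitUnalignedOffset(1300, 256, 4);
        assert(o.x == 5 && o.y == 1 && o.z == 0);
        return 0;
    }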
@@ -158,12 +178,12 @@ namespace dawn_native { namespace d3d12 {
uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
copy.copies[0].copySize.height = copySize.height;
- copy.copies[0].copySize.depth = copySize.depth;
+ copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
copy.copies[0].bufferOffset = texelOffset;
copy.copies[0].bufferSize.width = texelsPerRow;
copy.copies[0].bufferSize.height = rowsPerImageInTexels + texelOffset.y;
- copy.copies[0].bufferSize.depth = copySize.depth + texelOffset.z;
+ copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
copy.copies[1].textureOffset.y = origin.y;
@@ -172,14 +192,14 @@ namespace dawn_native { namespace d3d12 {
ASSERT(copySize.width > copy.copies[0].copySize.width);
copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
copy.copies[1].copySize.height = copySize.height;
- copy.copies[1].copySize.depth = copySize.depth;
+ copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
copy.copies[1].bufferOffset.x = 0;
copy.copies[1].bufferOffset.y = texelOffset.y + blockInfo.height;
- copy.copies[1].bufferOffset.z = texelOffset.z;
+ copy.copies[1].bufferOffset.z = 0;
copy.copies[1].bufferSize.width = copy.copies[1].copySize.width;
copy.copies[1].bufferSize.height = rowsPerImageInTexels + texelOffset.y + blockInfo.height;
- copy.copies[1].bufferSize.depth = copySize.depth + texelOffset.z;
+ copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
return copy;
}
@@ -189,7 +209,8 @@ namespace dawn_native { namespace d3d12 {
const TexelBlockInfo& blockInfo,
uint64_t offset,
uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
+ uint32_t rowsPerImage,
+ bool is3DTexture) {
TextureCopySplits copies;
const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;
@@ -205,15 +226,19 @@ namespace dawn_native { namespace d3d12 {
// slice. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
// share the same copy split, so in this situation we just need to compute copy split once
// and reuse it for all the slices.
- const dawn_native::Extent3D copyOneLayerSize = {copySize.width, copySize.height, 1};
- const dawn_native::Origin3D copyFirstLayerOrigin = {origin.x, origin.y, 0};
+ Extent3D copyOneLayerSize = copySize;
+ Origin3D copyFirstLayerOrigin = origin;
+ if (!is3DTexture) {
+ copyOneLayerSize.depthOrArrayLayers = 1;
+ copyFirstLayerOrigin.z = 0;
+ }
copies.copies2D[0] = ComputeTextureCopySplit(copyFirstLayerOrigin, copyOneLayerSize,
blockInfo, offset, bytesPerRow, rowsPerImage);
- // When the copy only refers one texture 2D array layer copies.copies2D[1] will never be
- // used so we can safely early return here.
- if (copySize.depth == 1) {
+ // When the copy only refers to one 2D texture array layer or to a 3D texture,
+ // copies.copies2D[1] will never be used, so we can safely return early here.
+ if (copySize.depthOrArrayLayers == 1 || is3DTexture) {
return copies;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
index 962c33239df..f4bdb7b6d6c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
@@ -61,7 +61,8 @@ namespace dawn_native { namespace d3d12 {
const TexelBlockInfo& blockInfo,
uint64_t offset,
uint32_t bytesPerRow,
- uint32_t rowsPerImage);
+ uint32_t rowsPerImage,
+ bool is3DTexture = false);
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
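Note on the new is3DTexture parameter above: for 2D (array) textures the split is still computed per layer, so the copy size is clamped to one layer and the layer index is applied separately, while a 3D texture keeps its full depth and is handled by a single split. A sketch of just that size/origin adjustment, with stand-in structs rather than dawn_native types:

    #include <cstdint>

    struct Extent3D { uint32_t width, height, depthOrArrayLayers; };
    struct Origin3D { uint32_t x, y, z; };

    // Adjustment applied before computing the first per-layer split.
    void AdjustForFirstSplit(Extent3D* copySize, Origin3D* origin, bool is3DTexture) {
        if (!is3DTexture) {
            copySize->depthOrArrayLayers = 1;  // one array layer per split
            origin->z = 0;                     // the layer index is passed to the copy separately
        }
        // For a 3D texture both values are left untouched: one split covers all depth slices.
    }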
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index 960b09a68c2..fdcd43e4843 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -344,7 +344,7 @@ namespace dawn_native { namespace d3d12 {
return DAWN_VALIDATION_ERROR("Mip level count must be 1");
}
- if (descriptor->size.depth != 1) {
+ if (descriptor->size.depthOrArrayLayers != 1) {
return DAWN_VALIDATION_ERROR("Depth must be 1");
}
@@ -360,7 +360,7 @@ namespace dawn_native { namespace d3d12 {
const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
if ((dawnDescriptor->size.width != d3dDescriptor.Width) ||
(dawnDescriptor->size.height != d3dDescriptor.Height) ||
- (dawnDescriptor->size.depth != 1)) {
+ (dawnDescriptor->size.depthOrArrayLayers != 1)) {
return DAWN_VALIDATION_ERROR("D3D12 texture size doesn't match descriptor");
}
@@ -418,27 +418,25 @@ namespace dawn_native { namespace d3d12 {
}
// static
- ResultOrError<Ref<Texture>> Texture::Create(Device* device,
- const ExternalImageDescriptor* descriptor,
- HANDLE sharedHandle,
- ExternalMutexSerial acquireMutexKey,
- bool isSwapChainTexture) {
- const TextureDescriptor* textureDescriptor =
- reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
-
+ ResultOrError<Ref<Texture>> Texture::CreateExternalImage(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ ExternalMutexSerial acquireMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized) {
Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
- DAWN_TRY(dawnTexture->InitializeAsExternalTexture(textureDescriptor, sharedHandle,
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsExternalTexture(descriptor, std::move(d3d12Texture),
acquireMutexKey, isSwapChainTexture));
// Importing a multi-planar format must be initialized. This is required because
// a shared multi-planar format cannot be initialized by Dawn.
- if (!descriptor->isInitialized && dawnTexture->GetFormat().IsMultiPlanar()) {
+ if (!isInitialized && dawnTexture->GetFormat().IsMultiPlanar()) {
return DAWN_VALIDATION_ERROR(
"Cannot create a multi-planar formatted texture without being initialized");
}
- dawnTexture->SetIsSubresourceContentInitialized(descriptor->isInitialized,
+ dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
dawnTexture->GetAllSubresources());
return std::move(dawnTexture);
}
@@ -454,30 +452,13 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
- HANDLE sharedHandle,
+ ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
bool isSwapChainTexture) {
Device* dawnDevice = ToBackend(GetDevice());
- DAWN_TRY(ValidateTextureDescriptor(dawnDevice, descriptor));
- DAWN_TRY(ValidateTextureDescriptorCanBeWrapped(descriptor));
-
- ComPtr<ID3D12Resource> d3d12Resource;
- DAWN_TRY(CheckHRESULT(dawnDevice->GetD3D12Device()->OpenSharedHandle(
- sharedHandle, IID_PPV_ARGS(&d3d12Resource)),
- "D3D12 opening shared handle"));
-
- DAWN_TRY(ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), descriptor));
-
- // Shared handle is assumed to support resource sharing capability. The resource
- // shared capability tier must agree to share resources between D3D devices.
- if (GetFormat().IsMultiPlanar()) {
- DAWN_TRY(ValidateD3D12VideoTextureCanBeShared(ToBackend(GetDevice()),
- D3D12TextureFormat(descriptor->format)));
- }
ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
- DAWN_TRY_ASSIGN(dxgiKeyedMutex,
- dawnDevice->CreateKeyedMutexForTexture(d3d12Resource.Get()));
+ DAWN_TRY_ASSIGN(dxgiKeyedMutex, dawnDevice->CreateKeyedMutexForTexture(d3d12Texture.Get()));
DAWN_TRY(CheckHRESULT(dxgiKeyedMutex->AcquireSync(uint64_t(acquireMutexKey), INFINITE),
"D3D12 acquiring shared mutex"));
@@ -491,7 +472,7 @@ namespace dawn_native { namespace d3d12 {
// When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
// texture is owned externally. The texture's owning entity must remain responsible for
// memory management.
- mResourceAllocation = {info, 0, std::move(d3d12Resource), nullptr};
+ mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
return {};
}
@@ -504,7 +485,7 @@ namespace dawn_native { namespace d3d12 {
const Extent3D& size = GetSize();
resourceDescriptor.Width = size.width;
resourceDescriptor.Height = size.height;
- resourceDescriptor.DepthOrArraySize = size.depth;
+ resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
// This will need to be much more nuanced when WebGPU has
// texture view compatibility rules.
@@ -586,6 +567,11 @@ namespace dawn_native { namespace d3d12 {
device->DeallocateMemory(mResourceAllocation);
+ // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
+ // We can set mSwapChainTexture to false to avoid passing a nullptr to
+ // ID3D12SharingContract::Present.
+ mSwapChainTexture = false;
+
if (mDxgiKeyedMutex != nullptr) {
mDxgiKeyedMutex->ReleaseSync(uint64_t(mAcquireMutexKey) + 1);
device->ReleaseKeyedMutexForTexture(std::move(mDxgiKeyedMutex));
@@ -781,8 +767,7 @@ namespace dawn_native { namespace d3d12 {
const ExecutionSerial pendingCommandSerial =
ToBackend(GetDevice())->GetPendingCommandSerial();
- // This transitions assume it is a 2D texture
- ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+ ASSERT(GetDimension() != wgpu::TextureDimension::e1D);
mSubresourceStateAndDecay.Update(
range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
@@ -998,23 +983,10 @@ namespace dawn_native { namespace d3d12 {
continue;
}
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(this, level, layer, aspect);
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- Texture2DCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- this, ToBackend(uploadHandle.stagingBuffer)->GetResource(),
- info.bufferSize, copySplit.offset, bytesPerRow, aspect);
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- // copy the buffer filled with clear color to the texture
- commandList->CopyTextureRegion(
- &textureLocation, info.textureOffset.x, info.textureOffset.y,
- info.textureOffset.z, &bufferLocation, &sourceRegion);
- }
+ RecordCopyBufferToTextureFromTextureCopySplit(
+ commandList, copySplit,
+ ToBackend(uploadHandle.stagingBuffer)->GetResource(), 0, bytesPerRow,
+ this, level, layer, aspect);
}
}
}
@@ -1044,6 +1016,12 @@ namespace dawn_native { namespace d3d12 {
isValidToDecay == other.isValidToDecay;
}
+ // static
+ Ref<TextureView> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+ }
+
TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
: TextureViewBase(texture, descriptor) {
mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
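Note on the TextureD3D12.cpp changes above: Texture::CreateExternalImage now receives an already-opened ComPtr<ID3D12Resource> plus an explicit isInitialized flag instead of the raw shared HANDLE, so opening and validating the handle happens before the texture is constructed. A hedged sketch of what the caller side of that split might look like; the helper name and error handling are illustrative, not the actual Dawn call site:

    #include <d3d12.h>
    #include <wrl/client.h>

    using Microsoft::WRL::ComPtr;

    // Illustrative helper (assumption): open a shared NT handle into an
    // ID3D12Resource before handing it to a CreateExternalImage-style factory.
    ComPtr<ID3D12Resource> OpenSharedD3D12Resource(ID3D12Device* device, HANDLE sharedHandle) {
        ComPtr<ID3D12Resource> resource;
        if (FAILED(device->OpenSharedHandle(sharedHandle, IID_PPV_ARGS(&resource)))) {
            return nullptr;  // the caller reports this as a validation error
        }
        return resource;
    }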
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 2e2e089e641..528dd216984 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -33,16 +33,18 @@ namespace dawn_native { namespace d3d12 {
MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
const TextureDescriptor* descriptor);
MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
+ MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
class Texture final : public TextureBase {
public:
static ResultOrError<Ref<Texture>> Create(Device* device,
const TextureDescriptor* descriptor);
- static ResultOrError<Ref<Texture>> Create(Device* device,
- const ExternalImageDescriptor* descriptor,
- HANDLE sharedHandle,
- ExternalMutexSerial acquireMutexKey,
- bool isSwapChainTexture);
+ static ResultOrError<Ref<Texture>> CreateExternalImage(Device* device,
+ const TextureDescriptor* descriptor,
+ ComPtr<ID3D12Resource> d3d12Texture,
+ ExternalMutexSerial acquireMutexKey,
+ bool isSwapChainTexture,
+ bool isInitialized);
static ResultOrError<Ref<Texture>> Create(Device* device,
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture);
@@ -85,7 +87,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError InitializeAsInternalTexture();
MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
- HANDLE sharedHandle,
+ ComPtr<ID3D12Resource> d3d12Texture,
ExternalMutexSerial acquireMutexKey,
bool isSwapChainTexture);
MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
@@ -126,7 +128,8 @@ namespace dawn_native { namespace d3d12 {
class TextureView final : public TextureViewBase {
public:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+ static Ref<TextureView> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
DXGI_FORMAT GetD3D12Format() const;
@@ -136,6 +139,8 @@ namespace dawn_native { namespace d3d12 {
D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
private:
+ TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index fa950b58a61..b1902100785 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -93,7 +93,7 @@ namespace dawn_native { namespace d3d12 {
texture->GetD3D12CopyableSubresourceFormat(aspect);
bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
- bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depth;
+ bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
return bufferLocation;
}
@@ -105,7 +105,7 @@ namespace dawn_native { namespace d3d12 {
sourceRegion.front = offset.z;
sourceRegion.right = offset.x + copySize.width;
sourceRegion.bottom = offset.y + copySize.height;
- sourceRegion.back = offset.z + copySize.depth;
+ sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
return sourceRegion;
}
@@ -145,7 +145,7 @@ namespace dawn_native { namespace d3d12 {
void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
const Texture2DCopySplit& baseCopySplit,
ID3D12Resource* bufferResource,
- uint64_t baseOffsetBytes,
+ uint64_t baseOffset,
uint64_t bufferBytesPerRow,
Texture* texture,
uint32_t textureMiplevel,
@@ -155,7 +155,7 @@ namespace dawn_native { namespace d3d12 {
const D3D12_TEXTURE_COPY_LOCATION textureLocation =
ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
- const uint64_t offsetBytes = baseCopySplit.offset + baseOffsetBytes;
+ const uint64_t offsetBytes = baseCopySplit.offset + baseOffset;
for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
@@ -174,20 +174,20 @@ namespace dawn_native { namespace d3d12 {
}
}
- void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- const Extent3D& copySize,
- Texture* texture,
- ID3D12Resource* bufferResource,
- const uint64_t offsetBytes,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- Aspect aspect) {
+ void CopyBufferTo2DTextureWithCopySplit(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const Extent3D& copySize,
+ Texture* texture,
+ Aspect aspect) {
ASSERT(HasOneBit(aspect));
// See comments in ComputeTextureCopySplits() for more details.
const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(aspect).block;
const TextureCopySplits copySplits = ComputeTextureCopySplits(
- textureCopy.origin, copySize, blockInfo, offsetBytes, bytesPerRow, rowsPerImage);
+ textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;
@@ -201,7 +201,7 @@ namespace dawn_native { namespace d3d12 {
std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
{0u, 0u}};
- for (uint32_t copySlice = 0; copySlice < copySize.depth; ++copySlice) {
+ for (uint32_t copySlice = 0; copySlice < copySize.depthOrArrayLayers; ++copySlice) {
const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
@@ -217,4 +217,155 @@ namespace dawn_native { namespace d3d12 {
}
}
+ void CopyBufferTo3DTexture(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const Extent3D& copySize,
+ Texture* texture,
+ Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ // See comments in ComputeTextureCopySplits() for more details.
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(aspect).block;
+ const TextureCopySplits copySplits = ComputeTextureCopySplits(
+ textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage, true);
+
+ RecordCopyBufferToTextureFromTextureCopySplit(
+ commandContext->GetCommandList(), copySplits.copies2D[0], bufferResource, 0,
+ bytesPerRow, texture, textureCopy.mipLevel, textureCopy.origin.z, aspect);
+ }
+
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const Extent3D& copySize,
+ Texture* texture,
+ Aspect aspect) {
+ // Record the CopyTextureRegion commands for 3D textures. Multiple depths of 3D
+ // textures can be copied in one shot and copySplits are not needed.
+ if (texture->GetDimension() == wgpu::TextureDimension::e3D) {
+ CopyBufferTo3DTexture(commandContext, textureCopy, bufferResource, offset, bytesPerRow,
+ rowsPerImage, copySize, texture, aspect);
+ } else {
+ // Compute the copySplits and record the CopyTextureRegion commands for 2D
+ // textures.
+ CopyBufferTo2DTextureWithCopySplit(commandContext, textureCopy, bufferResource, offset,
+ bytesPerRow, rowsPerImage, copySize, texture,
+ aspect);
+ }
+ }
+
+ void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+ const Texture2DCopySplit& baseCopySplit,
+ Buffer* buffer,
+ uint64_t baseOffset,
+ uint64_t bufferBytesPerRow,
+ Texture* texture,
+ uint32_t textureMiplevel,
+ uint32_t textureSlice,
+ Aspect aspect) {
+ const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
+
+ const uint64_t offset = baseCopySplit.offset + baseOffset;
+
+ for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+ const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+
+ // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+ // members in Texture2DCopySplit::CopyInfo.
+ const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
+ info.bufferSize, offset,
+ bufferBytesPerRow, aspect);
+ const D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+ info.bufferOffset.y, info.bufferOffset.z,
+ &textureLocation, &sourceRegion);
+ }
+ }
+
+ void Copy2DTextureToBufferWithCopySplit(ID3D12GraphicsCommandList* commandList,
+ const TextureCopy& textureCopy,
+ const BufferCopy& bufferCopy,
+ Texture* texture,
+ Buffer* buffer,
+ const Extent3D& copySize) {
+ ASSERT(HasOneBit(textureCopy.aspect));
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+
+ // See comments around ComputeTextureCopySplits() for more details.
+ const TextureCopySplits copySplits =
+ ComputeTextureCopySplits(textureCopy.origin, copySize, blockInfo, bufferCopy.offset,
+ bufferCopy.bytesPerRow, bufferCopy.rowsPerImage);
+
+ const uint64_t bytesPerSlice = bufferCopy.bytesPerRow * bufferCopy.rowsPerImage;
+
+ // copySplits.copies2D[1] is always calculated for the second copy slice with
+ // extra "bytesPerSlice" copy offset compared with the first copy slice. So
+ // here we use an array bufferOffsetsForNextSlice to record the extra offsets
+ // for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
+ // the next copy slice that uses copySplits.copies2D[0], and
+ // bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
+ // that uses copySplits.copies2D[1].
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
+ {0u, 0u}};
+ for (uint32_t copySlice = 0; copySlice < copySize.depthOrArrayLayers; ++copySlice) {
+ const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
+
+ const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
+ const uint64_t bufferOffsetForNextSlice = bufferOffsetsForNextSlice[splitIndex];
+ const uint32_t copyTextureLayer = copySlice + textureCopy.origin.z;
+
+ RecordCopyTextureToBufferFromTextureCopySplit(
+ commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextSlice,
+ bufferCopy.bytesPerRow, texture, textureCopy.mipLevel, copyTextureLayer,
+ textureCopy.aspect);
+
+ bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * copySplits.copies2D.size();
+ }
+ }
+
+ void Copy3DTextureToBuffer(ID3D12GraphicsCommandList* commandList,
+ const TextureCopy& textureCopy,
+ const BufferCopy& bufferCopy,
+ Texture* texture,
+ Buffer* buffer,
+ const Extent3D& copySize) {
+ ASSERT(HasOneBit(textureCopy.aspect));
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+
+ // See comments around ComputeTextureCopySplits() for more details.
+ const TextureCopySplits copySplits =
+ ComputeTextureCopySplits(textureCopy.origin, copySize, blockInfo, bufferCopy.offset,
+ bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, true);
+
+ RecordCopyTextureToBufferFromTextureCopySplit(
+ commandList, copySplits.copies2D[0], buffer, 0, bufferCopy.bytesPerRow, texture,
+ textureCopy.mipLevel, textureCopy.origin.z, textureCopy.aspect);
+ }
+
+ void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
+ const TextureCopy& textureCopy,
+ const BufferCopy& bufferCopy,
+ Texture* texture,
+ Buffer* buffer,
+ const Extent3D& copySize) {
+ if (texture->GetDimension() == wgpu::TextureDimension::e3D) {
+ Copy3DTextureToBuffer(commandList, textureCopy, bufferCopy, texture, buffer, copySize);
+ } else {
+ Copy2DTextureToBufferWithCopySplit(commandList, textureCopy, bufferCopy, texture,
+ buffer, copySize);
+ }
+ }
+
}} // namespace dawn_native::d3d12
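Note on Copy2DTextureToBufferWithCopySplit above: the loop alternates between the two precomputed 2D splits and bumps each split's extra buffer offset by bytesPerSlice * copies2D.size() every time that split is reused, so slice N always starts N * bytesPerSlice into the buffer. A small standalone walk-through of that offset bookkeeping:

    #include <array>
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t bytesPerSlice = 4096;
        const size_t splitCount = 2;  // copySplits.copies2D.size() in the patch
        const uint32_t sliceCount = 5;

        std::array<uint64_t, 2> bufferOffsetsForNextSlice = {0u, 0u};
        for (uint32_t slice = 0; slice < sliceCount; ++slice) {
            const size_t splitIndex = slice % splitCount;
            const uint64_t extraOffset = bufferOffsetsForNextSlice[splitIndex];

            // copies2D[1] already carries a built-in bytesPerSlice offset relative to
            // copies2D[0], so every slice begins exactly slice * bytesPerSlice into the buffer.
            assert(extraOffset + splitIndex * bytesPerSlice == slice * bytesPerSlice);

            bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * splitCount;
        }
        return 0;
    }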
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index 6109c0f60b3..719a19a8544 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -46,6 +46,26 @@ namespace dawn_native { namespace d3d12 {
void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
const Texture2DCopySplit& baseCopySplit,
+ ID3D12Resource* bufferResource,
+ uint64_t baseOffset,
+ uint64_t bufferBytesPerRow,
+ Texture* texture,
+ uint32_t textureMiplevel,
+ uint32_t textureSlice,
+ Aspect aspect);
+
+ void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ const Extent3D& copySize,
+ Texture* texture,
+ Aspect aspect);
+
+ void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+ const Texture2DCopySplit& baseCopySplit,
Buffer* buffer,
uint64_t baseOffset,
uint64_t bufferBytesPerRow,
@@ -54,15 +74,12 @@ namespace dawn_native { namespace d3d12 {
uint32_t textureSlice,
Aspect aspect);
- void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
- const TextureCopy& textureCopy,
- const Extent3D& copySize,
- Texture* texture,
- ID3D12Resource* bufferResource,
- const uint64_t offset,
- const uint32_t bytesPerRow,
- const uint32_t rowsPerImage,
- Aspect aspect);
+ void RecordCopyTextureToBuffer(ID3D12GraphicsCommandList* commandList,
+ const TextureCopy& textureCopy,
+ const BufferCopy& bufferCopy,
+ Texture* texture,
+ Buffer* buffer,
+ const Extent3D& copySize);
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
index 06375141197..dcf01f95f25 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
@@ -227,6 +227,9 @@ namespace dawn_native { namespace metal {
// See https://github.com/gpuweb/gpuweb/issues/1325
}
}
+ if (@available(macOS 10.11, iOS 11.0, *)) {
+ mSupportedExtensions.EnableExtension(Extension::DepthClamping);
+ }
mSupportedExtensions.EnableExtension(Extension::ShaderFloat16);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
index c6777db7a7c..edd9c35b1c9 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
@@ -25,13 +25,16 @@ namespace dawn_native { namespace metal {
class BindGroupLayout final : public BindGroupLayoutBase {
public:
- BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+ static Ref<BindGroupLayout> Create(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor);
- BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup);
private:
+ BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
~BindGroupLayout() override = default;
+
SlabAllocator<BindGroup> mBindGroupAllocator;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
index 70beb5d5374..535979bb835 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
@@ -18,15 +18,21 @@
namespace dawn_native { namespace metal {
+ // static
+ Ref<BindGroupLayout> BindGroupLayout::Create(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor) {
+ return AcquireRef(new BindGroupLayout(device, descriptor));
+ }
+
BindGroupLayout::BindGroupLayout(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor)
: BindGroupLayoutBase(device, descriptor),
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
}
- BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return mBindGroupAllocator.Allocate(device, descriptor);
+ Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
}
void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
index 791dbf94182..29a3f4e82ca 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
@@ -24,9 +24,9 @@ namespace dawn_native { namespace metal {
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
- static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
private:
~BindGroup() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
index d8bcd515d97..48571ecbb70 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
@@ -27,7 +27,7 @@ namespace dawn_native { namespace metal {
}
// static
- BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
index 3410e57954d..4283a13a2cf 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
@@ -31,11 +31,14 @@ namespace dawn_native { namespace metal {
class CommandBuffer final : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
MaybeError FillCommands(CommandRecordingContext* commandContext);
private:
+ using CommandBufferBase::CommandBufferBase;
+
MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
MTLRenderPassDescriptor* mtlRenderPass,
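Note on the CommandBufferMTL.h change above: the class drops its own constructor in favor of `using CommandBufferBase::CommandBufferBase;`, so construction goes through the inherited base constructor inside the new static Create() factory. A two-class sketch of that C++ idiom, with stand-in types and std::unique_ptr instead of Dawn's Ref:

    #include <memory>

    struct Encoder {};     // stand-in for CommandEncoder
    struct Descriptor {};  // stand-in for CommandBufferDescriptor

    class CommandBufferBase {
      public:
        CommandBufferBase(Encoder* encoder, const Descriptor* desc) : mEncoder(encoder), mDesc(desc) {}

      private:
        Encoder* mEncoder;
        const Descriptor* mDesc;
    };

    class CommandBuffer final : public CommandBufferBase {
      public:
        static std::unique_ptr<CommandBuffer> Create(Encoder* encoder, const Descriptor* desc) {
            return std::unique_ptr<CommandBuffer>(new CommandBuffer(encoder, desc));
        }

      private:
        using CommandBufferBase::CommandBufferBase;  // inherit the (encoder, descriptor) constructor
    };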
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index 58b85fb6259..71f0d0f1280 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -284,7 +284,7 @@ namespace dawn_native { namespace metal {
->GetBufferBindingCount(SingleShaderStage::Vertex);
if (enableVertexPulling) {
- bufferCount += pipeline->GetVertexStateDescriptor()->vertexBufferCount;
+ bufferCount += pipeline->GetVertexBufferCount();
}
[render setVertexBytes:data[SingleShaderStage::Vertex].data()
@@ -544,8 +544,10 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
- CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor) {
+ // static
+ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
}
MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
@@ -638,7 +640,7 @@ namespace dawn_native { namespace metal {
const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
- const uint32_t copyLayerCount = copyInfo.copyExtent.depth;
+ const uint32_t copyLayerCount = copyInfo.copyExtent.depthOrArrayLayers;
const MTLOrigin textureOrigin =
MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
const MTLSize copyExtent =
@@ -688,7 +690,7 @@ namespace dawn_native { namespace metal {
const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
const uint32_t copyBaseLayer = copyInfo.textureOrigin.z;
- const uint32_t copyLayerCount = copyInfo.copyExtent.depth;
+ const uint32_t copyLayerCount = copyInfo.copyExtent.depthOrArrayLayers;
const MTLOrigin textureOrigin =
MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
const MTLSize copyExtent =
@@ -738,7 +740,7 @@ namespace dawn_native { namespace metal {
const MTLOrigin destinationOriginNoLayer =
MTLOriginMake(copy->destination.origin.x, copy->destination.origin.y, 0);
- for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
+ for (uint32_t slice = 0; slice < copy->copySize.depthOrArrayLayers; ++slice) {
[commandContext->EnsureBlit()
copyFromTexture:srcTexture->GetMTLTexture()
sourceSlice:copy->source.origin.z + slice
@@ -1191,6 +1193,11 @@ namespace dawn_native { namespace metal {
[encoder setDepthBias:newPipeline->GetDepthBias()
slopeScale:newPipeline->GetDepthBiasSlopeScale()
clamp:newPipeline->GetDepthBiasClamp()];
+ if (@available(macOS 10.11, iOS 11.0, *)) {
+ MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth() ?
+ MTLDepthClipModeClamp : MTLDepthClipModeClip;
+ [encoder setDepthClipMode:clipMode];
+ }
newPipeline->Encode(encoder);
lastPipeline = newPipeline;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
index f9e874d659b..3ff70b18093 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
@@ -27,8 +27,9 @@ namespace dawn_native { namespace metal {
class ComputePipeline final : public ComputePipelineBase {
public:
- static ResultOrError<ComputePipeline*> Create(Device* device,
- const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<Ref<ComputePipeline>> Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor);
void Encode(id<MTLComputeCommandEncoder> encoder);
MTLSize GetLocalWorkGroupSize() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
index 7de57bdf203..0845abf0c61 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
@@ -20,12 +20,12 @@
namespace dawn_native { namespace metal {
// static
- ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ ResultOrError<Ref<ComputePipeline>> ComputePipeline::Create(
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index f886fcd32d4..04e5f909abb 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -41,9 +41,6 @@ namespace dawn_native { namespace metal {
MaybeError Initialize();
- CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
-
MaybeError TickImpl() override;
id<MTLDevice> GetMTLDevice();
@@ -78,40 +75,44 @@ namespace dawn_native { namespace metal {
NSPRef<id<MTLDevice>> mtlDevice,
const DeviceDescriptor* descriptor);
- ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
- ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
- ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
- ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) override;
- ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
- ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) override;
ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) override;
- ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
void InitTogglesFromDriver();
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;
- ExecutionSerial CheckAndUpdateCompletedSerials() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
NSPRef<id<MTLDevice>> mMtlDevice;
NSPRef<id<MTLCommandQueue>> mCommandQueue;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index e570639ce00..5a8b3a1cf93 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -117,64 +117,66 @@ namespace dawn_native { namespace metal {
}
}
- ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
return BindGroup::Create(this, descriptor);
}
- ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
- return new BindGroupLayout(this, descriptor);
+ return BindGroupLayout::Create(this, descriptor);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return CommandBuffer::Create(encoder, descriptor);
}
- ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
return ComputePipeline::Create(this, descriptor);
}
- ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
- return new PipelineLayout(this, descriptor);
+ return PipelineLayout::Create(this, descriptor);
}
- ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
+ ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) {
return RenderPipeline::Create(this, descriptor);
}
- ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return Sampler::Create(this, descriptor);
}
- ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
return ShaderModule::Create(this, descriptor, parseResult);
}
- ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new OldSwapChain(this, descriptor);
+ return OldSwapChain::Create(this, descriptor);
}
- ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
return SwapChain::Create(this, surface, previousSwapChain, descriptor);
}
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
- return AcquireRef(new Texture(this, descriptor));
+ return Texture::Create(this, descriptor);
}
- ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- return new TextureView(texture, descriptor);
+ return TextureView::Create(texture, descriptor);
}
- ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
if (frontendCompletedSerial > mCompletedSerial) {
// sometimes we increase the serials, in which case the completed serial in
@@ -313,7 +315,7 @@ namespace dawn_native { namespace metal {
const Extent3D clampedSize =
texture->ClampToMipLevelVirtualSize(dst->mipLevel, dst->origin, copySizePixels);
const uint32_t copyBaseLayer = dst->origin.z;
- const uint32_t copyLayerCount = copySizePixels.depth;
+ const uint32_t copyLayerCount = copySizePixels.depthOrArrayLayers;
const uint64_t bytesPerImage = dataLayout.rowsPerImage * dataLayout.bytesPerRow;
MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), dst->aspect);
@@ -373,12 +375,12 @@ namespace dawn_native { namespace metal {
MaybeError Device::WaitForIdleForDestruction() {
// Forget all pending commands.
mCommandContext.AcquireCommands();
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
// Wait for all commands to be finished so we can free resources
while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
usleep(100);
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
}
return {};
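
CheckAndUpdateCompletedSerials() and CheckPassedSerials() now report device errors through ResultOrError/MaybeError, so callers such as WaitForIdleForDestruction() wrap them in DAWN_TRY. A simplified sketch of that propagation pattern, using std::optional and a hypothetical TRY_SKETCH macro rather than Dawn's real MaybeError and DAWN_TRY:

    #include <cstdio>
    #include <optional>
    #include <string>

    using MaybeError = std::optional<std::string>;  // empty == success

    #define TRY_SKETCH(expr)                  \
        do {                                  \
            MaybeError _err = (expr);         \
            if (_err.has_value()) {           \
                return _err; /* propagate */  \
            }                                 \
        } while (0)

    MaybeError CheckPassedSerialsSketch(bool deviceLost) {
        if (deviceLost) {
            return std::string("GPU completion query failed");
        }
        return std::nullopt;
    }

    MaybeError WaitForIdleSketch(bool deviceLost) {
        TRY_SKETCH(CheckPassedSerialsSketch(deviceLost));  // was a plain void call before
        return std::nullopt;
    }

    int main() {
        MaybeError ok = WaitForIdleSketch(false);
        MaybeError bad = WaitForIdleSketch(true);
        std::printf("ok=%d bad=%d\n", !ok.has_value(), bad.has_value());
    }

The design consequence is visible throughout the diff: once serial checking can fail, every function on the call path (TickImpl, SubmitImpl, WaitForIdleForDestruction) has to return MaybeError itself.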
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
index c3ba537541a..0a0347d76d5 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
@@ -42,7 +42,8 @@ namespace dawn_native { namespace metal {
class PipelineLayout final : public PipelineLayoutBase {
public:
- PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+ static Ref<PipelineLayout> Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor);
using BindingIndexInfo =
ityp::array<BindGroupIndex,
@@ -54,6 +55,7 @@ namespace dawn_native { namespace metal {
uint32_t GetBufferBindingCount(SingleShaderStage stage);
private:
+ PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
~PipelineLayout() override = default;
PerStage<BindingIndexInfo> mIndexInfo;
PerStage<uint32_t> mBufferBindingCount;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index 3951ef539a2..34ddc44f2ff 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -20,6 +20,12 @@
namespace dawn_native { namespace metal {
+ // static
+ Ref<PipelineLayout> PipelineLayout::Create(Device* device,
+ const PipelineLayoutDescriptor* descriptor) {
+ return AcquireRef(new PipelineLayout(device, descriptor));
+ }
+
PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
: PipelineLayoutBase(device, descriptor) {
// Each stage has its own numbering namespace in CompilerMSL.
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h
index b0f84a62411..a7b1ad7fa34 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.h
@@ -27,8 +27,8 @@ namespace dawn_native { namespace metal {
class QuerySet final : public QuerySetBase {
public:
- static ResultOrError<QuerySet*> Create(Device* device,
- const QuerySetDescriptor* descriptor);
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
id<MTLBuffer> GetVisibilityBuffer() const;
id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm
index 472fdf011b3..ad9546d7a67 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QuerySetMTL.mm
@@ -62,11 +62,11 @@ namespace dawn_native { namespace metal {
}
// static
- ResultOrError<QuerySet*> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
DAWN_TRY(queryset->Initialize());
- return queryset.Detach();
+ return queryset;
}
MaybeError QuerySet::Initialize() {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
index 480ccd89b4b..bfa33ac2c15 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
@@ -31,7 +31,9 @@ namespace dawn_native { namespace metal {
MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
- device->Tick();
+
+ DAWN_TRY(device->Tick());
+
CommandRecordingContext* commandContext = device->GetPendingCommandContext();
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
index 9b3372bcf02..19ea2841bb4 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
@@ -27,8 +27,9 @@ namespace dawn_native { namespace metal {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<RenderPipeline*> Create(Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<Ref<RenderPipeline>> Create(
+ Device* device,
+ const RenderPipelineDescriptor2* descriptor);
MTLPrimitiveType GetMTLPrimitiveTopology() const;
MTLWinding GetMTLFrontFace() const;
@@ -46,7 +47,7 @@ namespace dawn_native { namespace metal {
private:
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+ MaybeError Initialize(const RenderPipelineDescriptor2* descriptor);
MTLVertexDescriptor* MakeVertexDesc();
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 1f690b4cfec..8da86184872 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -14,6 +14,7 @@
#include "dawn_native/metal/RenderPipelineMTL.h"
+#include "common/VertexFormatUtils.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/PipelineLayoutMTL.h"
#include "dawn_native/metal/ShaderModuleMTL.h"
@@ -25,66 +26,68 @@ namespace dawn_native { namespace metal {
namespace {
MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::Uint8x2:
return MTLVertexFormatUChar2;
- case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Uint8x4:
return MTLVertexFormatUChar4;
- case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Sint8x2:
return MTLVertexFormatChar2;
- case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::Sint8x4:
return MTLVertexFormatChar4;
- case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::Unorm8x2:
return MTLVertexFormatUChar2Normalized;
- case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Unorm8x4:
return MTLVertexFormatUChar4Normalized;
- case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Snorm8x2:
return MTLVertexFormatChar2Normalized;
- case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Snorm8x4:
return MTLVertexFormatChar4Normalized;
- case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::Uint16x2:
return MTLVertexFormatUShort2;
- case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Uint16x4:
return MTLVertexFormatUShort4;
- case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Sint16x2:
return MTLVertexFormatShort2;
- case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Sint16x4:
return MTLVertexFormatShort4;
- case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::Unorm16x2:
return MTLVertexFormatUShort2Normalized;
- case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Unorm16x4:
return MTLVertexFormatUShort4Normalized;
- case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Snorm16x2:
return MTLVertexFormatShort2Normalized;
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Snorm16x4:
return MTLVertexFormatShort4Normalized;
- case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Float16x2:
return MTLVertexFormatHalf2;
- case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float16x4:
return MTLVertexFormatHalf4;
- case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float32:
return MTLVertexFormatFloat;
- case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float32x2:
return MTLVertexFormatFloat2;
- case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float32x3:
return MTLVertexFormatFloat3;
- case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float32x4:
return MTLVertexFormatFloat4;
- case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::Uint32:
return MTLVertexFormatUInt;
- case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::Uint32x2:
return MTLVertexFormatUInt2;
- case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::Uint32x3:
return MTLVertexFormatUInt3;
- case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Uint32x4:
return MTLVertexFormatUInt4;
- case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Sint32:
return MTLVertexFormatInt;
- case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Sint32x2:
return MTLVertexFormatInt2;
- case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Sint32x3:
return MTLVertexFormatInt3;
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Sint32x4:
return MTLVertexFormatInt4;
+ default:
+ UNREACHABLE();
}
}
@@ -198,21 +201,23 @@ namespace dawn_native { namespace metal {
}
void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
- const ColorStateDescriptor* descriptor,
+ const ColorTargetState* state,
bool isDeclaredInFragmentShader) {
- attachment.blendingEnabled = BlendEnabled(descriptor);
- attachment.sourceRGBBlendFactor =
- MetalBlendFactor(descriptor->colorBlend.srcFactor, false);
- attachment.destinationRGBBlendFactor =
- MetalBlendFactor(descriptor->colorBlend.dstFactor, false);
- attachment.rgbBlendOperation = MetalBlendOperation(descriptor->colorBlend.operation);
- attachment.sourceAlphaBlendFactor =
- MetalBlendFactor(descriptor->alphaBlend.srcFactor, true);
- attachment.destinationAlphaBlendFactor =
- MetalBlendFactor(descriptor->alphaBlend.dstFactor, true);
- attachment.alphaBlendOperation = MetalBlendOperation(descriptor->alphaBlend.operation);
+ attachment.blendingEnabled = state->blend != nullptr;
+ if (attachment.blendingEnabled) {
+ attachment.sourceRGBBlendFactor =
+ MetalBlendFactor(state->blend->color.srcFactor, false);
+ attachment.destinationRGBBlendFactor =
+ MetalBlendFactor(state->blend->color.dstFactor, false);
+ attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
+ attachment.sourceAlphaBlendFactor =
+ MetalBlendFactor(state->blend->alpha.srcFactor, true);
+ attachment.destinationAlphaBlendFactor =
+ MetalBlendFactor(state->blend->alpha.dstFactor, true);
+ attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
+ }
attachment.writeMask =
- MetalColorWriteMask(descriptor->writeMask, isDeclaredInFragmentShader);
+ MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
}
MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
@@ -236,8 +241,7 @@ namespace dawn_native { namespace metal {
}
}
- NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(
- const DepthStencilStateDescriptor* descriptor) {
+ NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
AcquireNSRef([MTLDepthStencilDescriptor new]);
MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
@@ -306,15 +310,15 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
// static
- ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
Device* device,
- const RenderPipelineDescriptor* descriptor) {
+ const RenderPipelineDescriptor2* descriptor) {
Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor2* descriptor) {
mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
mMtlFrontFace = MTLFrontFace(GetFrontFace());
mMtlCullMode = ToMTLCullMode(GetCullMode());
@@ -334,24 +338,32 @@ namespace dawn_native { namespace metal {
}
descriptorMTL.vertexDescriptor = vertexDesc.Get();
- ShaderModule* vertexModule = ToBackend(descriptor->vertexStage.module);
- const char* vertexEntryPoint = descriptor->vertexStage.entryPoint;
+ ShaderModule* vertexModule = ToBackend(descriptor->vertex.module);
+ const char* vertexEntryPoint = descriptor->vertex.entryPoint;
ShaderModule::MetalFunctionData vertexData;
+
+ const VertexState* vertexStatePtr = &descriptor->vertex;
+ VertexState vertexState;
+ if (vertexStatePtr == nullptr) {
+ vertexState = {};
+ vertexStatePtr = &vertexState;
+ }
+
DAWN_TRY(vertexModule->CreateFunction(vertexEntryPoint, SingleShaderStage::Vertex,
- ToBackend(GetLayout()), &vertexData, 0xFFFFFFFF,
- this));
+ ToBackend(GetLayout()), &vertexData, 0xFFFFFFFF, this,
+ vertexStatePtr));
descriptorMTL.vertexFunction = vertexData.function.Get();
if (vertexData.needsStorageBufferLength) {
mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
}
- ShaderModule* fragmentModule = ToBackend(descriptor->fragmentStage->module);
- const char* fragmentEntryPoint = descriptor->fragmentStage->entryPoint;
+ ShaderModule* fragmentModule = ToBackend(descriptor->fragment->module);
+ const char* fragmentEntryPoint = descriptor->fragment->entryPoint;
ShaderModule::MetalFunctionData fragmentData;
DAWN_TRY(fragmentModule->CreateFunction(fragmentEntryPoint, SingleShaderStage::Fragment,
ToBackend(GetLayout()), &fragmentData,
- descriptor->sampleMask));
+ GetSampleMask()));
descriptorMTL.fragmentFunction = fragmentData.function.Get();
if (fragmentData.needsStorageBufferLength) {
@@ -376,14 +388,14 @@ namespace dawn_native { namespace metal {
for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
MetalPixelFormat(GetColorAttachmentFormat(i));
- const ColorStateDescriptor* descriptor = GetColorStateDescriptor(i);
+ const ColorTargetState* descriptor = GetColorTargetState(i);
ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)], descriptor,
fragmentOutputsWritten[i]);
}
descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
descriptorMTL.sampleCount = GetSampleCount();
- descriptorMTL.alphaToCoverageEnabled = descriptor->alphaToCoverageEnabled;
+ descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
{
NSError* error = nullptr;
@@ -400,7 +412,7 @@ namespace dawn_native { namespace metal {
// call setDepthStencilState() for a given render pipeline in CommandEncoder, in order to
// improve performance.
NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
- MakeDepthStencilDesc(GetDepthStencilStateDescriptor());
+ MakeDepthStencilDesc(GetDepthStencilState());
mMtlDepthStencilState =
AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
@@ -458,8 +470,9 @@ namespace dawn_native { namespace metal {
if (attrib.vertexBufferSlot != slot) {
continue;
}
- maxArrayStride = std::max(
- maxArrayStride, VertexFormatSize(attrib.format) + size_t(attrib.offset));
+ maxArrayStride =
+ std::max(maxArrayStride,
+ dawn::VertexFormatSize(attrib.format) + size_t(attrib.offset));
}
layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
layoutDesc.stepRate = 0;
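
The ComputeBlendDesc() change above reflects the move from ColorStateDescriptor to ColorTargetState, where blending is enabled by attaching an optional BlendState rather than by a separate flag. A small sketch with hypothetical structs that mirror that shape (not the real dawn_native or WebGPU headers):

    #include <cstdio>

    struct BlendComponent { int srcFactor = 1, dstFactor = 0, operation = 0; };
    struct BlendState { BlendComponent color; BlendComponent alpha; };

    struct ColorTargetState {
        const BlendState* blend = nullptr;  // nullptr means blending disabled
        unsigned writeMask = 0xF;
    };

    bool BlendingEnabled(const ColorTargetState& state) {
        return state.blend != nullptr;  // same check the Metal backend now performs
    }

    int main() {
        BlendState blend{};
        ColorTargetState opaque{};              // no blend state attached
        ColorTargetState blended{&blend, 0xF};  // blend state attached
        std::printf("%d %d\n", BlendingEnabled(opaque), BlendingEnabled(blended));
    }

Because the blend factors only exist when state->blend is non-null, the backend also has to guard every factor/operation read behind that check, which is exactly what the new ComputeBlendDesc() body does.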
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
index 535a0c7e43a..98565aedf39 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
@@ -27,7 +27,8 @@ namespace dawn_native { namespace metal {
class Sampler final : public SamplerBase {
public:
- static ResultOrError<Sampler*> Create(Device* device, const SamplerDescriptor* descriptor);
+ static ResultOrError<Ref<Sampler>> Create(Device* device,
+ const SamplerDescriptor* descriptor);
id<MTLSamplerState> GetMTLSamplerState();
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
index 34a5b1fe80e..f1c477961fc 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
@@ -51,13 +51,14 @@ namespace dawn_native { namespace metal {
}
// static
- ResultOrError<Sampler*> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+ const SamplerDescriptor* descriptor) {
if (descriptor->compare != wgpu::CompareFunction::Undefined &&
device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare)) {
return DAWN_VALIDATION_ERROR("Sampler compare function not supported.");
}
- return new Sampler(device, descriptor);
+ return AcquireRef(new Sampler(device, descriptor));
}
Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
index 621c198c3cc..2cfdf497127 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
@@ -34,9 +34,9 @@ namespace dawn_native { namespace metal {
class ShaderModule final : public ShaderModuleBase {
public:
- static ResultOrError<ShaderModule*> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
struct MetalFunctionData {
NSPRef<id<MTLFunction>> function;
@@ -47,7 +47,8 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
MetalFunctionData* out,
uint32_t sampleMask = 0xFFFFFFFF,
- const RenderPipeline* renderPipeline = nullptr);
+ const RenderPipeline* renderPipeline = nullptr,
+ const VertexState* vertexState = nullptr);
private:
ResultOrError<std::string> TranslateToMSLWithTint(const char* entryPointName,
@@ -55,6 +56,7 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
uint32_t sampleMask,
const RenderPipeline* renderPipeline,
+ const VertexState* vertexState,
std::string* remappedEntryPointName,
bool* needsStorageBufferLength);
ResultOrError<std::string> TranslateToMSLWithSPIRVCross(
@@ -63,16 +65,13 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
uint32_t sampleMask,
const RenderPipeline* renderPipeline,
+ const VertexState* vertexState,
std::string* remappedEntryPointName,
bool* needsStorageBufferLength);
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
-
-#ifdef DAWN_ENABLE_WGSL
- std::unique_ptr<tint::Program> mTintProgram;
-#endif
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index 11241e9cd16..d302ce7fc80 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -16,31 +16,30 @@
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/SpirvUtils.h"
+#include "dawn_native/TintUtils.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/PipelineLayoutMTL.h"
#include "dawn_native/metal/RenderPipelineMTL.h"
#include <spirv_msl.hpp>
-#ifdef DAWN_ENABLE_WGSL
// Tint include must be after spirv_msl.hpp, because spirv-cross has its own
// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
// is at 3 while spirv-headers is at 4.
-# undef SPV_REVISION
-# include <tint/tint.h>
-#endif // DAWN_ENABLE_WGSL
+#undef SPV_REVISION
+#include <tint/tint.h>
#include <sstream>
namespace dawn_native { namespace metal {
// static
- ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
DAWN_TRY(module->Initialize(parseResult));
- return module.Detach();
+ return module;
}
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
@@ -48,11 +47,8 @@ namespace dawn_native { namespace metal {
}
MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- DAWN_TRY(InitializeBase(parseResult));
-#ifdef DAWN_ENABLE_WGSL
- mTintProgram = std::move(parseResult->tintProgram);
-#endif
- return {};
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+ return InitializeBase(parseResult);
}
ResultOrError<std::string> ShaderModule::TranslateToMSLWithTint(
@@ -62,12 +58,14 @@ namespace dawn_native { namespace metal {
// TODO(crbug.com/tint/387): AND in a fixed sample mask in the shader.
uint32_t sampleMask,
const RenderPipeline* renderPipeline,
+ const VertexState* vertexState,
std::string* remappedEntryPointName,
bool* needsStorageBufferLength) {
-#if DAWN_ENABLE_WGSL
// TODO(crbug.com/tint/256): Set this accordingly if arrayLength(..) is used.
*needsStorageBufferLength = false;
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
std::ostringstream errorStream;
errorStream << "Tint MSL failure:" << std::endl;
@@ -75,8 +73,7 @@ namespace dawn_native { namespace metal {
if (stage == SingleShaderStage::Vertex &&
GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
transformManager.append(
- MakeVertexPullingTransform(*renderPipeline->GetVertexStateDescriptor(),
- entryPointName, kPullingBufferBindingSet));
+ MakeVertexPullingTransform(*vertexState, entryPointName, kPullingBufferBindingSet));
for (VertexBufferSlot slot :
IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
@@ -87,13 +84,27 @@ namespace dawn_native { namespace metal {
}
}
transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
+ transformManager.append(std::make_unique<tint::transform::Renamer>());
+ transformManager.append(std::make_unique<tint::transform::Msl>());
- tint::Program program;
- DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, mTintProgram.get()));
+ tint::transform::Transform::Output output = transformManager.Run(GetTintProgram());
- ASSERT(remappedEntryPointName != nullptr);
- tint::inspector::Inspector inspector(&program);
- *remappedEntryPointName = inspector.GetRemappedNameForEntryPoint(entryPointName);
+ tint::Program& program = output.program;
+ if (!program.IsValid()) {
+ errorStream << "Tint program transform error: " << program.Diagnostics().str()
+ << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ if (auto* data = output.data.Get<tint::transform::Renamer::Data>()) {
+ auto it = data->remappings.find(entryPointName);
+ if (it == data->remappings.end()) {
+ return DAWN_VALIDATION_ERROR("Could not find remapped name for entry point.");
+ }
+ *remappedEntryPointName = it->second;
+ } else {
+ return DAWN_VALIDATION_ERROR("Transform output missing renamer data.");
+ }
tint::writer::msl::Generator generator(&program);
if (!generator.Generate()) {
@@ -103,9 +114,6 @@ namespace dawn_native { namespace metal {
std::string msl = generator.result();
return std::move(msl);
-#else
- UNREACHABLE();
-#endif
}
ResultOrError<std::string> ShaderModule::TranslateToMSLWithSPIRVCross(
@@ -114,29 +122,26 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
uint32_t sampleMask,
const RenderPipeline* renderPipeline,
+ const VertexState* vertexState,
std::string* remappedEntryPointName,
bool* needsStorageBufferLength) {
const std::vector<uint32_t>* spirv = &GetSpirv();
spv::ExecutionModel executionModel = ShaderStageToExecutionModel(stage);
-#ifdef DAWN_ENABLE_WGSL
std::vector<uint32_t> pullingSpirv;
if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
stage == SingleShaderStage::Vertex) {
- if (mTintProgram) {
+ if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
DAWN_TRY_ASSIGN(pullingSpirv,
- GeneratePullingSpirv(mTintProgram.get(),
- *renderPipeline->GetVertexStateDescriptor(),
- entryPointName, kPullingBufferBindingSet));
+ GeneratePullingSpirv(GetTintProgram(), *vertexState, entryPointName,
+ kPullingBufferBindingSet));
} else {
- DAWN_TRY_ASSIGN(
- pullingSpirv,
- GeneratePullingSpirv(GetSpirv(), *renderPipeline->GetVertexStateDescriptor(),
- entryPointName, kPullingBufferBindingSet));
+ DAWN_TRY_ASSIGN(pullingSpirv,
+ GeneratePullingSpirv(GetSpirv(), *vertexState, entryPointName,
+ kPullingBufferBindingSet));
}
spirv = &pullingSpirv;
}
-#endif
// If these options are changed, the values in DawnSPIRVCrossMSLFastFuzzer.cpp need to
// be updated.
@@ -192,7 +197,6 @@ namespace dawn_native { namespace metal {
}
}
-#ifdef DAWN_ENABLE_WGSL
// Add vertex buffers bound as storage buffers
if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
stage == SingleShaderStage::Vertex) {
@@ -209,7 +213,6 @@ namespace dawn_native { namespace metal {
compiler.add_msl_resource_binding(mslBinding);
}
}
-#endif
// SPIRV-Cross also supports re-ordering attributes but it seems to do the correct thing
// by default.
@@ -228,20 +231,29 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout,
ShaderModule::MetalFunctionData* out,
uint32_t sampleMask,
- const RenderPipeline* renderPipeline) {
+ const RenderPipeline* renderPipeline,
+ const VertexState* vertexState) {
ASSERT(!IsError());
ASSERT(out);
+ // Vertex stages must specify a renderPipeline and vertexState
+ if (stage == SingleShaderStage::Vertex) {
+ ASSERT(renderPipeline != nullptr);
+ ASSERT(vertexState != nullptr);
+ }
+
std::string remappedEntryPointName;
std::string msl;
if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
- DAWN_TRY_ASSIGN(msl, TranslateToMSLWithTint(entryPointName, stage, layout, sampleMask,
- renderPipeline, &remappedEntryPointName,
- &out->needsStorageBufferLength));
+ DAWN_TRY_ASSIGN(
+ msl, TranslateToMSLWithTint(entryPointName, stage, layout, sampleMask,
+ renderPipeline, vertexState, &remappedEntryPointName,
+ &out->needsStorageBufferLength));
} else {
- DAWN_TRY_ASSIGN(msl, TranslateToMSLWithSPIRVCross(
- entryPointName, stage, layout, sampleMask, renderPipeline,
- &remappedEntryPointName, &out->needsStorageBufferLength));
+ DAWN_TRY_ASSIGN(msl, TranslateToMSLWithSPIRVCross(entryPointName, stage, layout,
+ sampleMask, renderPipeline,
+ vertexState, &remappedEntryPointName,
+ &out->needsStorageBufferLength));
}
// Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
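
The ShaderModuleMTL.mm hunks above replace the Inspector-based entry-point lookup with the output data of the Renamer transform. A minimal sketch of that lookup step, using a plain std::unordered_map as a hypothetical stand-in for the remapping data (not the real Tint API):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    int main() {
        // Hypothetical old-name -> renamed-symbol table produced by a renaming pass.
        std::unordered_map<std::string, std::string> remappings = {
            {"vertex_main", "tint_symbol_1"},
            {"fragment_main", "tint_symbol_2"},
        };

        std::string entryPoint = "fragment_main";
        auto it = remappings.find(entryPoint);
        if (it == remappings.end()) {
            std::fprintf(stderr, "Could not find remapped name for entry point.\n");
            return 1;
        }
        // The backend would compile the MSL and then look up this function by name.
        std::printf("compile MSL function: %s\n", it->second.c_str());
        return 0;
    }

As in the patch, a missing remapping is treated as a validation error rather than a crash, since the renamed symbol is what the Metal library lookup uses later.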
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
index 11fad51a4e3..6a7216ecb0e 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
@@ -29,9 +29,10 @@ namespace dawn_native { namespace metal {
class OldSwapChain final : public OldSwapChainBase {
public:
- OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
protected:
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
~OldSwapChain() override;
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
MaybeError OnBeforePresent(TextureViewBase* view) override;
@@ -39,10 +40,10 @@ namespace dawn_native { namespace metal {
class SwapChain final : public NewSwapChainBase {
public:
- static ResultOrError<SwapChain*> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
~SwapChain() override;
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
index 55eec73aeeb..b878893928b 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
@@ -26,6 +26,11 @@ namespace dawn_native { namespace metal {
// OldSwapChain
+ // static
+ Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
+ }
+
OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
: OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
@@ -58,14 +63,13 @@ namespace dawn_native { namespace metal {
// SwapChain
// static
- ResultOrError<SwapChain*> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- std::unique_ptr<SwapChain> swapchain =
- std::make_unique<SwapChain>(device, surface, descriptor);
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain.release();
+ return swapchain;
}
SwapChain::~SwapChain() {
@@ -113,7 +117,7 @@ namespace dawn_native { namespace metal {
ASSERT(mCurrentDrawable != nullptr);
[*mCurrentDrawable present];
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
mCurrentDrawable = nullptr;
@@ -127,16 +131,18 @@ namespace dawn_native { namespace metal {
TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
mTexture = AcquireRef(
new Texture(ToBackend(GetDevice()), &textureDesc, [*mCurrentDrawable texture]));
- return mTexture->CreateView();
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mTexture->APICreateView();
}
void SwapChain::DetachFromSurfaceImpl() {
ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
if (mTexture != nullptr) {
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
mCurrentDrawable = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index d578446c923..1265a4ecc37 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -35,7 +35,9 @@ namespace dawn_native { namespace metal {
class Texture final : public TextureBase {
public:
- Texture(Device* device, const TextureDescriptor* descriptor);
+ static ResultOrError<Ref<Texture>> Create(Device* device,
+ const TextureDescriptor* descriptor);
+
Texture(Device* device,
const TextureDescriptor* descriptor,
NSPRef<id<MTLTexture>> mtlTexture);
@@ -49,6 +51,7 @@ namespace dawn_native { namespace metal {
void EnsureSubresourceContentInitialized(const SubresourceRange& range);
private:
+ Texture(Device* device, const TextureDescriptor* descriptor);
~Texture() override;
void DestroyImpl() override;
@@ -60,11 +63,14 @@ namespace dawn_native { namespace metal {
class TextureView final : public TextureViewBase {
public:
- TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+ static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
id<MTLTexture> GetMTLTexture();
private:
+ TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
NSPRef<id<MTLTexture>> mMtlTextureView;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index d10c675d30c..29485ba9bef 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -275,7 +275,7 @@ namespace dawn_native { namespace metal {
return DAWN_VALIDATION_ERROR("IOSurface mip level count must be 1");
}
- if (descriptor->size.depth != 1) {
+ if (descriptor->size.depthOrArrayLayers != 1) {
return DAWN_VALIDATION_ERROR("IOSurface array layer count must be 1");
}
@@ -285,7 +285,7 @@ namespace dawn_native { namespace metal {
if (descriptor->size.width != IOSurfaceGetWidthOfPlane(ioSurface, plane) ||
descriptor->size.height != IOSurfaceGetHeightOfPlane(ioSurface, plane) ||
- descriptor->size.depth != 1) {
+ descriptor->size.depthOrArrayLayers != 1) {
return DAWN_VALIDATION_ERROR("IOSurface size doesn't match descriptor");
}
@@ -317,7 +317,7 @@ namespace dawn_native { namespace metal {
// Choose the correct MTLTextureType and paper over differences in how the array layer count
// is specified.
- mtlDesc.depth = descriptor->size.depth;
+ mtlDesc.depth = descriptor->size.depthOrArrayLayers;
mtlDesc.arrayLength = 1;
switch (descriptor->dimension) {
case wgpu::TextureDimension::e2D:
@@ -346,6 +346,12 @@ namespace dawn_native { namespace metal {
return mtlDescRef;
}
+ // static
+ ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor) {
+ return AcquireRef(new Texture(device, descriptor));
+ }
+
Texture::Texture(Device* device, const TextureDescriptor* descriptor)
: TextureBase(device, descriptor, TextureState::OwnedInternal) {
NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor(device, descriptor);
@@ -532,8 +538,8 @@ namespace dawn_native { namespace metal {
(largestMipSize.height / blockInfo.height),
512llu);
- // TODO(enga): Multiply by largestMipSize.depth and do a larger 3D copy to clear a whole
- // range of subresources when tracking that is improved.
+ // TODO(enga): Multiply by largestMipSize.depthOrArrayLayers and do a larger 3D copy to
+ // clear a whole range of subresources when tracking that is improved.
uint64_t bufferSize = largestMipBytesPerImage * 1;
if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
@@ -600,6 +606,12 @@ namespace dawn_native { namespace metal {
}
}
+ // static
+ ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
+ return AcquireRef(new TextureView(texture, descriptor));
+ }
+
TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
: TextureViewBase(texture, descriptor) {
id<MTLTexture> mtlTexture = ToBackend(texture)->GetMTLTexture();
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
index 953deb92614..bc41ec52244 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
@@ -60,9 +60,9 @@ namespace dawn_native { namespace metal {
// compute the correct range when checking if the buffer is big enough to contain the
// data for the whole copy. Instead of looking at the position of the last texel in the
// buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
- // format.blockHeight) * copySize.depth. For example considering the pixel buffer below
- // where in memory, each row data (D) of the texture is followed by some padding data
- // (P):
+            // format.blockHeight) * copySize.depthOrArrayLayers. For example, consider the pixel
+            // buffer below, where in memory each row of texture data (D) is followed by some
+            // padding data (P):
// |DDDDDDD|PP|
// |DDDDDDD|PP|
// |DDDDDDD|PP|
@@ -85,7 +85,8 @@ namespace dawn_native { namespace metal {
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
// Check whether buffer size is big enough.
- bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depth;
+ bool needWorkaround =
+ bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
if (!needWorkaround) {
copy.count = 1;
copy.copies[0].bufferOffset = bufferOffset;
@@ -93,25 +94,25 @@ namespace dawn_native { namespace metal {
copy.copies[0].bytesPerImage = bytesPerImage;
copy.copies[0].textureOrigin = origin;
copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depth};
+ copyExtent.depthOrArrayLayers};
return copy;
}
uint64_t currentOffset = bufferOffset;
// Doing all the copy except the last image.
- if (copyExtent.depth > 1) {
+ if (copyExtent.depthOrArrayLayers > 1) {
copy.copies[copy.count].bufferOffset = currentOffset;
copy.copies[copy.count].bytesPerRow = bytesPerRow;
copy.copies[copy.count].bytesPerImage = bytesPerImage;
copy.copies[copy.count].textureOrigin = origin;
copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
- copyExtent.depth - 1};
+ copyExtent.depthOrArrayLayers - 1};
++copy.count;
// Update offset to copy to the last image.
- currentOffset += (copyExtent.depth - 1) * bytesPerImage;
+ currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
}
// Doing all the copy in last image except the last row.
@@ -121,7 +122,7 @@ namespace dawn_native { namespace metal {
copy.copies[copy.count].bytesPerRow = bytesPerRow;
copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
- origin.z + copyExtent.depth - 1};
+ origin.z + copyExtent.depthOrArrayLayers - 1};
ASSERT(copyExtent.height - blockInfo.height <
texture->GetMipLevelVirtualSize(mipLevel).height);
@@ -146,7 +147,7 @@ namespace dawn_native { namespace metal {
copy.copies[copy.count].bytesPerImage = lastRowDataSize;
copy.copies[copy.count].textureOrigin = {origin.x,
origin.y + copyExtent.height - blockInfo.height,
- origin.z + copyExtent.depth - 1};
+ origin.z + copyExtent.depthOrArrayLayers - 1};
copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
++copy.count;
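
The copy-splitting logic above exists because Metal validates a buffer-to-texture copy as bytesPerImage * copySize.depthOrArrayLayers bytes, which over-counts the padding after the final row of the final image. A worked example with illustrative numbers (assuming a block height of 1; the values are not taken from Dawn):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t bytesPerRow = 256;    // row stride including padding
        const uint64_t rowsPerImage = 4;
        const uint64_t widthBytes = 200;     // tightly packed bytes in one row
        const uint64_t depthOrArrayLayers = 3;

        const uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
        // What Metal requires the buffer to contain: 256 * 4 * 3 = 3072 bytes.
        const uint64_t metalRequired = bytesPerImage * depthOrArrayLayers;
        // What the copy actually touches: two full images, three full rows,
        // then one row without its trailing padding = 2048 + 768 + 200 = 3016 bytes.
        const uint64_t actuallyNeeded = bytesPerImage * (depthOrArrayLayers - 1) +
                                        bytesPerRow * (rowsPerImage - 1) + widthBytes;
        std::printf("metal requires %llu bytes, copy touches %llu bytes\n",
                    (unsigned long long)metalRequired,
                    (unsigned long long)actuallyNeeded);
        return 0;
    }

A buffer of 3016 bytes is big enough for the data but fails Metal's 3072-byte check, which is why the helper splits the copy into the bulk images, the bulk rows of the last image, and the final row as separate regions.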
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 5f5a28304cf..70ac53c6dd1 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -92,52 +92,54 @@ namespace dawn_native { namespace null {
return DeviceBase::Initialize(new Queue(this));
}
- ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
- return new BindGroup(this, descriptor);
+ return AcquireRef(new BindGroup(this, descriptor));
}
- ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
- return new BindGroupLayout(this, descriptor);
+ return AcquireRef(new BindGroupLayout(this, descriptor));
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
DAWN_TRY(IncrementMemoryUsage(descriptor->size));
return AcquireRef(new Buffer(this, descriptor));
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
}
- ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
- return new ComputePipeline(this, descriptor);
+ return AcquireRef(new ComputePipeline(this, descriptor));
}
- ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
- return new PipelineLayout(this, descriptor);
+ return AcquireRef(new PipelineLayout(this, descriptor));
}
- ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
- return new QuerySet(this, descriptor);
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
}
- ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return new RenderPipeline(this, descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) {
+ return AcquireRef(new RenderPipeline(this, descriptor));
}
- ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return new Sampler(this, descriptor);
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
}
- ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
DAWN_TRY(module->Initialize(parseResult));
- return module.Detach();
+ return module;
}
- ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new OldSwapChain(this, descriptor);
+ return AcquireRef(new OldSwapChain(this, descriptor));
}
- ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
@@ -146,10 +148,10 @@ namespace dawn_native { namespace null {
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
}
- ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- return new TextureView(texture, descriptor);
+ return AcquireRef(new TextureView(texture, descriptor));
}
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
@@ -216,25 +218,27 @@ namespace dawn_native { namespace null {
}
MaybeError Device::TickImpl() {
- SubmitPendingOperations();
- return {};
+ return SubmitPendingOperations();
}
- ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
return GetLastSubmittedCommandSerial();
}
void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
mPendingOperations.emplace_back(std::move(operation));
}
- void Device::SubmitPendingOperations() {
+
+ MaybeError Device::SubmitPendingOperations() {
for (auto& operation : mPendingOperations) {
operation->Execute();
}
mPendingOperations.clear();
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
IncrementLastSubmittedCommandSerial();
+
+ return {};
}
// BindGroupDataHolder
@@ -334,8 +338,13 @@ namespace dawn_native { namespace null {
}
MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
- ToBackend(GetDevice())->SubmitPendingOperations();
- return {};
+ Device* device = ToBackend(GetDevice());
+
+        // The Vulkan, D3D12, and Metal implementations all tick the device here,
+        // so for testing purposes we should also tick in the null implementation.
+ DAWN_TRY(device->Tick());
+
+ return device->SubmitPendingOperations();
}
MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
@@ -349,14 +358,13 @@ namespace dawn_native { namespace null {
// SwapChain
// static
- ResultOrError<SwapChain*> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- std::unique_ptr<SwapChain> swapchain =
- std::make_unique<SwapChain>(device, surface, descriptor);
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain.release();
+ return swapchain;
}
MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
@@ -375,21 +383,23 @@ namespace dawn_native { namespace null {
SwapChain::~SwapChain() = default;
MaybeError SwapChain::PresentImpl() {
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
return {};
}
ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
mTexture = AcquireRef(
new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
- return mTexture->CreateView();
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mTexture->APICreateView();
}
void SwapChain::DetachFromSurfaceImpl() {
if (mTexture != nullptr) {
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
}
}
@@ -412,7 +422,7 @@ namespace dawn_native { namespace null {
}
TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
- return GetDevice()->CreateTexture(descriptor);
+ return GetDevice()->APICreateTexture(descriptor);
}
MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index 7cc08df9493..6ee14229132 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -91,13 +91,14 @@ namespace dawn_native { namespace null {
MaybeError Initialize();
- CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
MaybeError TickImpl() override;
void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
- void SubmitPendingOperations();
+ MaybeError SubmitPendingOperations();
ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
@@ -121,37 +122,38 @@ namespace dawn_native { namespace null {
private:
using DeviceBase::DeviceBase;
- ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
- ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
- ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
- ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) override;
- ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
- ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) override;
ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) override;
- ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
- ExecutionSerial CheckAndUpdateCompletedSerials() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;
@@ -254,10 +256,10 @@ namespace dawn_native { namespace null {
class SwapChain final : public NewSwapChainBase {
public:
- static ResultOrError<SwapChain*> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
~SwapChain() override;
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
index 486eb2c51b6..721189b9dbb 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
@@ -54,7 +54,7 @@ namespace dawn_native { namespace opengl {
}
// static
- BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
index 5544a492dda..0619cf185d3 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
@@ -26,9 +26,9 @@ namespace dawn_native { namespace opengl {
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
- BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
- static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
private:
~BindGroup() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
index 7c098c8700b..619e4e62ee9 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
@@ -24,9 +24,9 @@ namespace dawn_native { namespace opengl {
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
}
- BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor) {
- return mBindGroupAllocator.Allocate(device, descriptor);
+ Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
}
void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
index aeba62f54c3..edd1dd050b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
@@ -27,7 +27,7 @@ namespace dawn_native { namespace opengl {
public:
BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
- BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup);
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index e0c2f37d998..6573d12f296 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/opengl/CommandBufferGL.h"
+#include "common/VertexFormatUtils.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
@@ -49,57 +50,59 @@ namespace dawn_native { namespace opengl {
GLenum VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2:
- case wgpu::VertexFormat::UChar4:
- case wgpu::VertexFormat::UChar2Norm:
- case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
return GL_UNSIGNED_BYTE;
- case wgpu::VertexFormat::Char2:
- case wgpu::VertexFormat::Char4:
- case wgpu::VertexFormat::Char2Norm:
- case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
return GL_BYTE;
- case wgpu::VertexFormat::UShort2:
- case wgpu::VertexFormat::UShort4:
- case wgpu::VertexFormat::UShort2Norm:
- case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
return GL_UNSIGNED_SHORT;
- case wgpu::VertexFormat::Short2:
- case wgpu::VertexFormat::Short4:
- case wgpu::VertexFormat::Short2Norm:
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
return GL_SHORT;
- case wgpu::VertexFormat::Half2:
- case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float16x2:
+ case wgpu::VertexFormat::Float16x4:
return GL_HALF_FLOAT;
- case wgpu::VertexFormat::Float:
- case wgpu::VertexFormat::Float2:
- case wgpu::VertexFormat::Float3:
- case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float32:
+ case wgpu::VertexFormat::Float32x2:
+ case wgpu::VertexFormat::Float32x3:
+ case wgpu::VertexFormat::Float32x4:
return GL_FLOAT;
- case wgpu::VertexFormat::UInt:
- case wgpu::VertexFormat::UInt2:
- case wgpu::VertexFormat::UInt3:
- case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
return GL_UNSIGNED_INT;
- case wgpu::VertexFormat::Int:
- case wgpu::VertexFormat::Int2:
- case wgpu::VertexFormat::Int3:
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
return GL_INT;
+ default:
+ UNREACHABLE();
}
}
GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2Norm:
- case wgpu::VertexFormat::UChar4Norm:
- case wgpu::VertexFormat::Char2Norm:
- case wgpu::VertexFormat::Char4Norm:
- case wgpu::VertexFormat::UShort2Norm:
- case wgpu::VertexFormat::UShort4Norm:
- case wgpu::VertexFormat::Short2Norm:
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Unorm8x2:
+ case wgpu::VertexFormat::Unorm8x4:
+ case wgpu::VertexFormat::Snorm8x2:
+ case wgpu::VertexFormat::Snorm8x4:
+ case wgpu::VertexFormat::Unorm16x2:
+ case wgpu::VertexFormat::Unorm16x4:
+ case wgpu::VertexFormat::Snorm16x2:
+ case wgpu::VertexFormat::Snorm16x4:
return GL_TRUE;
default:
return GL_FALSE;
@@ -108,22 +111,22 @@ namespace dawn_native { namespace opengl {
bool VertexFormatIsInt(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2:
- case wgpu::VertexFormat::UChar4:
- case wgpu::VertexFormat::Char2:
- case wgpu::VertexFormat::Char4:
- case wgpu::VertexFormat::UShort2:
- case wgpu::VertexFormat::UShort4:
- case wgpu::VertexFormat::Short2:
- case wgpu::VertexFormat::Short4:
- case wgpu::VertexFormat::UInt:
- case wgpu::VertexFormat::UInt2:
- case wgpu::VertexFormat::UInt3:
- case wgpu::VertexFormat::UInt4:
- case wgpu::VertexFormat::Int:
- case wgpu::VertexFormat::Int2:
- case wgpu::VertexFormat::Int3:
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Uint8x2:
+ case wgpu::VertexFormat::Uint8x4:
+ case wgpu::VertexFormat::Sint8x2:
+ case wgpu::VertexFormat::Sint8x4:
+ case wgpu::VertexFormat::Uint16x2:
+ case wgpu::VertexFormat::Uint16x4:
+ case wgpu::VertexFormat::Sint16x2:
+ case wgpu::VertexFormat::Sint16x4:
+ case wgpu::VertexFormat::Uint32:
+ case wgpu::VertexFormat::Uint32x2:
+ case wgpu::VertexFormat::Uint32x3:
+ case wgpu::VertexFormat::Uint32x4:
+ case wgpu::VertexFormat::Sint32:
+ case wgpu::VertexFormat::Sint32x2:
+ case wgpu::VertexFormat::Sint32x3:
+ case wgpu::VertexFormat::Sint32x4:
return true;
default:
return false;
@@ -175,7 +178,7 @@ namespace dawn_native { namespace opengl {
uint64_t offset = mVertexBufferOffsets[slot];
const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
- uint32_t components = VertexFormatNumComponents(attribute.format);
+ uint32_t components = dawn::VertexFormatNumComponents(attribute.format);
GLenum formatType = VertexFormatType(attribute.format);
GLboolean normalized = VertexFormatIsNormalized(attribute.format);
@@ -469,7 +472,7 @@ namespace dawn_native { namespace opengl {
blitMask |= GL_STENCIL_BUFFER_BIT;
}
// Iterate over all layers, doing a single blit for each.
- for (uint32_t layer = 0; layer < copySize.depth; ++layer) {
+ for (uint32_t layer = 0; layer < copySize.depthOrArrayLayers; ++layer) {
// Bind all required aspects for this layer.
for (Aspect aspect : IterateEnumMask(src.aspect)) {
GLenum glAttachment;
@@ -649,7 +652,8 @@ namespace dawn_native { namespace opengl {
if (texture->GetArrayLayers() > 1) {
// TODO(jiawei.shao@intel.com): do a single copy when the data is
// correctly packed.
- for (size_t copyZ = 0; copyZ < copyExtent.depth; ++copyZ) {
+ for (size_t copyZ = 0; copyZ < copyExtent.depthOrArrayLayers;
+ ++copyZ) {
uintptr_t offsetPerImage = static_cast<uintptr_t>(
src.offset + copyZ * src.bytesPerRow * src.rowsPerImage);
uint32_t dstOriginY = dst.origin.y;
@@ -699,13 +703,15 @@ namespace dawn_native { namespace opengl {
uint64_t copyDataSize = (copySize.width / blockInfo.width) *
(copySize.height / blockInfo.height) *
- blockInfo.byteSize * copySize.depth;
+ blockInfo.byteSize *
+ copySize.depthOrArrayLayers;
if (texture->GetArrayLayers() > 1) {
gl.CompressedTexSubImage3D(
target, dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
- copyExtent.width, copyExtent.height, copyExtent.depth,
- format.internalFormat, copyDataSize,
+ copyExtent.width, copyExtent.height,
+ copyExtent.depthOrArrayLayers, format.internalFormat,
+ copyDataSize,
reinterpret_cast<void*>(static_cast<uintptr_t>(src.offset)));
} else {
gl.CompressedTexSubImage2D(
@@ -731,8 +737,8 @@ namespace dawn_native { namespace opengl {
if (texture->GetArrayLayers() > 1) {
gl.TexSubImage3D(target, dst.mipLevel, dst.origin.x,
dst.origin.y, dst.origin.z, copySize.width,
- copySize.height, copySize.depth, format.format,
- format.type,
+ copySize.height, copySize.depthOrArrayLayers,
+ format.format, format.type,
reinterpret_cast<void*>(
static_cast<uintptr_t>(src.offset)));
} else {
@@ -836,7 +842,7 @@ namespace dawn_native { namespace opengl {
}
const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
- for (uint32_t layer = 0; layer < copySize.depth; ++layer) {
+ for (uint32_t layer = 0; layer < copySize.depthOrArrayLayers; ++layer) {
gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
texture->GetHandle(), src.mipLevel,
src.origin.z + layer);
@@ -889,7 +895,8 @@ namespace dawn_native { namespace opengl {
src.mipLevel, src.origin.x, src.origin.y, src.origin.z,
dstTexture->GetHandle(), dstTexture->GetGLTarget(),
dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
- copySize.width, copySize.height, copy->copySize.depth);
+ copySize.width, copySize.height,
+ copy->copySize.depthOrArrayLayers);
} else {
CopyTextureToTextureWithBlit(gl, src, dst, copySize);
}
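
The hunk above maps the renamed WebGPU vertex formats (UChar2 -> Uint8x2, Half4 -> Float16x4, Float3 -> Float32x3, and so on) onto GL enums. As a quick illustration of how the GL backend consumes one of the new names, using only the helpers visible in this hunk plus dawn::VertexFormatNumComponents from the newly included common/VertexFormatUtils.h (gl, shaderLocation, arrayStride and offset below are placeholders, not taken from this patch):

    // Sketch: Unorm8x4 is four unsigned 8-bit components, normalized on read.
    wgpu::VertexFormat format = wgpu::VertexFormat::Unorm8x4;
    GLenum glType = VertexFormatType(format);                        // GL_UNSIGNED_BYTE
    GLboolean normalized = VertexFormatIsNormalized(format);         // GL_TRUE
    uint32_t components = dawn::VertexFormatNumComponents(format);   // 4
    // The attribute setup then roughly becomes:
    gl.VertexAttribPointer(shaderLocation, components, glType, normalized, arrayStride,
                           reinterpret_cast<void*>(static_cast<uintptr_t>(offset)));
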
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index ef7be64d3ed..2963ad8b5af 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -110,50 +110,52 @@ namespace dawn_native { namespace opengl {
return result;
}
- ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
return BindGroup::Create(this, descriptor);
}
- ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
- return new BindGroupLayout(this, descriptor);
+ return AcquireRef(new BindGroupLayout(this, descriptor));
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return AcquireRef(new Buffer(this, descriptor));
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
}
- ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
- return new ComputePipeline(this, descriptor);
+ return AcquireRef(new ComputePipeline(this, descriptor));
}
- ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
- return new PipelineLayout(this, descriptor);
+ return AcquireRef(new PipelineLayout(this, descriptor));
}
- ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
- return new QuerySet(this, descriptor);
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
+ return AcquireRef(new QuerySet(this, descriptor));
}
- ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
- return new RenderPipeline(this, descriptor);
+ ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) {
+ return AcquireRef(new RenderPipeline(this, descriptor));
}
- ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return new Sampler(this, descriptor);
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ return AcquireRef(new Sampler(this, descriptor));
}
- ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
return ShaderModule::Create(this, descriptor, parseResult);
}
- ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new SwapChain(this, descriptor);
+ return AcquireRef(new SwapChain(this, descriptor));
}
- ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
@@ -162,10 +164,10 @@ namespace dawn_native { namespace opengl {
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return AcquireRef(new Texture(this, descriptor));
}
- ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
- return new TextureView(texture, descriptor);
+ return AcquireRef(new TextureView(texture, descriptor));
}
void Device::SubmitFenceSync() {
@@ -178,7 +180,7 @@ namespace dawn_native { namespace opengl {
return {};
}
- ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
ExecutionSerial fenceSerial{0};
while (!mFencesInFlight.empty()) {
GLsync sync = mFencesInFlight.front().first;
@@ -232,7 +234,7 @@ namespace dawn_native { namespace opengl {
MaybeError Device::WaitForIdleForDestruction() {
gl.Finish();
- CheckPassedSerials();
+ DAWN_TRY(CheckPassedSerials());
ASSERT(mFencesInFlight.empty());
return {};
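
The pattern repeated throughout this file (and in the Vulkan backend below) is that creation paths now return ResultOrError<Ref<T>> and wrap freshly constructed objects with AcquireRef(). The following is a minimal, self-contained sketch of that ownership convention, not Dawn's actual RefCounted/Ref implementation; it only shows why AcquireRef(new T(...)) adopts the initial reference instead of adding a second one:

    #include <cassert>
    #include <cstdint>

    class RefCounted {
      public:
        void Reference() { ++mRefCount; }
        void Release() {
            if (--mRefCount == 0) {
                delete this;
            }
        }
        uint64_t GetRefCountForTesting() const { return mRefCount; }
      protected:
        virtual ~RefCounted() = default;
      private:
        uint64_t mRefCount = 1;  // a freshly new'd object starts with one reference, owned by its creator
    };

    template <typename T>
    class Ref {
      public:
        struct AdoptTag {};
        Ref() = default;
        Ref(T* ptr) : mPtr(ptr) {
            if (mPtr) mPtr->Reference();  // shares ownership: +1
        }
        Ref(T* ptr, AdoptTag) : mPtr(ptr) {}  // adopts the initial reference: +0
        Ref(Ref&& other) : mPtr(other.mPtr) { other.mPtr = nullptr; }
        ~Ref() {
            if (mPtr) mPtr->Release();
        }
        T* Get() const { return mPtr; }
      private:
        T* mPtr = nullptr;
    };

    template <typename T>
    Ref<T> AcquireRef(T* ptr) {
        return Ref<T>(ptr, typename Ref<T>::AdoptTag{});
    }

    class Sampler : public RefCounted {};

    int main() {
        // Equivalent of "return AcquireRef(new Sampler(this, descriptor));" in this patch:
        Ref<Sampler> sampler = AcquireRef(new Sampler());
        assert(sampler.Get()->GetRefCountForTesting() == 1);  // no extra AddRef, no leak
        return 0;
    }
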
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index f463aff2aa5..6ee6de5c8ff 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -49,9 +49,9 @@ namespace dawn_native { namespace opengl {
void SubmitFenceSync();
- // Dawn API
- CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
MaybeError TickImpl() override;
@@ -77,38 +77,39 @@ namespace dawn_native { namespace opengl {
const DeviceDescriptor* descriptor,
const OpenGLFunctions& functions);
- ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
- ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
- ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
- ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) override;
- ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
- ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) override;
ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) override;
- ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
void InitTogglesFromDriver();
- ExecutionSerial CheckAndUpdateCompletedSerials() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
void ShutDownImpl() override;
MaybeError WaitForIdleForDestruction() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index c40d25d55f2..a0181efafee 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -99,8 +99,8 @@ namespace dawn_native { namespace opengl {
ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
- mDummySampler = AcquireRef(
- ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess()));
+ mDummySampler =
+ ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
}
// Link all the shaders together.
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index 5340d986a28..17260b925e1 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -52,14 +52,15 @@ namespace dawn_native { namespace opengl {
return {};
}
- MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+ MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
const void* data,
const TextureDataLayout& dataLayout,
const Extent3D& writeSizePixel) {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
Texture* texture = ToBackend(destination.texture);
- SubresourceRange range(Aspect::Color, {destination.origin.z, writeSizePixel.depth},
+ SubresourceRange range(Aspect::Color,
+ {destination.origin.z, writeSizePixel.depthOrArrayLayers},
{destination.mipLevel, 1});
if (IsCompleteSubresourceCopiedTo(texture, writeSizePixel, destination.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, range);
@@ -97,7 +98,7 @@ namespace dawn_native { namespace opengl {
const uint8_t* slice = static_cast<const uint8_t*>(data);
for (uint32_t z = destination.origin.z;
- z < destination.origin.z + writeSizePixel.depth; ++z) {
+ z < destination.origin.z + writeSizePixel.depthOrArrayLayers; ++z) {
const uint8_t* d = slice;
for (uint32_t y = destination.origin.y;
@@ -122,8 +123,8 @@ namespace dawn_native { namespace opengl {
gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
gl.TexSubImage3D(target, destination.mipLevel, destination.origin.x,
destination.origin.y, destination.origin.z, writeSizePixel.width,
- writeSizePixel.height, writeSizePixel.depth, format.format,
- format.type, data);
+ writeSizePixel.height, writeSizePixel.depthOrArrayLayers,
+ format.format, format.type, data);
gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
}
gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
@@ -138,7 +139,7 @@ namespace dawn_native { namespace opengl {
}
} else {
const uint8_t* slice = static_cast<const uint8_t*>(data);
- for (uint32_t z = 0; z < writeSizePixel.depth; ++z) {
+ for (uint32_t z = 0; z < writeSizePixel.depthOrArrayLayers; ++z) {
const uint8_t* d = slice;
for (uint32_t y = 0; y < writeSizePixel.height; ++y) {
gl.TexSubImage3D(target, destination.mipLevel, destination.origin.x,
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
index a95e1a4ea66..b5a5243dc56 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.h
@@ -31,7 +31,7 @@ namespace dawn_native { namespace opengl {
uint64_t bufferOffset,
const void* data,
size_t size) override;
- MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
const void* data,
const TextureDataLayout& dataLayout,
const Extent3D& writeSizePixel) override;
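
Two renames run through the queue code above: TextureCopyView becomes ImageCopyTexture, and the third member of Extent3D becomes depthOrArrayLayers, since for 2D array textures it counts layers rather than depth slices. A hedged sketch of a WriteTexture call against the renamed types; queue, texture, data and dataSize are placeholders and the sizes are made up for illustration:

    wgpu::ImageCopyTexture destination = {};
    destination.texture = texture;        // a 2D array texture created elsewhere
    destination.mipLevel = 0;
    destination.origin = {0, 0, 2};       // origin.z selects the first array layer written

    wgpu::TextureDataLayout dataLayout = {};
    dataLayout.offset = 0;
    dataLayout.bytesPerRow = 256 * 4;
    dataLayout.rowsPerImage = 256;

    wgpu::Extent3D writeSize = {256, 256, 3};  // width, height, depthOrArrayLayers (3 layers)

    queue.WriteTexture(&destination, data, dataSize, &dataLayout, &writeSize);
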
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
index 9a5da5c2829..3c153fb43ee 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
@@ -104,43 +104,42 @@ namespace dawn_native { namespace opengl {
void ApplyColorState(const OpenGLFunctions& gl,
ColorAttachmentIndex attachment,
- const ColorStateDescriptor* descriptor) {
+ const ColorTargetState* state) {
GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
- if (BlendEnabled(descriptor)) {
+ if (state->blend != nullptr) {
gl.Enablei(GL_BLEND, colorBuffer);
- gl.BlendEquationSeparatei(colorBuffer,
- GLBlendMode(descriptor->colorBlend.operation),
- GLBlendMode(descriptor->alphaBlend.operation));
+ gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
gl.BlendFuncSeparatei(colorBuffer,
- GLBlendFactor(descriptor->colorBlend.srcFactor, false),
- GLBlendFactor(descriptor->colorBlend.dstFactor, false),
- GLBlendFactor(descriptor->alphaBlend.srcFactor, true),
- GLBlendFactor(descriptor->alphaBlend.dstFactor, true));
+ GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
} else {
gl.Disablei(GL_BLEND, colorBuffer);
}
- gl.ColorMaski(colorBuffer, descriptor->writeMask & wgpu::ColorWriteMask::Red,
- descriptor->writeMask & wgpu::ColorWriteMask::Green,
- descriptor->writeMask & wgpu::ColorWriteMask::Blue,
- descriptor->writeMask & wgpu::ColorWriteMask::Alpha);
+ gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
}
- void ApplyColorState(const OpenGLFunctions& gl, const ColorStateDescriptor* descriptor) {
- if (BlendEnabled(descriptor)) {
+ void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
+ if (state->blend != nullptr) {
gl.Enable(GL_BLEND);
- gl.BlendEquationSeparate(GLBlendMode(descriptor->colorBlend.operation),
- GLBlendMode(descriptor->alphaBlend.operation));
- gl.BlendFuncSeparate(GLBlendFactor(descriptor->colorBlend.srcFactor, false),
- GLBlendFactor(descriptor->colorBlend.dstFactor, false),
- GLBlendFactor(descriptor->alphaBlend.srcFactor, true),
- GLBlendFactor(descriptor->alphaBlend.dstFactor, true));
+ gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
+ GLBlendMode(state->blend->alpha.operation));
+ gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
+ GLBlendFactor(state->blend->color.dstFactor, false),
+ GLBlendFactor(state->blend->alpha.srcFactor, true),
+ GLBlendFactor(state->blend->alpha.dstFactor, true));
} else {
gl.Disable(GL_BLEND);
}
- gl.ColorMask(descriptor->writeMask & wgpu::ColorWriteMask::Red,
- descriptor->writeMask & wgpu::ColorWriteMask::Green,
- descriptor->writeMask & wgpu::ColorWriteMask::Blue,
- descriptor->writeMask & wgpu::ColorWriteMask::Alpha);
+ gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
+ state->writeMask & wgpu::ColorWriteMask::Green,
+ state->writeMask & wgpu::ColorWriteMask::Blue,
+ state->writeMask & wgpu::ColorWriteMask::Alpha);
}
bool Equal(const BlendDescriptor& lhs, const BlendDescriptor& rhs) {
@@ -170,7 +169,7 @@ namespace dawn_native { namespace opengl {
}
void ApplyDepthStencilState(const OpenGLFunctions& gl,
- const DepthStencilStateDescriptor* descriptor,
+ const DepthStencilState* descriptor,
PersistentPipelineState* persistentPipelineState) {
// Depth writes only occur if depth is enabled
if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
@@ -211,16 +210,16 @@ namespace dawn_native { namespace opengl {
} // anonymous namespace
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
+ RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor2* descriptor)
: RenderPipelineBase(device, descriptor),
mVertexArrayObject(0),
mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
PerStage<const ShaderModule*> modules(nullptr);
- modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertexStage.module);
- modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragmentStage->module);
+ modules[SingleShaderStage::Vertex] = ToBackend(descriptor->vertex.module);
+ modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragment->module);
PipelineGL::Initialize(device->gl, ToBackend(GetLayout()), GetAllStages());
- CreateVAOForVertexState(descriptor->vertexState);
+ CreateVAOForVertexState();
}
RenderPipeline::~RenderPipeline() {
@@ -239,7 +238,7 @@ namespace dawn_native { namespace opengl {
return mAttributesUsingVertexBuffer[slot];
}
- void RenderPipeline::CreateVAOForVertexState(const VertexStateDescriptor* vertexState) {
+ void RenderPipeline::CreateVAOForVertexState() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.GenVertexArrays(1, &mVertexArrayObject);
@@ -278,7 +277,7 @@ namespace dawn_native { namespace opengl {
ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
- ApplyDepthStencilState(gl, GetDepthStencilStateDescriptor(), &persistentPipelineState);
+ ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
gl.SampleMaski(0, GetSampleMask());
if (IsAlphaToCoverageEnabled()) {
@@ -302,21 +301,26 @@ namespace dawn_native { namespace opengl {
if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- ApplyColorState(gl, attachmentSlot, GetColorStateDescriptor(attachmentSlot));
+ ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
}
} else {
- const ColorStateDescriptor* prevDescriptor = nullptr;
+ const ColorTargetState* prevDescriptor = nullptr;
for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorStateDescriptor* descriptor = GetColorStateDescriptor(attachmentSlot);
+ const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
if (!prevDescriptor) {
ApplyColorState(gl, descriptor);
prevDescriptor = descriptor;
- } else if (!Equal(descriptor->alphaBlend, prevDescriptor->alphaBlend) ||
- !Equal(descriptor->colorBlend, prevDescriptor->colorBlend) ||
- descriptor->writeMask != prevDescriptor->writeMask) {
- // TODO(crbug.com/dawn/582): Add validation to prevent this as it is not
- // supported on GLES < 3.2.
+ } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
+ // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
+ // per color target. Add validation to prevent this, as it is not supported.
ASSERT(false);
+ } else if (descriptor->blend != nullptr) {
+ if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
+ !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
+ descriptor->writeMask != prevDescriptor->writeMask) {
+ // TODO(crbug.com/dawn/582)
+ ASSERT(false);
+ }
}
}
}
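
ColorStateDescriptor is replaced by ColorTargetState, where blending is expressed through an optional BlendState pointer; that is what the new state->blend != nullptr checks above key off. A sketch of the descriptor side, with field names mirroring what this hunk reads and the specific blend factors and texture format chosen purely for illustration:

    wgpu::BlendState blend = {};
    blend.color.operation = wgpu::BlendOperation::Add;
    blend.color.srcFactor = wgpu::BlendFactor::One;
    blend.color.dstFactor = wgpu::BlendFactor::Zero;
    blend.alpha = blend.color;

    wgpu::ColorTargetState target = {};
    target.format = wgpu::TextureFormat::BGRA8Unorm;
    target.blend = &blend;                         // leaving this nullptr disables blending
    target.writeMask = wgpu::ColorWriteMask::All;
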
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
index e15c8648d57..0dc7f32fd30 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
@@ -29,7 +29,7 @@ namespace dawn_native { namespace opengl {
class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
public:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ RenderPipeline(Device* device, const RenderPipelineDescriptor2* descriptor);
GLenum GetGLPrimitiveTopology() const;
ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
@@ -39,7 +39,7 @@ namespace dawn_native { namespace opengl {
private:
~RenderPipeline() override;
- void CreateVAOForVertexState(const VertexStateDescriptor* vertexState);
+ void CreateVAOForVertexState();
// TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
GLuint mVertexArrayObject;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index 1b9070ea3e2..cc225491bd5 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -18,18 +18,17 @@
#include "common/Platform.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/SpirvUtils.h"
+#include "dawn_native/TintUtils.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/opengl/PipelineLayoutGL.h"
#include <spirv_glsl.hpp>
-#ifdef DAWN_ENABLE_WGSL
// Tint include must be after spirv_glsl.hpp, because spirv-cross has its own
// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
// is at 3 while spirv-headers is at 4.
-# undef SPV_REVISION
-# include <tint/tint.h>
-#endif // DAWN_ENABLE_WGSL
+#undef SPV_REVISION
+#include <tint/tint.h>
#include <sstream>
@@ -66,12 +65,12 @@ namespace dawn_native { namespace opengl {
}
// static
- ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
DAWN_TRY(module->Initialize(parseResult));
- return module.Detach();
+ return module;
}
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
@@ -79,40 +78,23 @@ namespace dawn_native { namespace opengl {
}
MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
- if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
-#ifdef DAWN_ENABLE_WGSL
- std::ostringstream errorStream;
- errorStream << "Tint SPIR-V (for GLSL) writer failure:" << std::endl;
-
- tint::transform::Manager transformManager;
- transformManager.append(std::make_unique<tint::transform::BoundArrayAccessors>());
- transformManager.append(std::make_unique<tint::transform::EmitVertexPointSize>());
- transformManager.append(std::make_unique<tint::transform::Spirv>());
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
- tint::Program program;
- DAWN_TRY_ASSIGN(program,
- RunTransforms(&transformManager, parseResult->tintProgram.get()));
-
- tint::writer::spirv::Generator generator(&program);
+ DAWN_TRY(InitializeBase(parseResult));
+ // Tint currently does not support emitting GLSL, so when provided a Tint program we need
+ // to generate SPIR-V and SPIRV-Cross reflection data to be used in this backend.
+ if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
+ tint::writer::spirv::Generator generator(GetTintProgram());
if (!generator.Generate()) {
+ std::ostringstream errorStream;
errorStream << "Generator: " << generator.error() << std::endl;
return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
- mSpirv = generator.result();
-
- ShaderModuleParseResult transformedParseResult;
- transformedParseResult.tintProgram =
- std::make_unique<tint::Program>(std::move(program));
- transformedParseResult.spirv = mSpirv;
-
- DAWN_TRY(InitializeBase(&transformedParseResult));
-#else
- UNREACHABLE();
-#endif
- } else {
- DAWN_TRY(InitializeBase(parseResult));
+ mGLSpirv = generator.result();
+ DAWN_TRY_ASSIGN(mGLEntryPoints, ReflectShaderUsingSPIRVCross(GetDevice(), mGLSpirv));
}
+
return {};
}
@@ -141,7 +123,7 @@ namespace dawn_native { namespace opengl {
options.version = version.GetMajor() * 100 + version.GetMinor() * 10;
spirv_cross::CompilerGLSL compiler(
- GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator) ? mSpirv : GetSpirv());
+ GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator) ? mGLSpirv : GetSpirv());
compiler.set_common_options(options);
compiler.set_entry_point(entryPointName, ShaderStageToExecutionModel(stage));
@@ -180,7 +162,9 @@ namespace dawn_native { namespace opengl {
}
const EntryPointMetadata::BindingInfoArray& bindingInfo =
- GetEntryPoint(entryPointName).bindings;
+ GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)
+ ? (*mGLEntryPoints.at(entryPointName)).bindings
+ : GetEntryPoint(entryPointName).bindings;
// Change binding names to be "dawn_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
index 371c415e9ae..7f598b9b70f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
@@ -46,9 +46,9 @@ namespace dawn_native { namespace opengl {
class ShaderModule final : public ShaderModuleBase {
public:
- static ResultOrError<ShaderModule*> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
std::string TranslateToGLSL(const char* entryPointName,
SingleShaderStage stage,
@@ -61,7 +61,8 @@ namespace dawn_native { namespace opengl {
~ShaderModule() override = default;
MaybeError Initialize(ShaderModuleParseResult* parseResult);
- std::vector<uint32_t> mSpirv;
+ std::vector<uint32_t> mGLSpirv;
+ EntryPointMetadataTable mGLEntryPoints;
};
}} // namespace dawn_native::opengl
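
Because Tint cannot emit GLSL yet, the GL backend now always goes Tint -> SPIR-V -> SPIRV-Cross when UseTintGenerator is enabled. A condensed sketch of that flow, using only the calls visible in the ShaderModuleGL.cpp hunk above (error handling trimmed; mGLSpirv and mGLEntryPoints are the members this patch introduces):

    tint::writer::spirv::Generator generator(GetTintProgram());
    if (!generator.Generate()) {
        return DAWN_VALIDATION_ERROR(("Generator: " + generator.error()).c_str());
    }
    mGLSpirv = generator.result();
    // SPIRV-Cross then compiles mGLSpirv to GLSL and supplies the reflection data
    // (mGLEntryPoints) used for binding remapping in TranslateToGLSL().
    DAWN_TRY_ASSIGN(mGLEntryPoints, ReflectShaderUsingSPIRVCross(GetDevice(), mGLSpirv));
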
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 5582d979797..c7fc9ef74b7 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -29,7 +29,7 @@ namespace dawn_native { namespace opengl {
GLenum TargetForTexture(const TextureDescriptor* descriptor) {
switch (descriptor->dimension) {
case wgpu::TextureDimension::e2D:
- if (descriptor->size.depth > 1) {
+ if (descriptor->size.depthOrArrayLayers > 1) {
ASSERT(descriptor->sampleCount == 1);
return GL_TEXTURE_2D_ARRAY;
} else {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
index 94841e091b5..532def483f3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
@@ -87,6 +87,7 @@ namespace dawn_native { namespace vulkan {
if (mDeviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
mDeviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
mDeviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+ mDeviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
mDeviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE) {
mSupportedExtensions.EnableExtension(Extension::ShaderFloat16);
}
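
The adapter now also requires storageBuffer16BitAccess, not just uniformAndStorageBuffer16BitAccess, before exposing ShaderFloat16. For reference, the equivalent query in plain Vulkan (a standalone sketch against the Vulkan 1.1/1.2 headers, not Dawn's VulkanDeviceInfo plumbing):

    #include <vulkan/vulkan.h>

    bool Supports16BitStorageForShaderFloat16(VkPhysicalDevice physicalDevice) {
        VkPhysicalDevice16BitStorageFeatures storage16 = {};
        storage16.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES;

        VkPhysicalDeviceShaderFloat16Int8Features float16Int8 = {};
        float16Int8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES;
        float16Int8.pNext = &storage16;

        VkPhysicalDeviceFeatures2 features2 = {};
        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        features2.pNext = &float16Int8;

        vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);

        return float16Int8.shaderFloat16 == VK_TRUE &&
               storage16.storageBuffer16BitAccess == VK_TRUE &&
               storage16.uniformAndStorageBuffer16BitAccess == VK_TRUE;
    }
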
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 9bb379bde07..700e850318a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -74,12 +74,12 @@ namespace dawn_native { namespace vulkan {
}
// static
- ResultOrError<BindGroupLayout*> BindGroupLayout::Create(
+ ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
Device* device,
const BindGroupLayoutDescriptor* descriptor) {
Ref<BindGroupLayout> bgl = AcquireRef(new BindGroupLayout(device, descriptor));
DAWN_TRY(bgl->Initialize());
- return bgl.Detach();
+ return bgl;
}
MaybeError BindGroupLayout::Initialize() {
@@ -158,13 +158,14 @@ namespace dawn_native { namespace vulkan {
return mHandle;
}
- ResultOrError<BindGroup*> BindGroupLayout::AllocateBindGroup(
+ ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
Device* device,
const BindGroupDescriptor* descriptor) {
DescriptorSetAllocation descriptorSetAllocation;
DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
- return mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation);
+ return AcquireRef(
+ mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
}
void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 394cfab1626..72f8b698d7a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -45,15 +45,16 @@ namespace dawn_native { namespace vulkan {
// expensive syscall.
class BindGroupLayout final : public BindGroupLayoutBase {
public:
- static ResultOrError<BindGroupLayout*> Create(Device* device,
- const BindGroupLayoutDescriptor* descriptor);
+ static ResultOrError<Ref<BindGroupLayout>> Create(
+ Device* device,
+ const BindGroupLayoutDescriptor* descriptor);
BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
VkDescriptorSetLayout GetHandle() const;
- ResultOrError<BindGroup*> AllocateBindGroup(Device* device,
- const BindGroupDescriptor* descriptor);
+ ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
void DeallocateBindGroup(BindGroup* bindGroup,
DescriptorSetAllocation* descriptorSetAllocation);
void FinishDeallocation(ExecutionSerial completedSerial);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index f7dd72f962a..07653e8bf59 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -27,8 +27,8 @@
namespace dawn_native { namespace vulkan {
// static
- ResultOrError<BindGroup*> BindGroup::Create(Device* device,
- const BindGroupDescriptor* descriptor) {
+ ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+ const BindGroupDescriptor* descriptor) {
return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
index 411c1149405..dac780bf0b9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
@@ -28,8 +28,8 @@ namespace dawn_native { namespace vulkan {
class BindGroup final : public BindGroupBase, public PlacementAllocated {
public:
- static ResultOrError<BindGroup*> Create(Device* device,
- const BindGroupDescriptor* descriptor);
+ static ResultOrError<Ref<BindGroup>> Create(Device* device,
+ const BindGroupDescriptor* descriptor);
BindGroup(Device* device,
const BindGroupDescriptor* descriptor,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index 98bfb58dc4f..47fac441a5f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -58,7 +58,7 @@ namespace dawn_native { namespace vulkan {
Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
return imageExtentSrc.width == imageExtentDst.width &&
imageExtentSrc.height == imageExtentDst.height &&
- imageExtentSrc.depth == imageExtentDst.depth;
+ imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
}
VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
@@ -76,7 +76,7 @@ namespace dawn_native { namespace vulkan {
region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
region.srcSubresource.mipLevel = srcCopy.mipLevel;
region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
- region.srcSubresource.layerCount = copySize.depth;
+ region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
region.srcOffset.x = srcCopy.origin.x;
region.srcOffset.y = srcCopy.origin.y;
@@ -85,7 +85,7 @@ namespace dawn_native { namespace vulkan {
region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
region.dstSubresource.mipLevel = dstCopy.mipLevel;
region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
- region.dstSubresource.layerCount = copySize.depth;
+ region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
region.dstOffset.x = dstCopy.origin.x;
region.dstOffset.y = dstCopy.origin.y;
@@ -369,13 +369,36 @@ namespace dawn_native { namespace vulkan {
return {};
}
- void ResetUsedQuerySets(Device* device,
- VkCommandBuffer commands,
- const std::set<QuerySetBase*>& usedQuerySets) {
- // TODO(hao.x.li@intel.com): Reset the queries based on the used indexes.
- for (QuerySetBase* querySet : usedQuerySets) {
- device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), 0,
- querySet->GetQueryCount());
+ // Reset the query sets used on a render pass, because the reset command must be called
+ // outside the render pass.
+ void ResetUsedQuerySetsOnRenderPass(Device* device,
+ VkCommandBuffer commands,
+ QuerySetBase* querySet,
+ const std::vector<bool>& availability) {
+ ASSERT(availability.size() == querySet->GetQueryAvailability().size());
+
+ auto currentIt = availability.begin();
+ auto lastIt = availability.end();
+ // Traverse the used queries (those whose availability is true).
+ while (currentIt != lastIt) {
+ auto firstTrueIt = std::find(currentIt, lastIt, true);
+ // No used queries need to be reset
+ if (firstTrueIt == lastIt) {
+ break;
+ }
+
+ auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+ uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
+ uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
+
+ // Reset the queries between firstTrueIt and nextFalseIt (which is at most
+ // lastIt)
+ device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
+ queryCount);
+
+ // Set current iterator to next false
+ currentIt = nextFalseIt;
}
}
@@ -425,7 +448,7 @@ namespace dawn_native { namespace vulkan {
destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
- // Set current interator to next false
+ // Set current iterator to next false
currentIt = nextFalseIt;
}
}
@@ -433,9 +456,9 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- CommandBuffer* CommandBuffer::Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
- return new CommandBuffer(encoder, descriptor);
+ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
+ return AcquireRef(new CommandBuffer(encoder, descriptor));
}
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
@@ -459,13 +482,15 @@ namespace dawn_native { namespace vulkan {
// Create the temporary buffer. Note that we don't need to respect WebGPU's 256-byte alignment
// because it isn't a hard constraint in Vulkan.
uint64_t tempBufferSize =
- widthInBlocks * heightInBlocks * copySize.depth * blockInfo.byteSize;
+ widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
BufferDescriptor tempBufferDescriptor;
tempBufferDescriptor.size = tempBufferSize;
tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
Device* device = ToBackend(GetDevice());
- Ref<Buffer> tempBuffer = AcquireRef(ToBackend(device->CreateBuffer(&tempBufferDescriptor)));
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ Ref<Buffer> tempBuffer =
+ AcquireRef(ToBackend(device->APICreateBuffer(&tempBufferDescriptor)));
BufferCopy tempBufferCopy;
tempBufferCopy.buffer = tempBuffer.Get();
@@ -502,7 +527,8 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
- // Records the necessary barriers for the resource usage pre-computed by the frontend
+ // Records the necessary barriers for the resource usage pre-computed by the frontend,
+ // and resets the used query sets that are rewritten during the render pass.
auto PrepareResourcesForRenderPass = [](Device* device,
CommandRecordingContext* recordingContext,
const PassResourceUsage& usages) {
@@ -544,6 +570,13 @@ namespace dawn_native { namespace vulkan {
bufferBarriers.data(), imageBarriers.size(),
imageBarriers.data());
}
+
+ // Reset all query sets used on the current render pass together before beginning it,
+ // because the reset command must be called outside a render pass.
+ for (size_t i = 0; i < usages.querySets.size(); ++i) {
+ ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
+ usages.querySets[i], usages.queryAvailabilities[i]);
+ }
};
// TODO(jiawei.shao@intel.com): move the resource lazy clearing inside the barrier tracking
@@ -566,9 +599,6 @@ namespace dawn_native { namespace vulkan {
const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
size_t nextPassNumber = 0;
- // QuerySet must be reset between uses.
- ResetUsedQuerySets(device, commands, GetResourceUsages().usedQuerySets);
-
Command type;
while (mCommands.NextCommandId(&type)) {
switch (type) {
@@ -689,8 +719,8 @@ namespace dawn_native { namespace vulkan {
// subresources should all be GENERAL instead of what we set now. Currently
// it is not allowed to copy with overlapped subresources, but we still
// add the ASSERT here as a reminder for this possible misuse.
- ASSERT(
- !IsRangeOverlapped(src.origin.z, dst.origin.z, copy->copySize.depth));
+ ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
+ copy->copySize.depthOrArrayLayers));
}
// TODO after Yunchao's CL
@@ -770,10 +800,15 @@ namespace dawn_native { namespace vulkan {
QuerySet* querySet = ToBackend(cmd->querySet.Get());
Buffer* destination = ToBackend(cmd->destination.Get());
+ // TODO(hao.x.li@intel.com): Clear the resolve region of the buffer to 0 if at
+ // least one query is unavailable for the resolving and the resolve buffer has
+ // been initialized or fully used.
+
destination->EnsureDataInitializedAsDestination(
recordingContext, cmd->destinationOffset,
cmd->queryCount * sizeof(uint64_t));
- destination->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+ destination->TransitionUsageNow(recordingContext,
+ wgpu::BufferUsage::QueryResolve);
RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
cmd->queryCount, destination, cmd->destinationOffset);
@@ -784,6 +819,10 @@ namespace dawn_native { namespace vulkan {
case Command::WriteTimestamp: {
WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ // The query must be reset between uses.
+ device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+ cmd->queryIndex, 1);
+
RecordWriteTimestampCmd(recordingContext, device, cmd);
break;
}
@@ -958,6 +997,10 @@ namespace dawn_native { namespace vulkan {
case Command::WriteTimestamp: {
WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+ // The query must be reset between uses.
+ device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+ cmd->queryIndex, 1);
+
RecordWriteTimestampCmd(recordingContext, device, cmd);
break;
}
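
The new ResetUsedQuerySetsOnRenderPass walks the per-query availability bits and issues one vkCmdResetQueryPool per contiguous run of used queries instead of resetting the whole pool. A self-contained sketch of just that run-detection loop, with ResetRange standing in for the Vulkan call:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stand-in for device->fn.CmdResetQueryPool(commands, queryPool, firstQuery, queryCount).
    void ResetRange(uint32_t firstQuery, uint32_t queryCount) {
        std::printf("reset queries [%u, %u)\n", firstQuery, firstQuery + queryCount);
    }

    void ResetUsedRuns(const std::vector<bool>& availability) {
        auto currentIt = availability.begin();
        const auto lastIt = availability.end();
        while (currentIt != lastIt) {
            // Find the next run of used queries (availability == true).
            auto firstTrueIt = std::find(currentIt, lastIt, true);
            if (firstTrueIt == lastIt) {
                break;  // no further used queries to reset
            }
            auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
            ResetRange(static_cast<uint32_t>(std::distance(availability.begin(), firstTrueIt)),
                       static_cast<uint32_t>(std::distance(firstTrueIt, nextFalseIt)));
            currentIt = nextFalseIt;
        }
    }

    int main() {
        ResetUsedRuns({false, true, true, false, true});  // prints [1, 3) then [4, 5)
        return 0;
    }
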
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
index c5476d38ad0..edc35ff1280 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
@@ -32,8 +32,8 @@ namespace dawn_native { namespace vulkan {
class CommandBuffer final : public CommandBufferBase {
public:
- static CommandBuffer* Create(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor);
+ static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor);
MaybeError RecordCommands(CommandRecordingContext* recordingContext);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index b4e1c3016f2..a81dee9039e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -24,12 +24,12 @@
namespace dawn_native { namespace vulkan {
// static
- ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ ResultOrError<Ref<ComputePipeline>> ComputePipeline::Create(
Device* device,
const ComputePipelineDescriptor* descriptor) {
Ref<ComputePipeline> pipeline = AcquireRef(new ComputePipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
index 6aa98a4ef89..bd1fc04279e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.h
@@ -26,8 +26,9 @@ namespace dawn_native { namespace vulkan {
class ComputePipeline final : public ComputePipelineBase {
public:
- static ResultOrError<ComputePipeline*> Create(Device* device,
- const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<Ref<ComputePipeline>> Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor);
VkPipeline GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 8b79d3e84f9..530b5ca11d0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -102,49 +102,51 @@ namespace dawn_native { namespace vulkan {
ShutDownBase();
}
- ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
return BindGroup::Create(this, descriptor);
}
- ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
return BindGroupLayout::Create(this, descriptor);
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) {
+ ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) {
return CommandBuffer::Create(encoder, descriptor);
}
- ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
return ComputePipeline::Create(this, descriptor);
}
- ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
return PipelineLayout::Create(this, descriptor);
}
- ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
+ const QuerySetDescriptor* descriptor) {
return QuerySet::Create(this, descriptor);
}
- ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) {
+ ResultOrError<Ref<RenderPipelineBase>> Device::CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) {
return RenderPipeline::Create(this, descriptor);
}
- ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+ ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
return Sampler::Create(this, descriptor);
}
- ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
+ ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) {
return ShaderModule::Create(this, descriptor, parseResult);
}
- ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
return OldSwapChain::Create(this, descriptor);
}
- ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) {
@@ -153,7 +155,7 @@ namespace dawn_native { namespace vulkan {
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return Texture::Create(this, descriptor);
}
- ResultOrError<TextureViewBase*> Device::CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
return TextureView::Create(texture, descriptor);
@@ -341,9 +343,11 @@ namespace dawn_native { namespace vulkan {
ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+ deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+ usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
@@ -509,21 +513,22 @@ namespace dawn_native { namespace vulkan {
return fence;
}
- ExecutionSerial Device::CheckAndUpdateCompletedSerials() {
+ ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
ExecutionSerial fenceSerial(0);
while (!mFencesInFlight.empty()) {
VkFence fence = mFencesInFlight.front().first;
ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
VkResult result = VkResult::WrapUnsafe(
INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
- // TODO: Handle DeviceLost error.
- ASSERT(result == VK_SUCCESS || result == VK_NOT_READY);
            // Fences are added in order, so we can stop searching as soon
// as we see one that's not ready.
if (result == VK_NOT_READY) {
return fenceSerial;
+ } else {
+ DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
}
+
// Update fenceSerial since fence is ready.
fenceSerial = tentativeSerial;
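
Illustrative sketch (not part of the patch): with CheckAndUpdateCompletedSerials now returning ResultOrError<ExecutionSerial>, a caller propagates GetFenceStatus failures instead of asserting. The helper below is hypothetical (and assumes access to the method); MaybeError and DAWN_TRY_ASSIGN are the existing dawn_native error macros used elsewhere in this patch.

    // Hypothetical caller; the real frontend drives this from the device tick/wait paths.
    MaybeError CheckPassedSerialsExample(Device* device) {
        ExecutionSerial completedSerial;
        DAWN_TRY_ASSIGN(completedSerial, device->CheckAndUpdateCompletedSerials());
        // ... retire resources whose last-use serial is <= completedSerial ...
        return {};
    }
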
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index e28c1b91091..ae6032c0da8 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -77,9 +77,9 @@ namespace dawn_native { namespace vulkan {
ExternalImageExportInfoVk* info,
std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
- // Dawn API
- CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
- const CommandBufferDescriptor* descriptor) override;
+ ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+ CommandEncoder* encoder,
+ const CommandBufferDescriptor* descriptor) override;
MaybeError TickImpl() override;
@@ -114,33 +114,34 @@ namespace dawn_native { namespace vulkan {
private:
Device(Adapter* adapter, const DeviceDescriptor* descriptor);
- ResultOrError<BindGroupBase*> CreateBindGroupImpl(
+ ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) override;
- ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
ResultOrError<Ref<BufferBase>> CreateBufferImpl(
const BufferDescriptor* descriptor) override;
- ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
+ ResultOrError<Ref<ComputePipelineBase>> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
- ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
+ ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) override;
- ResultOrError<QuerySetBase*> CreateQuerySetImpl(
+ ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
const QuerySetDescriptor* descriptor) override;
- ResultOrError<RenderPipelineBase*> CreateRenderPipelineImpl(
- const RenderPipelineDescriptor* descriptor) override;
- ResultOrError<SamplerBase*> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
- ResultOrError<ShaderModuleBase*> CreateShaderModuleImpl(
+ ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipelineImpl(
+ const RenderPipelineDescriptor2* descriptor) override;
+ ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+ const SamplerDescriptor* descriptor) override;
+ ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor,
ShaderModuleParseResult* parseResult) override;
- ResultOrError<SwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
- ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
Surface* surface,
NewSwapChainBase* previousSwapChain,
const SwapChainDescriptor* descriptor) override;
ResultOrError<Ref<TextureBase>> CreateTextureImpl(
const TextureDescriptor* descriptor) override;
- ResultOrError<TextureViewBase*> CreateTextureViewImpl(
+ ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
@@ -173,7 +174,7 @@ namespace dawn_native { namespace vulkan {
std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
ResultOrError<VkFence> GetUnusedFence();
- ExecutionSerial CheckAndUpdateCompletedSerials() override;
+ ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
// We track which operations are in flight on the GPU with an increasing serial.
// This works only because we have a single queue. Each submit to a queue is associated
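
Illustrative sketch (not part of the patch): the CreateXImpl overrides above now hand ownership back as ResultOrError<Ref<T>>, so a caller binds the result straight into a Ref with DAWN_TRY_ASSIGN instead of adopting a detached raw pointer. The free function and its access to the Impl method are hypothetical.

    ResultOrError<Ref<BindGroupBase>> CreateBindGroupExample(
        Device* device,
        const BindGroupDescriptor* descriptor) {
        Ref<BindGroupBase> result;
        DAWN_TRY_ASSIGN(result, device->CreateBindGroupImpl(descriptor));
        return result;  // ownership travels in the Ref; no raw pointer, no Detach()
    }
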
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
index 80c5aa833e8..2aff50d9211 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
@@ -23,12 +23,12 @@
namespace dawn_native { namespace vulkan {
// static
- ResultOrError<PipelineLayout*> PipelineLayout::Create(
+ ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
Device* device,
const PipelineLayoutDescriptor* descriptor) {
Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
DAWN_TRY(layout->Initialize());
- return layout.Detach();
+ return layout;
}
MaybeError PipelineLayout::Initialize() {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
index fe136e4d0d6..c96215f6b71 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.h
@@ -26,8 +26,9 @@ namespace dawn_native { namespace vulkan {
class PipelineLayout final : public PipelineLayoutBase {
public:
- static ResultOrError<PipelineLayout*> Create(Device* device,
- const PipelineLayoutDescriptor* descriptor);
+ static ResultOrError<Ref<PipelineLayout>> Create(
+ Device* device,
+ const PipelineLayoutDescriptor* descriptor);
VkPipelineLayout GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
index c74ef7c7b86..27f6ab629bc 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
@@ -64,11 +64,11 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- ResultOrError<QuerySet*> QuerySet::Create(Device* device,
- const QuerySetDescriptor* descriptor) {
+ ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
DAWN_TRY(queryset->Initialize());
- return queryset.Detach();
+ return queryset;
}
MaybeError QuerySet::Initialize() {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
index 18cd0012908..80e7befa1d9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
@@ -25,8 +25,8 @@ namespace dawn_native { namespace vulkan {
class QuerySet final : public QuerySetBase {
public:
- static ResultOrError<QuerySet*> Create(Device* device,
- const QuerySetDescriptor* descriptor);
+ static ResultOrError<Ref<QuerySet>> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
VkQueryPool GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
index b7fa159159f..2cb1e69db89 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
@@ -41,7 +41,7 @@ namespace dawn_native { namespace vulkan {
MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
- device->Tick();
+ DAWN_TRY(device->Tick());
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
"CommandBufferVk::RecordCommands");
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index e50c34da638..bbb4f8eed49 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -38,66 +38,68 @@ namespace dawn_native { namespace vulkan {
VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
switch (format) {
- case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::Uint8x2:
return VK_FORMAT_R8G8_UINT;
- case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Uint8x4:
return VK_FORMAT_R8G8B8A8_UINT;
- case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Sint8x2:
return VK_FORMAT_R8G8_SINT;
- case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::Sint8x4:
return VK_FORMAT_R8G8B8A8_SINT;
- case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::Unorm8x2:
return VK_FORMAT_R8G8_UNORM;
- case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Unorm8x4:
return VK_FORMAT_R8G8B8A8_UNORM;
- case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Snorm8x2:
return VK_FORMAT_R8G8_SNORM;
- case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Snorm8x4:
return VK_FORMAT_R8G8B8A8_SNORM;
- case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::Uint16x2:
return VK_FORMAT_R16G16_UINT;
- case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Uint16x4:
return VK_FORMAT_R16G16B16A16_UINT;
- case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Sint16x2:
return VK_FORMAT_R16G16_SINT;
- case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Sint16x4:
return VK_FORMAT_R16G16B16A16_SINT;
- case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::Unorm16x2:
return VK_FORMAT_R16G16_UNORM;
- case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Unorm16x4:
return VK_FORMAT_R16G16B16A16_UNORM;
- case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Snorm16x2:
return VK_FORMAT_R16G16_SNORM;
- case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Snorm16x4:
return VK_FORMAT_R16G16B16A16_SNORM;
- case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Float16x2:
return VK_FORMAT_R16G16_SFLOAT;
- case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float16x4:
return VK_FORMAT_R16G16B16A16_SFLOAT;
- case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float32:
return VK_FORMAT_R32_SFLOAT;
- case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float32x2:
return VK_FORMAT_R32G32_SFLOAT;
- case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float32x3:
return VK_FORMAT_R32G32B32_SFLOAT;
- case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float32x4:
return VK_FORMAT_R32G32B32A32_SFLOAT;
- case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::Uint32:
return VK_FORMAT_R32_UINT;
- case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::Uint32x2:
return VK_FORMAT_R32G32_UINT;
- case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::Uint32x3:
return VK_FORMAT_R32G32B32_UINT;
- case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Uint32x4:
return VK_FORMAT_R32G32B32A32_UINT;
- case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Sint32:
return VK_FORMAT_R32_SINT;
- case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Sint32x2:
return VK_FORMAT_R32G32_SINT;
- case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Sint32x3:
return VK_FORMAT_R32G32B32_SINT;
- case wgpu::VertexFormat::Int4:
+ case wgpu::VertexFormat::Sint32x4:
return VK_FORMAT_R32G32B32A32_SINT;
+ default:
+ UNREACHABLE();
}
}
@@ -220,18 +222,29 @@ namespace dawn_native { namespace vulkan {
: static_cast<VkColorComponentFlags>(0);
}
- VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorStateDescriptor* descriptor,
+ VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
bool isDeclaredInFragmentShader) {
VkPipelineColorBlendAttachmentState attachment;
- attachment.blendEnable = BlendEnabled(descriptor) ? VK_TRUE : VK_FALSE;
- attachment.srcColorBlendFactor = VulkanBlendFactor(descriptor->colorBlend.srcFactor);
- attachment.dstColorBlendFactor = VulkanBlendFactor(descriptor->colorBlend.dstFactor);
- attachment.colorBlendOp = VulkanBlendOperation(descriptor->colorBlend.operation);
- attachment.srcAlphaBlendFactor = VulkanBlendFactor(descriptor->alphaBlend.srcFactor);
- attachment.dstAlphaBlendFactor = VulkanBlendFactor(descriptor->alphaBlend.dstFactor);
- attachment.alphaBlendOp = VulkanBlendOperation(descriptor->alphaBlend.operation);
+ attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
+ if (attachment.blendEnable) {
+ attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
+ attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
+ attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
+ attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
+ attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
+ attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
+ } else {
+ // Swiftshader's Vulkan implementation appears to expect these values to be valid
+ // even when blending is not enabled.
+ attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+ attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+ attachment.colorBlendOp = VK_BLEND_OP_ADD;
+ attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+ }
attachment.colorWriteMask =
- VulkanColorWriteMask(descriptor->writeMask, isDeclaredInFragmentShader);
+ VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
return attachment;
}
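
Illustrative sketch (not part of the patch): with ColorTargetState, blending is enabled by pointing |blend| at a BlendState and disabled by leaving it null, which is the case the VK_BLEND_FACTOR_ONE/ZERO defaults above cover. Type and member names follow the WebGPU C++ bindings (webgpu_cpp.h) used in Dawn's examples and should be treated as approximate for this revision.

    wgpu::BlendState blend = {};
    blend.color.srcFactor = wgpu::BlendFactor::SrcAlpha;
    blend.color.dstFactor = wgpu::BlendFactor::OneMinusSrcAlpha;
    blend.color.operation = wgpu::BlendOperation::Add;
    blend.alpha = blend.color;

    wgpu::ColorTargetState target = {};
    target.format = wgpu::TextureFormat::BGRA8Unorm;
    target.blend = &blend;      // leaving this nullptr means "no blending"
    target.writeMask = wgpu::ColorWriteMask::All;
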
@@ -257,7 +270,7 @@ namespace dawn_native { namespace vulkan {
}
VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
- const DepthStencilStateDescriptor* descriptor) {
+ const DepthStencilState* descriptor) {
VkPipelineDepthStencilStateCreateInfo depthStencilState;
depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depthStencilState.pNext = nullptr;
@@ -306,15 +319,15 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ ResultOrError<Ref<RenderPipeline>> RenderPipeline::Create(
Device* device,
- const RenderPipelineDescriptor* descriptor) {
+ const RenderPipelineDescriptor2* descriptor) {
Ref<RenderPipeline> pipeline = AcquireRef(new RenderPipeline(device, descriptor));
DAWN_TRY(pipeline->Initialize(descriptor));
- return pipeline.Detach();
+ return pipeline;
}
- MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor2* descriptor) {
Device* device = ToBackend(GetDevice());
VkPipelineShaderStageCreateInfo shaderStages[2];
@@ -324,16 +337,16 @@ namespace dawn_native { namespace vulkan {
shaderStages[0].flags = 0;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].pSpecializationInfo = nullptr;
- shaderStages[0].module = ToBackend(descriptor->vertexStage.module)->GetHandle();
- shaderStages[0].pName = descriptor->vertexStage.entryPoint;
+ shaderStages[0].module = ToBackend(descriptor->vertex.module)->GetHandle();
+ shaderStages[0].pName = descriptor->vertex.entryPoint;
shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[1].pNext = nullptr;
shaderStages[1].flags = 0;
shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shaderStages[1].pSpecializationInfo = nullptr;
- shaderStages[1].module = ToBackend(descriptor->fragmentStage->module)->GetHandle();
- shaderStages[1].pName = descriptor->fragmentStage->entryPoint;
+ shaderStages[1].module = ToBackend(descriptor->fragment->module)->GetHandle();
+ shaderStages[1].pName = descriptor->fragment->entryPoint;
}
PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
@@ -398,11 +411,11 @@ namespace dawn_native { namespace vulkan {
ASSERT(multisample.rasterizationSamples <= 32);
VkSampleMask sampleMask = GetSampleMask();
multisample.pSampleMask = &sampleMask;
- multisample.alphaToCoverageEnable = descriptor->alphaToCoverageEnabled;
+ multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
multisample.alphaToOneEnable = VK_FALSE;
VkPipelineDepthStencilStateCreateInfo depthStencilState =
- ComputeDepthStencilDesc(GetDepthStencilStateDescriptor());
+ ComputeDepthStencilDesc(GetDepthStencilState());
// Initialize the "blend state info" that will be chained in the "create info" from the data
// pre-computed in the ColorState
@@ -411,9 +424,8 @@ namespace dawn_native { namespace vulkan {
const auto& fragmentOutputsWritten =
GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
- const ColorStateDescriptor* colorStateDescriptor = GetColorStateDescriptor(i);
- colorBlendAttachments[i] =
- ComputeColorDesc(colorStateDescriptor, fragmentOutputsWritten[i]);
+ const ColorTargetState* target = GetColorTargetState(i);
+ colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
}
VkPipelineColorBlendStateCreateInfo colorBlend;
colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
index 4613f322c35..be61fa27b44 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
@@ -26,15 +26,16 @@ namespace dawn_native { namespace vulkan {
class RenderPipeline final : public RenderPipelineBase {
public:
- static ResultOrError<RenderPipeline*> Create(Device* device,
- const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<Ref<RenderPipeline>> Create(
+ Device* device,
+ const RenderPipelineDescriptor2* descriptor);
VkPipeline GetHandle() const;
private:
~RenderPipeline() override;
using RenderPipelineBase::RenderPipelineBase;
- MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+ MaybeError Initialize(const RenderPipelineDescriptor2* descriptor);
struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
index 033f7b769ab..c5e852b5e97 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
@@ -53,10 +53,11 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- ResultOrError<Sampler*> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
+ const SamplerDescriptor* descriptor) {
Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
DAWN_TRY(sampler->Initialize(descriptor));
- return sampler.Detach();
+ return sampler;
}
MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
index 72f7d79b15b..ac7b886a3e9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
@@ -26,7 +26,8 @@ namespace dawn_native { namespace vulkan {
class Sampler final : public SamplerBase {
public:
- static ResultOrError<Sampler*> Create(Device* device, const SamplerDescriptor* descriptor);
+ static ResultOrError<Ref<Sampler>> Create(Device* device,
+ const SamplerDescriptor* descriptor);
VkSampler GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index baccde81d26..0fb4c610b41 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -14,32 +14,28 @@
#include "dawn_native/vulkan/ShaderModuleVk.h"
+#include "dawn_native/TintUtils.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <spirv_cross.hpp>
-#ifdef DAWN_ENABLE_WGSL
// Tint include must be after spirv_hlsl.hpp, because spirv-cross has its own
// version of spirv_headers. We also need to undef SPV_REVISION because SPIRV-Cross
// is at 3 while spirv-headers is at 4.
-# undef SPV_REVISION
-# include <tint/tint.h>
-#endif // DAWN_ENABLE_WGSL
+#undef SPV_REVISION
+#include <tint/tint.h>
namespace dawn_native { namespace vulkan {
// static
- ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult) {
+ ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult) {
Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
- if (module == nullptr) {
- return DAWN_VALIDATION_ERROR("Unable to create ShaderModule");
- }
DAWN_TRY(module->Initialize(parseResult));
- return module.Detach();
+ return module;
}
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
@@ -50,8 +46,9 @@ namespace dawn_native { namespace vulkan {
std::vector<uint32_t> spirv;
const std::vector<uint32_t>* spirvPtr;
+ ScopedTintICEHandler scopedICEHandler(GetDevice());
+
if (GetDevice()->IsToggleEnabled(Toggle::UseTintGenerator)) {
-#ifdef DAWN_ENABLE_WGSL
std::ostringstream errorStream;
errorStream << "Tint SPIR-V writer failure:" << std::endl;
@@ -62,7 +59,8 @@ namespace dawn_native { namespace vulkan {
tint::Program program;
DAWN_TRY_ASSIGN(program,
- RunTransforms(&transformManager, parseResult->tintProgram.get()));
+ RunTransforms(&transformManager, parseResult->tintProgram.get(),
+ CompilationMessages()));
tint::writer::spirv::Generator generator(&program);
if (!generator.Generate()) {
@@ -79,9 +77,6 @@ namespace dawn_native { namespace vulkan {
transformedParseResult.spirv = spirv;
DAWN_TRY(InitializeBase(&transformedParseResult));
-#else
- UNREACHABLE();
-#endif
} else {
DAWN_TRY(InitializeBase(parseResult));
spirvPtr = &GetSpirv();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
index 621ab0e9396..7c0d8ef841c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
@@ -26,9 +26,9 @@ namespace dawn_native { namespace vulkan {
class ShaderModule final : public ShaderModuleBase {
public:
- static ResultOrError<ShaderModule*> Create(Device* device,
- const ShaderModuleDescriptor* descriptor,
- ShaderModuleParseResult* parseResult);
+ static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor,
+ ShaderModuleParseResult* parseResult);
VkShaderModule GetHandle() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index d9f75d15da7..29c2454d04c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -35,8 +35,8 @@ namespace dawn_native { namespace vulkan {
// OldSwapChain
// static
- OldSwapChain* OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
- return new OldSwapChain(device, descriptor);
+ Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+ return AcquireRef(new OldSwapChain(device, descriptor));
}
OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
@@ -211,14 +211,13 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- ResultOrError<SwapChain*> SwapChain::Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor) {
- std::unique_ptr<SwapChain> swapchain =
- std::make_unique<SwapChain>(device, surface, descriptor);
+ ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
DAWN_TRY(swapchain->Initialize(previousSwapChain));
- return swapchain.release();
+ return swapchain;
}
SwapChain::~SwapChain() {
@@ -496,7 +495,7 @@ namespace dawn_native { namespace vulkan {
// TODO(cwallez@chromium.org): Find a way to reuse the blit texture between frames
// instead of creating a new one every time. This will involve "un-destroying" the
// texture or making the blit texture "external".
- mBlitTexture->Destroy();
+ mBlitTexture->APIDestroy();
mBlitTexture = nullptr;
}
@@ -523,7 +522,7 @@ namespace dawn_native { namespace vulkan {
presentInfo.pResults = nullptr;
// Free the texture before present so error handling doesn't skip that step.
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
VkResult result =
@@ -620,7 +619,8 @@ namespace dawn_native { namespace vulkan {
// In the happy path we can use the swapchain image directly.
if (!mConfig.needsBlit) {
- return mTexture->CreateView();
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mTexture->APICreateView();
}
// The blit texture always perfectly matches what the user requested for the swapchain.
@@ -628,17 +628,18 @@ namespace dawn_native { namespace vulkan {
TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
DAWN_TRY_ASSIGN(mBlitTexture,
Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
- return mBlitTexture->CreateView();
+ // TODO(dawn:723): change to not use AcquireRef for reentrant object creation.
+ return mBlitTexture->APICreateView();
}
void SwapChain::DetachFromSurfaceImpl() {
if (mTexture != nullptr) {
- mTexture->Destroy();
+ mTexture->APIDestroy();
mTexture = nullptr;
}
if (mBlitTexture != nullptr) {
- mBlitTexture->Destroy();
+ mBlitTexture->APIDestroy();
mBlitTexture = nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
index 24c6a5cd7da..2210379090f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
@@ -29,7 +29,7 @@ namespace dawn_native { namespace vulkan {
class OldSwapChain : public OldSwapChainBase {
public:
- static OldSwapChain* Create(Device* device, const SwapChainDescriptor* descriptor);
+ static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
protected:
OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
@@ -44,10 +44,10 @@ namespace dawn_native { namespace vulkan {
class SwapChain : public NewSwapChainBase {
public:
- static ResultOrError<SwapChain*> Create(Device* device,
- Surface* surface,
- NewSwapChainBase* previousSwapChain,
- const SwapChainDescriptor* descriptor);
+ static ResultOrError<Ref<SwapChain>> Create(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
~SwapChain() override;
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index 870b59f8dbf..59e753fd839 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -199,12 +199,12 @@ namespace dawn_native { namespace vulkan {
case wgpu::TextureDimension::e2D:
info->imageType = VK_IMAGE_TYPE_2D;
info->extent = {size.width, size.height, 1};
- info->arrayLayers = size.depth;
+ info->arrayLayers = size.depthOrArrayLayers;
break;
case wgpu::TextureDimension::e3D:
info->imageType = VK_IMAGE_TYPE_3D;
- info->extent = {size.width, size.height, size.depth};
+ info->extent = {size.width, size.height, size.depthOrArrayLayers};
info->arrayLayers = 1;
break;
@@ -456,7 +456,7 @@ namespace dawn_native { namespace vulkan {
return DAWN_VALIDATION_ERROR("Mip level count must be 1");
}
- if (descriptor->size.depth != 1) {
+ if (descriptor->size.depthOrArrayLayers != 1) {
return DAWN_VALIDATION_ERROR("Array layer count must be 1");
}
@@ -1162,11 +1162,11 @@ namespace dawn_native { namespace vulkan {
}
// static
- ResultOrError<TextureView*> TextureView::Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor) {
+ ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor) {
Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
DAWN_TRY(view->Initialize(descriptor));
- return view.Detach();
+ return view;
}
MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 9af6701198f..013a3b6217a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -167,8 +167,8 @@ namespace dawn_native { namespace vulkan {
class TextureView final : public TextureViewBase {
public:
- static ResultOrError<TextureView*> Create(TextureBase* texture,
- const TextureViewDescriptor* descriptor);
+ static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+ const TextureViewDescriptor* descriptor);
VkImageView GetHandle() const;
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index 7e58319ae25..6167201df08 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -130,7 +130,7 @@ namespace dawn_native { namespace vulkan {
region.imageOffset.z = 0;
region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
- region.imageSubresource.layerCount = copySize.depth;
+ region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
region.imageExtent.width = imageExtent.width;
diff --git a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
index 4b212996ce7..4a3f1275831 100644
--- a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
@@ -59,6 +59,8 @@ dawn_component("dawn_wire") {
configs = [ "${dawn_root}/src/common:dawn_internal" ]
sources = get_target_outputs(":dawn_wire_gen")
sources += [
+ "BufferConsumer.h",
+ "BufferConsumer_impl.h",
"ChunkedCommandHandler.cpp",
"ChunkedCommandHandler.h",
"ChunkedCommandSerializer.cpp",
@@ -67,6 +69,7 @@ dawn_component("dawn_wire") {
"WireClient.cpp",
"WireDeserializeAllocator.cpp",
"WireDeserializeAllocator.h",
+ "WireResult.h",
"WireServer.cpp",
"client/ApiObjects.h",
"client/Buffer.cpp",
@@ -82,6 +85,8 @@ dawn_component("dawn_wire") {
"client/ObjectAllocator.h",
"client/Queue.cpp",
"client/Queue.h",
+ "client/ShaderModule.cpp",
+ "client/ShaderModule.h",
"server/ObjectStorage.h",
"server/Server.cpp",
"server/Server.h",
@@ -90,6 +95,7 @@ dawn_component("dawn_wire") {
"server/ServerFence.cpp",
"server/ServerInlineMemoryTransferService.cpp",
"server/ServerQueue.cpp",
+ "server/ServerShaderModule.cpp",
]
# Make headers publicly visible
diff --git a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h
new file mode 100644
index 00000000000..3797bf40c88
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer.h
@@ -0,0 +1,85 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_H_
+#define DAWNWIRE_BUFFERCONSUMER_H_
+
+#include "dawn_wire/WireResult.h"
+
+#include <cstddef>
+
+namespace dawn_wire {
+
+ // BufferConsumer is a utility class that allows reading bytes from a buffer
+ // while simultaneously decrementing the amount of remaining space by exactly
+ // the amount read. It helps prevent bugs where incrementing a pointer and
+ // decrementing a size value are not kept in sync.
+ // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
+ template <typename BufferT>
+ class BufferConsumer {
+ static_assert(sizeof(BufferT) == 1,
+ "BufferT must be 1-byte, but may have const/volatile qualifiers.");
+
+ public:
+ BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
+ }
+
+ BufferT* Buffer() const {
+ return mBuffer;
+ }
+ size_t AvailableSize() const {
+ return mSize;
+ }
+
+ protected:
+ template <typename T, typename N>
+ WireResult NextN(N count, T** data);
+
+ template <typename T>
+ WireResult Next(T** data);
+
+ template <typename T>
+ WireResult Peek(T** data);
+
+ private:
+ BufferT* mBuffer;
+ size_t mSize;
+ };
+
+ class SerializeBuffer : public BufferConsumer<char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Next;
+ using BufferConsumer::NextN;
+ };
+
+ class DeserializeBuffer : public BufferConsumer<const volatile char> {
+ public:
+ using BufferConsumer::BufferConsumer;
+ using BufferConsumer::Peek;
+
+ template <typename T, typename N>
+ WireResult ReadN(N count, const volatile T** data) {
+ return NextN(count, data);
+ }
+
+ template <typename T>
+ WireResult Read(const volatile T** data) {
+ return Next(data);
+ }
+ };
+
+} // namespace dawn_wire
+
+#endif // DAWNWIRE_BUFFERCONSUMER_H_
\ No newline at end of file
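
Illustrative sketch (not part of the patch): how a serialization routine might use SerializeBuffer so that pointer advancement and the remaining-size bookkeeping stay in sync, as the comment above describes. WIRE_TRY is the macro added in WireResult.h later in this patch; the function and payload layout are hypothetical.

    WireResult SerializeExamplePayload(SerializeBuffer* buffer, uint32_t payloadSize) {
        uint32_t* header;
        WIRE_TRY(buffer->Next(&header));  // FatalError if fewer than 4 bytes remain
        *header = payloadSize;

        char* payload;
        WIRE_TRY(buffer->NextN(payloadSize, &payload));  // bounds-checked reservation
        // ... write payloadSize bytes into |payload| ...
        return WireResult::Success;
    }
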
diff --git a/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h
new file mode 100644
index 00000000000..fdd5fdbc726
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/BufferConsumer_impl.h
@@ -0,0 +1,73 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+#define DAWNWIRE_BUFFERCONSUMER_IMPL_H_
+
+#include "dawn_wire/BufferConsumer.h"
+
+#include <limits>
+#include <type_traits>
+
+namespace dawn_wire {
+
+ template <typename BufferT>
+ template <typename T>
+ WireResult BufferConsumer<BufferT>::Peek(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ return WireResult::Success;
+ }
+
+ template <typename BufferT>
+ template <typename T>
+ WireResult BufferConsumer<BufferT>::Next(T** data) {
+ if (sizeof(T) > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += sizeof(T);
+ mSize -= sizeof(T);
+ return WireResult::Success;
+ }
+
+ template <typename BufferT>
+ template <typename T, typename N>
+ WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
+ static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
+
+ constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+ if (count > kMaxCountWithoutOverflows) {
+ return WireResult::FatalError;
+ }
+
+ // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
+ size_t totalSize = sizeof(T) * count;
+ if (totalSize > mSize) {
+ return WireResult::FatalError;
+ }
+
+ *data = reinterpret_cast<T*>(mBuffer);
+ mBuffer += totalSize;
+ mSize -= totalSize;
+ return WireResult::Success;
+ }
+
+} // namespace dawn_wire
+
+#endif // DAWNWIRE_BUFFERCONSUMER_IMPL_H_
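
Illustrative sketch (not part of the patch): the read side is symmetric; ReadN performs the same overflow and bounds checks before exposing a typed pointer into the receive buffer. The function is hypothetical.

    WireResult DeserializeExamplePayload(DeserializeBuffer* buffer, uint32_t count) {
        const volatile uint32_t* values;
        WIRE_TRY(buffer->ReadN(count, &values));
        // Copy out of |values| before use; it aliases the volatile receive buffer.
        return WireResult::Success;
    }
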
diff --git a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
index d6d430f5888..b1f9ba4bc20 100644
--- a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
@@ -31,6 +31,8 @@ target_sources(dawn_wire PRIVATE
"${DAWN_INCLUDE_DIR}/dawn_wire/WireServer.h"
"${DAWN_INCLUDE_DIR}/dawn_wire/dawn_wire_export.h"
${DAWN_WIRE_GEN_SOURCES}
+ "BufferConsumer.h"
+ "BufferConsumer_impl.h"
"ChunkedCommandHandler.cpp"
"ChunkedCommandHandler.h"
"ChunkedCommandSerializer.cpp"
@@ -39,6 +41,7 @@ target_sources(dawn_wire PRIVATE
"WireClient.cpp"
"WireDeserializeAllocator.cpp"
"WireDeserializeAllocator.h"
+ "WireResult.h"
"WireServer.cpp"
"client/ApiObjects.h"
"client/Buffer.cpp"
@@ -54,6 +57,8 @@ target_sources(dawn_wire PRIVATE
"client/ObjectAllocator.h"
"client/Queue.cpp"
"client/Queue.h"
+ "client/ShaderModule.cpp"
+ "client/ShaderModule.h"
"server/ObjectStorage.h"
"server/Server.cpp"
"server/Server.h"
@@ -62,6 +67,7 @@ target_sources(dawn_wire PRIVATE
"server/ServerFence.cpp"
"server/ServerInlineMemoryTransferService.cpp"
"server/ServerQueue.cpp"
+ "server/ServerShaderModule.cpp"
)
target_link_libraries(dawn_wire
PUBLIC dawn_headers
diff --git a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h
index e62cb99d79c..2465f8153da 100644
--- a/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h
+++ b/chromium/third_party/dawn/src/dawn_wire/ChunkedCommandSerializer.h
@@ -32,7 +32,7 @@ namespace dawn_wire {
template <typename Cmd>
void SerializeCommand(const Cmd& cmd) {
-            SerializeCommand(cmd, 0, [](SerializeBuffer*) { return true; });
+ SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
}
template <typename Cmd, typename ExtraSizeSerializeFn>
@@ -49,7 +49,8 @@ namespace dawn_wire {
template <typename Cmd>
void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
- SerializeCommand(cmd, objectIdProvider, 0, [](SerializeBuffer*) { return true; });
+ SerializeCommand(cmd, objectIdProvider, 0,
+ [](SerializeBuffer*) { return WireResult::Success; });
}
template <typename Cmd, typename ExtraSizeSerializeFn>
@@ -79,10 +80,9 @@ namespace dawn_wire {
char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
if (allocatedBuffer != nullptr) {
SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
- bool success = true;
- success &= SerializeCmd(cmd, requiredSize, &serializeBuffer);
- success &= SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(!success)) {
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
mSerializer->OnSerializeError();
}
}
@@ -94,10 +94,9 @@ namespace dawn_wire {
return;
}
SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
- bool success = true;
- success &= SerializeCmd(cmd, requiredSize, &serializeBuffer);
- success &= SerializeExtraSize(&serializeBuffer);
- if (DAWN_UNLIKELY(!success)) {
+ WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+ WireResult r2 = SerializeExtraSize(&serializeBuffer);
+ if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
mSerializer->OnSerializeError();
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireResult.h b/chromium/third_party/dawn/src/dawn_wire/WireResult.h
new file mode 100644
index 00000000000..fc0deb3c86c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/WireResult.h
@@ -0,0 +1,38 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_WIRERESULT_H_
+#define DAWNWIRE_WIRERESULT_H_
+
+#include "common/Compiler.h"
+
+namespace dawn_wire {
+
+ enum class DAWN_NO_DISCARD WireResult {
+ Success,
+ FatalError,
+ };
+
+// Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
+#define WIRE_TRY(EXPR) \
+ do { \
+ WireResult exprResult = EXPR; \
+ if (DAWN_UNLIKELY(exprResult != WireResult::Success)) { \
+ return exprResult; \
+ } \
+ } while (0)
+
+} // namespace dawn_wire
+
+#endif // DAWNWIRE_WIRERESULT_H_
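
Illustrative sketch (not part of the patch): WIRE_TRY gives the early-return style that replaces the bool "&=" accumulation in ChunkedCommandSerializer above; any non-Success result stops serialization immediately. The two helpers are hypothetical.

    WireResult SerializeCmdAndExtras(SerializeBuffer* buffer) {
        WIRE_TRY(SerializeCmdPart(buffer));    // returns FatalError to the caller on failure
        WIRE_TRY(SerializeExtraPart(buffer));  // only runs if the previous step succeeded
        return WireResult::Success;
    }
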
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h b/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
index f842d53f469..8ec482a971f 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
@@ -21,6 +21,7 @@
#include "dawn_wire/client/Device.h"
#include "dawn_wire/client/Fence.h"
#include "dawn_wire/client/Queue.h"
+#include "dawn_wire/client/ShaderModule.h"
#include "dawn_wire/client/ApiObjects_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
index 4f4598423f0..3c7519c6ed8 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
@@ -14,6 +14,7 @@
#include "dawn_wire/client/Buffer.h"
+#include "dawn_wire/BufferConsumer_impl.h"
#include "dawn_wire/WireCmd_autogen.h"
#include "dawn_wire/client/Client.h"
#include "dawn_wire/client/Device.h"
@@ -77,11 +78,11 @@ namespace dawn_wire { namespace client {
wireClient->SerializeCommand(
cmd, writeHandleCreateInfoLength, [&](SerializeBuffer* serializeBuffer) {
if (descriptor->mappedAtCreation) {
- if (serializeBuffer->AvailableSize() != writeHandleCreateInfoLength) {
- return false;
- }
+ char* writeHandleBuffer;
+ WIRE_TRY(
+ serializeBuffer->NextN(writeHandleCreateInfoLength, &writeHandleBuffer));
// Serialize the WriteHandle into the space after the command.
- writeHandle->SerializeCreate(serializeBuffer->Buffer());
+ writeHandle->SerializeCreate(writeHandleBuffer);
// Set the buffer state for the mapping at creation. The buffer now owns the
                    // write handle.
@@ -90,7 +91,7 @@ namespace dawn_wire { namespace client {
buffer->mMapOffset = 0;
buffer->mMapSize = buffer->mSize;
}
- return true;
+ return WireResult::Success;
});
return ToAPI(buffer);
}
@@ -207,22 +208,21 @@ namespace dawn_wire { namespace client {
cmd.handleCreateInfoLength = request.readHandle->SerializeCreateSize();
client->SerializeCommand(
cmd, cmd.handleCreateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- bool success = serializeBuffer->AvailableSize() == cmd.handleCreateInfoLength;
- if (success) {
- request.readHandle->SerializeCreate(serializeBuffer->Buffer());
- }
- return success;
+ char* readHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(cmd.handleCreateInfoLength, &readHandleBuffer));
+ request.readHandle->SerializeCreate(readHandleBuffer);
+ return WireResult::Success;
});
} else {
ASSERT(isWriteMode);
cmd.handleCreateInfoLength = request.writeHandle->SerializeCreateSize();
client->SerializeCommand(
cmd, cmd.handleCreateInfoLength, [&](SerializeBuffer* serializeBuffer) {
- bool success = serializeBuffer->AvailableSize() == cmd.handleCreateInfoLength;
- if (success) {
- request.writeHandle->SerializeCreate(serializeBuffer->Buffer());
- }
- return success;
+ char* writeHandleBuffer;
+ WIRE_TRY(
+ serializeBuffer->NextN(cmd.handleCreateInfoLength, &writeHandleBuffer));
+ request.writeHandle->SerializeCreate(writeHandleBuffer);
+ return WireResult::Success;
});
}
@@ -352,13 +352,13 @@ namespace dawn_wire { namespace client {
client->SerializeCommand(
cmd, writeFlushInfoLength, [&](SerializeBuffer* serializeBuffer) {
- bool success = serializeBuffer->AvailableSize() == writeFlushInfoLength;
- if (success) {
- // Serialize flush metadata into the space after the command.
- // This closes the handle for writing.
- mWriteHandle->SerializeFlush(serializeBuffer->Buffer());
- }
- return success;
+ char* writeHandleBuffer;
+ WIRE_TRY(serializeBuffer->NextN(writeFlushInfoLength, &writeHandleBuffer));
+
+ // Serialize flush metadata into the space after the command.
+ // This closes the handle for writing.
+ mWriteHandle->SerializeFlush(writeHandleBuffer);
+ return WireResult::Success;
});
mWriteHandle = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index 2c6e23fbb14..4073baa2ff5 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -122,4 +122,15 @@ namespace dawn_wire { namespace client {
return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
}
+ bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
+ uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ // The shader module might have been deleted or recreated so this isn't an error.
+ if (shaderModule == nullptr) {
+ return true;
+ }
+ return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+ }
+
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index 3a9189ec506..b4b46790be6 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -277,7 +277,7 @@ namespace dawn_wire { namespace client {
return true;
}
- void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor2 const* descriptor,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata) {
if (client->IsDisconnected()) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
index c72200be3a6..5de8f771a46 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
@@ -45,7 +45,7 @@ namespace dawn_wire { namespace client {
void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
WGPUCreateComputePipelineAsyncCallback callback,
void* userdata);
- void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+ void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor2 const* descriptor,
WGPUCreateRenderPipelineAsyncCallback callback,
void* userdata);
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
index 8c9f78b53b1..6d4da45070c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
@@ -90,7 +90,7 @@ namespace dawn_wire { namespace client {
client->SerializeCommand(cmd);
}
- void Queue::WriteTexture(const WGPUTextureCopyView* destination,
+ void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
const void* data,
size_t dataSize,
const WGPUTextureDataLayout* dataLayout,
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
index f14fae1440e..46a2fb58b0b 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
@@ -37,7 +37,7 @@ namespace dawn_wire { namespace client {
void* userdata);
WGPUFence CreateFence(const WGPUFenceDescriptor* descriptor);
void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
- void WriteTexture(const WGPUTextureCopyView* destination,
+ void WriteTexture(const WGPUImageCopyTexture* destination,
const void* data,
size_t dataSize,
const WGPUTextureDataLayout* dataLayout,
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
new file mode 100644
index 00000000000..97e0204c924
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.cpp
@@ -0,0 +1,66 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_wire/client/ShaderModule.h"
+
+#include "dawn_wire/client/Client.h"
+
+namespace dawn_wire { namespace client {
+
+ ShaderModule::~ShaderModule() {
+ // Callbacks need to be fired in all cases, as they can handle freeing resources. So we call
+ // them with "Unknown" status.
+ for (auto& it : mCompilationInfoRequests) {
+ if (it.second.callback) {
+ it.second.callback(WGPUCompilationInfoRequestStatus_Unknown, nullptr,
+ it.second.userdata);
+ }
+ }
+ mCompilationInfoRequests.clear();
+ }
+
+ void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
+ if (client->IsDisconnected()) {
+ callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
+ return;
+ }
+
+ uint64_t serial = mCompilationInfoRequestSerial++;
+ ShaderModuleGetCompilationInfoCmd cmd;
+ cmd.shaderModuleId = this->id;
+ cmd.requestSerial = serial;
+
+ mCompilationInfoRequests[serial] = {callback, userdata};
+
+ client->SerializeCommand(cmd);
+ }
+
+ bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info) {
+ auto requestIt = mCompilationInfoRequests.find(requestSerial);
+ if (requestIt == mCompilationInfoRequests.end()) {
+ return false;
+ }
+
+ // Remove the request data so that the callback cannot be called again.
+ // ex.) inside the callback: if the shader module is deleted, all callbacks reject.
+ CompilationInfoRequest request = std::move(requestIt->second);
+ mCompilationInfoRequests.erase(requestIt);
+
+ request.callback(status, info, request.userdata);
+ return true;
+ }
+
+}} // namespace dawn_wire::client
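
Illustrative sketch (not part of the patch): the application-facing flow that the request map above implements. The callback signature matches the one invoked in this file; the C entry point is assumed to be the generated wgpuShaderModuleGetCompilationInfo.

    void OnCompilationInfo(WGPUCompilationInfoRequestStatus status,
                           const WGPUCompilationInfo* info,
                           void* userdata) {
        if (status != WGPUCompilationInfoRequestStatus_Success) {
            return;  // e.g. DeviceLost, or Unknown if the module was destroyed first
        }
        // |info| carries the compiler diagnostics for the module.
    }

    // After creating the shader module on the wire client:
    // wgpuShaderModuleGetCompilationInfo(shaderModule, OnCompilationInfo, nullptr);
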
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
new file mode 100644
index 00000000000..add5b975ffd
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ShaderModule.h
@@ -0,0 +1,46 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_SHADER_MODULE_H_
+#define DAWNWIRE_CLIENT_SHADER_MODULE_H_
+
+#include <dawn/webgpu.h>
+
+#include "common/SerialMap.h"
+#include "dawn_wire/client/ObjectBase.h"
+
+namespace dawn_wire { namespace client {
+
+ class ShaderModule final : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+ ~ShaderModule();
+
+ void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
+ bool GetCompilationInfoCallback(uint64_t requestSerial,
+ WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info);
+
+ private:
+ struct CompilationInfoRequest {
+ WGPUCompilationInfoCallback callback = nullptr;
+ void* userdata = nullptr;
+ };
+ uint64_t mCompilationInfoRequestSerial = 0;
+ std::map<uint64_t, CompilationInfoRequest> mCompilationInfoRequests;
+ };
+
+}} // namespace dawn_wire::client
+
+#endif // DAWNWIRE_CLIENT_SHADER_MODULE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 1124cba2106..5baea199ea9 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -141,6 +141,13 @@ namespace dawn_wire { namespace server {
uint64_t requestSerial;
};
+ struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
+ using CallbackUserdata::CallbackUserdata;
+
+ ObjectHandle shaderModule;
+ uint64_t requestSerial;
+ };
+
struct QueueWorkDoneUserdata : CallbackUserdata {
using CallbackUserdata::CallbackUserdata;
@@ -218,6 +225,9 @@ namespace dawn_wire { namespace server {
WGPURenderPipeline pipeline,
const char* message,
CreatePipelineAsyncUserData* userdata);
+ void OnShaderModuleGetCompilationInfo(WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info,
+ ShaderModuleGetCompilationInfoUserdata* userdata);
#include "dawn_wire/server/ServerPrototypes_autogen.inc"
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
index 8aaf4ffda7c..b9fda443177 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
@@ -13,6 +13,7 @@
// limitations under the License.
#include "common/Assert.h"
+#include "dawn_wire/BufferConsumer_impl.h"
#include "dawn_wire/WireCmd_autogen.h"
#include "dawn_wire/server/Server.h"
@@ -251,12 +252,12 @@ namespace dawn_wire { namespace server {
SerializeCommand(cmd, cmd.readInitialDataInfoLength, [&](SerializeBuffer* serializeBuffer) {
if (isSuccess) {
if (isRead) {
- if (serializeBuffer->AvailableSize() != cmd.readInitialDataInfoLength) {
- return false;
- }
+ char* readHandleBuffer;
+ WIRE_TRY(
+ serializeBuffer->NextN(cmd.readInitialDataInfoLength, &readHandleBuffer));
+
// Serialize the initialization message into the space after the command.
- data->readHandle->SerializeInitialData(readData, data->size,
- serializeBuffer->Buffer());
+ data->readHandle->SerializeInitialData(readData, data->size, readHandleBuffer);
// The in-flight map request returned successfully.
// Move the ReadHandle so it is owned by the buffer.
bufferData->readHandle = std::move(data->readHandle);
@@ -271,7 +272,7 @@ namespace dawn_wire { namespace server {
data->size);
}
}
- return true;
+ return WireResult::Success;
});
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 19e6298aa14..9ce6ce4d08c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -149,10 +149,11 @@ namespace dawn_wire { namespace server {
SerializeCommand(cmd);
}
- bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
- uint64_t requestSerial,
- ObjectHandle pipelineObjectHandle,
- const WGPURenderPipelineDescriptor* descriptor) {
+ bool Server::DoDeviceCreateRenderPipelineAsync(
+ ObjectId deviceId,
+ uint64_t requestSerial,
+ ObjectHandle pipelineObjectHandle,
+ const WGPURenderPipelineDescriptor2* descriptor) {
auto* device = DeviceObjects().Get(deviceId);
if (device == nullptr) {
return false;
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
index 9ab8bc0bbb1..f194b32b2a0 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
@@ -97,7 +97,7 @@ namespace dawn_wire { namespace server {
}
bool Server::DoQueueWriteTextureInternal(ObjectId queueId,
- const WGPUTextureCopyView* destination,
+ const WGPUImageCopyTexture* destination,
const uint8_t* data,
uint64_t dataSize,
const WGPUTextureDataLayout* dataLayout,
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp
new file mode 100644
index 00000000000..cec0dc4db30
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerShaderModule.cpp
@@ -0,0 +1,51 @@
+// Copyright 2021 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_wire/server/Server.h"
+
+#include <memory>
+
+namespace dawn_wire { namespace server {
+
+ bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
+ auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
+ if (shaderModule == nullptr) {
+ return false;
+ }
+
+ auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
+ userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
+ userdata->requestSerial = requestSerial;
+
+ mProcs.shaderModuleGetCompilationInfo(
+ shaderModule->handle,
+ ForwardToServer<decltype(&Server::OnShaderModuleGetCompilationInfo)>::Func<
+ &Server::OnShaderModuleGetCompilationInfo>(),
+ userdata.release());
+ return true;
+ }
+
+ void Server::OnShaderModuleGetCompilationInfo(WGPUCompilationInfoRequestStatus status,
+ const WGPUCompilationInfo* info,
+ ShaderModuleGetCompilationInfoUserdata* data) {
+ ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
+ cmd.shaderModule = data->shaderModule;
+ cmd.requestSerial = data->requestSerial;
+ cmd.status = status;
+ cmd.info = info;
+
+ SerializeCommand(cmd);
+ }
+
+}} // namespace dawn_wire::server
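On the server side, each native asynchronous call carries a small userdata struct holding the object handle and request serial, and the completion is turned back into a Return* wire command. A simplified stand-alone sketch of that round trip (names are illustrative, not the generated Dawn wire types):

    #include <cstdint>
    #include <memory>

    struct CompilationReplyCmd {
        uint64_t requestSerial;
        int status;
    };

    class Server {
      public:
        void GetCompilationInfo(uint64_t requestSerial) {
            auto userdata = std::make_unique<Userdata>();
            userdata->server = this;
            userdata->requestSerial = requestSerial;
            // Hand ownership to the C-style API; the trampoline takes it back.
            NativeGetCompilationInfo(&Server::Trampoline, userdata.release());
        }

      private:
        struct Userdata {
            Server* server;
            uint64_t requestSerial;
        };

        static void Trampoline(int status, void* opaque) {
            std::unique_ptr<Userdata> userdata(static_cast<Userdata*>(opaque));
            userdata->server->OnCompilationInfo(status, userdata.get());
        }

        void OnCompilationInfo(int status, Userdata* data) {
            CompilationReplyCmd cmd{data->requestSerial, status};
            SerializeCommand(cmd);  // reply goes back over the wire to the client
        }

        void SerializeCommand(const CompilationReplyCmd&) { /* write to the wire */ }

        // Stand-in for the native async entry point (assumption for illustration);
        // it completes synchronously here to keep the sketch runnable.
        static void NativeGetCompilationInfo(void (*cb)(int, void*), void* userdata) {
            cb(/*status=*/0, userdata);
        }
    };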
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index 035ad96188e..b85a988946b 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -19,10 +19,14 @@
#include <dawn_native/DawnNative.h>
#include <DXGI1_4.h>
+#include <d3d12.h>
#include <windows.h>
#include <wrl/client.h>
+#include <memory>
+
struct ID3D12Device;
+struct ID3D12Resource;
namespace dawn_native { namespace d3d12 {
DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
@@ -44,14 +48,42 @@ namespace dawn_native { namespace d3d12 {
public:
ExternalImageDescriptorDXGISharedHandle();
+ // Note: SharedHandle must be a handle to a texture object.
HANDLE sharedHandle;
+ };
+
+ struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
+ : ExternalImageAccessDescriptor {
+ public:
uint64_t acquireMutexKey;
bool isSwapChainTexture = false;
};
- // Note: SharedHandle must be a handle to a texture object.
- DAWN_NATIVE_EXPORT WGPUTexture
- WrapSharedHandle(WGPUDevice device, const ExternalImageDescriptorDXGISharedHandle* descriptor);
+ class DAWN_NATIVE_EXPORT ExternalImageDXGI {
+ public:
+ // Note: SharedHandle must be a handle to a texture object.
+ static std::unique_ptr<ExternalImageDXGI> Create(
+ WGPUDevice device,
+ const ExternalImageDescriptorDXGISharedHandle* descriptor);
+
+ WGPUTexture ProduceTexture(WGPUDevice device,
+ const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);
+
+ private:
+ ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
+ const WGPUTextureDescriptor* descriptor);
+
+ Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;
+
+        // Contents of the WGPUTextureDescriptor are copied into individual members
+        // because this image may outlive the descriptor used to create it.
+ WGPUTextureUsageFlags mUsage;
+ WGPUTextureDimension mDimension;
+ WGPUExtent3D mSize;
+ WGPUTextureFormat mFormat;
+ uint32_t mMipLevelCount;
+ uint32_t mSampleCount;
+ };
struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
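For comparison with the removed WrapSharedHandle entry point, the intended call pattern for the new ExternalImageDXGI object is roughly the sketch below. The cTextureDescriptor field and the exact usage flag are assumptions based on the surrounding Dawn headers, and the ExternalImageDXGI is returned to the caller because produced textures presumably must not outlive it.

    #include <dawn_native/D3D12Backend.h>
    #include <memory>

    // Create the reusable wrapper once; keep it alive while textures produced
    // from it are in use (assumed lifetime requirement).
    std::unique_ptr<dawn_native::d3d12::ExternalImageDXGI> ImportSharedImage(
        WGPUDevice device,
        HANDLE sharedHandle,
        const WGPUTextureDescriptor* textureDesc) {
        dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle desc;
        desc.cTextureDescriptor = textureDesc;  // inherited field, name assumed
        desc.sharedHandle = sharedHandle;
        return dawn_native::d3d12::ExternalImageDXGI::Create(device, &desc);
    }

    // Per access: acquire the keyed mutex and tell Dawn how the texture is used.
    WGPUTexture BeginAccess(dawn_native::d3d12::ExternalImageDXGI* image, WGPUDevice device) {
        dawn_native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex accessDesc;
        accessDesc.isInitialized = true;
        accessDesc.usage = WGPUTextureUsage_Sampled;
        accessDesc.acquireMutexKey = 0;
        accessDesc.isSwapChainTexture = false;
        return image->ProduceTexture(device, &accessDesc);
    }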
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index f161d4ae9fd..6e1b830cb14 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -228,6 +228,12 @@ namespace dawn_native {
ExternalImageDescriptor(ExternalImageType type);
};
+ struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
+ public:
+ bool isInitialized; // Whether the texture is initialized on import
+ WGPUTextureUsageFlags usage;
+ };
+
struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
public:
const ExternalImageType type;
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
index a0baaf2eae9..18d2fb536eb 100644
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/tests/BUILD.gn
@@ -192,6 +192,7 @@ test("dawn_unittests") {
"unittests/validation/DrawIndirectValidationTests.cpp",
"unittests/validation/DynamicStateCommandValidationTests.cpp",
"unittests/validation/ErrorScopeValidationTests.cpp",
+ "unittests/validation/ExternalTextureTests.cpp",
"unittests/validation/FenceValidationTests.cpp",
"unittests/validation/GetBindGroupLayoutValidationTests.cpp",
"unittests/validation/IndexBufferValidationTests.cpp",
@@ -238,6 +239,10 @@ test("dawn_unittests") {
"unittests/wire/WireWGPUDevicePropertiesTests.cpp",
]
+ if (is_win) {
+ sources += [ "unittests/WindowsUtilsTests.cpp" ]
+ }
+
if (dawn_enable_d3d12) {
sources += [ "unittests/d3d12/CopySplitTests.cpp" ]
}
@@ -264,7 +269,9 @@ source_set("dawn_end2end_tests_sources") {
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawn_proc",
"${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native",
+
+ # Statically linked because the end2end white_box tests use Dawn internals.
+ "${dawn_root}/src/dawn_native:dawn_native_static",
"${dawn_root}/src/dawn_wire",
"${dawn_root}/src/utils:dawn_utils",
]
@@ -302,6 +309,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/DrawTests.cpp",
"end2end/DynamicBufferOffsetTests.cpp",
"end2end/EntryPointTests.cpp",
+ "end2end/ExternalTextureTests.cpp",
"end2end/FenceTests.cpp",
"end2end/FirstIndexOffsetTests.cpp",
"end2end/GpuMemorySynchronizationTests.cpp",
@@ -313,6 +321,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/ObjectCachingTests.cpp",
"end2end/OpArrayLengthTests.cpp",
"end2end/PipelineLayoutTests.cpp",
+ "end2end/PrimitiveStateTests.cpp",
"end2end/PrimitiveTopologyTests.cpp",
"end2end/QueryTests.cpp",
"end2end/QueueTests.cpp",
@@ -331,6 +340,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/TextureSubresourceTests.cpp",
"end2end/TextureViewTests.cpp",
"end2end/TextureZeroInitTests.cpp",
+ "end2end/VertexBufferRobustnessTests.cpp",
"end2end/VertexFormatTests.cpp",
"end2end/VertexStateTests.cpp",
"end2end/ViewportOrientationTests.cpp",
@@ -362,10 +372,6 @@ source_set("dawn_end2end_tests_sources") {
frameworks = [ "IOSurface.framework" ]
}
- if (dawn_enable_wgsl) {
- sources += [ "end2end/VertexBufferRobustnessTests.cpp" ]
- }
-
if (dawn_enable_opengl) {
assert(dawn_supports_glfw_for_windowing)
}
@@ -390,12 +396,11 @@ source_set("dawn_white_box_tests_sources") {
"${dawn_root}/src/dawn:dawn_proc",
"${dawn_root}/src/dawn:dawncpp",
"${dawn_root}/src/dawn_native:dawn_native_sources",
- "${dawn_root}/src/dawn_wire",
- "${dawn_root}/src/utils:dawn_utils",
- # Static because the tests both link against and have dawn_native
- # sources. MSVC errors when both importing and exporting symbols.
+ # Statically linked because the end2end white_box tests use Dawn internals.
"${dawn_root}/src/dawn_native:dawn_native_static",
+ "${dawn_root}/src/dawn_wire",
+ "${dawn_root}/src/utils:dawn_utils",
]
sources = [ "DawnTest.h" ]
@@ -448,7 +453,7 @@ test("dawn_end2end_tests") {
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawn_proc",
"${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn_native",
+ "${dawn_root}/src/dawn_native:dawn_native_static",
"${dawn_root}/src/dawn_wire",
"${dawn_root}/src/utils:dawn_utils",
]
diff --git a/chromium/third_party/dawn/src/utils/BUILD.gn b/chromium/third_party/dawn/src/utils/BUILD.gn
index 1884d8ffb1c..64431bfd378 100644
--- a/chromium/third_party/dawn/src/utils/BUILD.gn
+++ b/chromium/third_party/dawn/src/utils/BUILD.gn
@@ -88,9 +88,9 @@ static_library("dawn_utils") {
deps = [
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawn_proc",
- "${dawn_root}/src/dawn_native",
+ "${dawn_root}/src/dawn_native:dawn_native_headers",
"${dawn_root}/src/dawn_wire",
- "${dawn_shaderc_dir}:libshaderc",
+ "${dawn_spirv_tools_dir}:spvtools_opt",
]
libs = []
frameworks = []
diff --git a/chromium/third_party/dawn/src/utils/CMakeLists.txt b/chromium/third_party/dawn/src/utils/CMakeLists.txt
index f553d97438f..59b2a1943ff 100644
--- a/chromium/third_party/dawn/src/utils/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/utils/CMakeLists.txt
@@ -44,7 +44,7 @@ target_link_libraries(dawn_utils
dawn_native
dawn_proc
dawn_wire
- shaderc
+ SPIRV-Tools-opt
glfw
)
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
index 558fe78c001..c1ecb5bd243 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
@@ -18,6 +18,8 @@
namespace utils {
+ // For creating deprecated render pipeline descriptors
+
ComboVertexStateDescriptor::ComboVertexStateDescriptor() {
wgpu::VertexStateDescriptor* descriptor = this;
@@ -28,7 +30,7 @@ namespace utils {
wgpu::VertexAttributeDescriptor vertexAttribute;
vertexAttribute.shaderLocation = 0;
vertexAttribute.offset = 0;
- vertexAttribute.format = wgpu::VertexFormat::Float;
+ vertexAttribute.format = wgpu::VertexFormat::Float32;
for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
cAttributes[i] = vertexAttribute;
}
@@ -114,4 +116,103 @@ namespace utils {
}
}
+ ComboRenderPipelineDescriptor2::ComboRenderPipelineDescriptor2() {
+ wgpu::RenderPipelineDescriptor2* descriptor = this;
+
+ // Set defaults for the vertex state.
+ {
+ wgpu::VertexState* vertex = &descriptor->vertex;
+ vertex->module = nullptr;
+ vertex->entryPoint = "main";
+ vertex->bufferCount = 0;
+
+ // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+ for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+ cAttributes[i].shaderLocation = 0;
+ cAttributes[i].offset = 0;
+ cAttributes[i].format = wgpu::VertexFormat::Float32;
+ }
+ for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+ cBuffers[i].arrayStride = 0;
+ cBuffers[i].stepMode = wgpu::InputStepMode::Vertex;
+ cBuffers[i].attributeCount = 0;
+ cBuffers[i].attributes = nullptr;
+ }
+ // cBuffers[i].attributes points to somewhere in cAttributes.
+ // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
+ // cBuffers[2].attributes should point to &cAttributes[5].
+ cBuffers[0].attributes = &cAttributes[0];
+ vertex->buffers = &cBuffers[0];
+ }
+
+ // Set the defaults for the primitive state
+ {
+ wgpu::PrimitiveState* primitive = &descriptor->primitive;
+ primitive->topology = wgpu::PrimitiveTopology::TriangleList;
+ primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
+ primitive->frontFace = wgpu::FrontFace::CCW;
+ primitive->cullMode = wgpu::CullMode::None;
+ }
+
+ // Set the defaults for the depth-stencil state
+ {
+ wgpu::StencilFaceState stencilFace;
+ stencilFace.compare = wgpu::CompareFunction::Always;
+ stencilFace.failOp = wgpu::StencilOperation::Keep;
+ stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+ stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+ cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
+ cDepthStencil.depthWriteEnabled = false;
+ cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+ cDepthStencil.stencilBack = stencilFace;
+ cDepthStencil.stencilFront = stencilFace;
+ cDepthStencil.stencilReadMask = 0xff;
+ cDepthStencil.stencilWriteMask = 0xff;
+ cDepthStencil.depthBias = 0;
+ cDepthStencil.depthBiasSlopeScale = 0.0;
+ cDepthStencil.depthBiasClamp = 0.0;
+ }
+
+ // Set the defaults for the multisample state
+ {
+ wgpu::MultisampleState* multisample = &descriptor->multisample;
+ multisample->count = 1;
+ multisample->mask = 0xFFFFFFFF;
+ multisample->alphaToCoverageEnabled = false;
+ }
+
+ // Set the defaults for the fragment state
+ {
+ cFragment.module = nullptr;
+ cFragment.entryPoint = "main";
+ cFragment.targetCount = 1;
+ cFragment.targets = &cTargets[0];
+ descriptor->fragment = &cFragment;
+
+ wgpu::BlendComponent blendComponent;
+ blendComponent.srcFactor = wgpu::BlendFactor::One;
+ blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+ blendComponent.operation = wgpu::BlendOperation::Add;
+
+ for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+ cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
+ cTargets[i].blend = nullptr;
+ cTargets[i].writeMask = wgpu::ColorWriteMask::All;
+
+ cBlends[i].color = blendComponent;
+ cBlends[i].alpha = blendComponent;
+ }
+ }
+ }
+
+ wgpu::DepthStencilState* ComboRenderPipelineDescriptor2::EnableDepthStencil(
+ wgpu::TextureFormat format) {
+ this->depthStencil = &cDepthStencil;
+ cDepthStencil.format = format;
+ return &cDepthStencil;
+ }
+
} // namespace utils
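ComboRenderPipelineDescriptor2 fills in every sub-state with the defaults above so a test only overrides what it cares about. A hedged usage sketch, assuming the shader modules already exist and that the device exposes CreateRenderPipeline2 for the new descriptor, as in this snapshot:

    #include "utils/ComboRenderPipelineDescriptor.h"

    wgpu::RenderPipeline MakePipeline(const wgpu::Device& device,
                                      wgpu::ShaderModule vsModule,
                                      wgpu::ShaderModule fsModule) {
        utils::ComboRenderPipelineDescriptor2 desc;
        desc.vertex.module = vsModule;    // entryPoint already defaults to "main"
        desc.cFragment.module = fsModule;
        desc.cTargets[0].format = wgpu::TextureFormat::BGRA8Unorm;

        // Opt into depth testing; the helper wires up cDepthStencil and returns
        // it so individual fields can still be tweaked.
        wgpu::DepthStencilState* depthStencil =
            desc.EnableDepthStencil(wgpu::TextureFormat::Depth24PlusStencil8);
        depthStencil->depthWriteEnabled = true;
        depthStencil->depthCompare = wgpu::CompareFunction::Less;

        return device.CreateRenderPipeline2(&desc);
    }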
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
index ce8eb308b14..0624169834c 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
@@ -23,6 +23,7 @@
namespace utils {
+ // For creating deprecated render pipeline descriptors
class ComboVertexStateDescriptor : public wgpu::VertexStateDescriptor {
public:
ComboVertexStateDescriptor();
@@ -53,6 +54,28 @@ namespace utils {
wgpu::DepthStencilStateDescriptor cDepthStencilState;
};
+ // For creating the new style of render pipeline descriptors
+ class ComboRenderPipelineDescriptor2 : public wgpu::RenderPipelineDescriptor2 {
+ public:
+ ComboRenderPipelineDescriptor2();
+
+ ComboRenderPipelineDescriptor2(const ComboRenderPipelineDescriptor2&) = delete;
+ ComboRenderPipelineDescriptor2& operator=(const ComboRenderPipelineDescriptor2&) = delete;
+ ComboRenderPipelineDescriptor2(ComboRenderPipelineDescriptor2&&) = delete;
+ ComboRenderPipelineDescriptor2& operator=(ComboRenderPipelineDescriptor2&&) = delete;
+
+ wgpu::DepthStencilState* EnableDepthStencil(
+ wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
+
+ std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
+ std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+ std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
+ std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
+
+ wgpu::FragmentState cFragment;
+ wgpu::DepthStencilState cDepthStencil;
+ };
+
} // namespace utils
#endif // UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.cpp b/chromium/third_party/dawn/src/utils/TestUtils.cpp
index 6cbaa8766e5..673ad78732a 100644
--- a/chromium/third_party/dawn/src/utils/TestUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TestUtils.cpp
@@ -40,7 +40,8 @@ namespace utils {
TextureDataCopyLayout layout;
layout.mipSize = {textureSizeAtLevel0.width >> mipmapLevel,
- textureSizeAtLevel0.height >> mipmapLevel, textureSizeAtLevel0.depth};
+ textureSizeAtLevel0.height >> mipmapLevel,
+ textureSizeAtLevel0.depthOrArrayLayers};
layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
@@ -83,7 +84,7 @@ namespace utils {
ASSERT(copyExtent.height % blockHeight == 0);
uint32_t heightInBlocks = copyExtent.height / blockHeight;
return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
- copyExtent.depth, blockSize);
+ copyExtent.depthOrArrayLayers, blockSize);
}
uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
@@ -123,13 +124,14 @@ namespace utils {
descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
wgpu::Texture texture = device.CreateTexture(&descriptor);
- wgpu::TextureCopyView textureCopyView = utils::CreateTextureCopyView(texture, 0, {0, 0, 0});
+ wgpu::ImageCopyTexture imageCopyTexture =
+ utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
wgpu::TextureDataLayout textureDataLayout =
utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
wgpu::Extent3D copyExtent = {1, 1, 1};
// WriteTexture with exactly 1 byte of data.
- device.GetQueue().WriteTexture(&textureCopyView, data.data(), 1, &textureDataLayout,
+ device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
&copyExtent);
}
} // namespace utils
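With TextureCopyView renamed to ImageCopyTexture and Extent3D::depth renamed to depthOrArrayLayers, a typical upload through these helpers looks roughly like the following sketch (RGBA8, 4 bytes per texel assumed):

    #include "utils/WGPUHelpers.h"

    // Write `width` x `height` texels of tightly packed RGBA8 data to mip 0.
    void UploadTexels(const wgpu::Device& device,
                      wgpu::Texture texture,
                      const void* data,
                      uint32_t width,
                      uint32_t height) {
        wgpu::ImageCopyTexture dst = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::TextureDataLayout layout =
            utils::CreateTextureDataLayout(0, /*bytesPerRow=*/width * 4);
        wgpu::Extent3D extent = {width, height, 1};  // depthOrArrayLayers == 1

        size_t dataSize = size_t(width) * height * 4;
        device.GetQueue().WriteTexture(&dst, data, dataSize, &layout, &extent);
    }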
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index 8ac53873089..d8f7f3fd53f 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -17,131 +17,53 @@
#include "common/Constants.h"
#include "common/Log.h"
-#include <shaderc/shaderc.hpp>
+#include "spirv-tools/optimizer.hpp"
#include <cstring>
#include <iomanip>
+#include <limits>
#include <mutex>
#include <sstream>
namespace utils {
- namespace {
-
- shaderc_shader_kind ShadercShaderKind(SingleShaderStage stage) {
- switch (stage) {
- case SingleShaderStage::Vertex:
- return shaderc_glsl_vertex_shader;
- case SingleShaderStage::Fragment:
- return shaderc_glsl_fragment_shader;
- case SingleShaderStage::Compute:
- return shaderc_glsl_compute_shader;
- default:
- UNREACHABLE();
- }
- }
+ wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
+ // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
+ // aren't RAII, we don't return directly on success and instead always go through the code
+ // path that destroys the SPIRV-Tools objects.
+ wgpu::ShaderModule result = nullptr;
+
+ spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+ ASSERT(context != nullptr);
- wgpu::ShaderModule CreateShaderModuleFromResult(
- const wgpu::Device& device,
- const shaderc::SpvCompilationResult& result) {
- // result.cend and result.cbegin return pointers to uint32_t.
- const uint32_t* resultBegin = result.cbegin();
- const uint32_t* resultEnd = result.cend();
- // So this size is in units of sizeof(uint32_t).
- ptrdiff_t resultSize = resultEnd - resultBegin;
- // SetSource takes data as uint32_t*.
+ spv_binary spirv = nullptr;
+ spv_diagnostic diagnostic = nullptr;
+ if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
+ ASSERT(spirv != nullptr);
+ ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
- spirvDesc.codeSize = static_cast<uint32_t>(resultSize);
- spirvDesc.code = result.cbegin();
+ spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
+ spirvDesc.code = spirv->code;
wgpu::ShaderModuleDescriptor descriptor;
descriptor.nextInChain = &spirvDesc;
-
- return device.CreateShaderModule(&descriptor);
- }
-
- class CompilerSingleton {
- public:
- static shaderc::Compiler* Get() {
- std::call_once(mInitFlag, &CompilerSingleton::Initialize);
- return mCompiler;
- }
-
- private:
- CompilerSingleton() = default;
- ~CompilerSingleton() = default;
- CompilerSingleton(const CompilerSingleton&) = delete;
- CompilerSingleton& operator=(const CompilerSingleton&) = delete;
-
- static shaderc::Compiler* mCompiler;
- static std::once_flag mInitFlag;
-
- static void Initialize() {
- mCompiler = new shaderc::Compiler();
- }
- };
-
- shaderc::Compiler* CompilerSingleton::mCompiler = nullptr;
- std::once_flag CompilerSingleton::mInitFlag;
-
- } // anonymous namespace
-
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device,
- SingleShaderStage stage,
- const char* source) {
- shaderc_shader_kind kind = ShadercShaderKind(stage);
-
- shaderc::Compiler* compiler = CompilerSingleton::Get();
- auto result = compiler->CompileGlslToSpv(source, strlen(source), kind, "myshader?");
- if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
- dawn::ErrorLog() << result.GetErrorMessage();
- return {};
- }
-#ifdef DUMP_SPIRV_ASSEMBLY
- {
- shaderc::CompileOptions options;
- auto resultAsm = compiler->CompileGlslToSpvAssembly(source, strlen(source), kind,
- "myshader?", options);
- size_t sizeAsm = (resultAsm.cend() - resultAsm.cbegin());
-
- char* buffer = reinterpret_cast<char*>(malloc(sizeAsm + 1));
- memcpy(buffer, resultAsm.cbegin(), sizeAsm);
- buffer[sizeAsm] = '\0';
- printf("SPIRV ASSEMBLY DUMP START\n%s\nSPIRV ASSEMBLY DUMP END\n", buffer);
- free(buffer);
- }
-#endif
-
-#ifdef DUMP_SPIRV_JS_ARRAY
- printf("SPIRV JS ARRAY DUMP START\n");
- for (size_t i = 0; i < size; i++) {
- printf("%#010x", result.cbegin()[i]);
- if ((i + 1) % 4 == 0) {
- printf(",\n");
- } else {
- printf(", ");
- }
+ result = device.CreateShaderModule(&descriptor);
+ } else {
+ ASSERT(diagnostic != nullptr);
+ dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
+ << diagnostic->position.line + 1 << ":"
+ << diagnostic->position.column + 1 << ": " << diagnostic->error;
}
- printf("\n");
- printf("SPIRV JS ARRAY DUMP END\n");
-#endif
-
- return CreateShaderModuleFromResult(device, result);
- }
- wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
- shaderc::Compiler* compiler = CompilerSingleton::Get();
- shaderc::SpvCompilationResult result = compiler->AssembleToSpv(source, strlen(source));
- if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
- dawn::ErrorLog() << result.GetErrorMessage();
- return {};
- }
+ spvDiagnosticDestroy(diagnostic);
+ spvBinaryDestroy(spirv);
+ spvContextDestroy(context);
- return CreateShaderModuleFromResult(device, result);
+ return result;
}
- wgpu::ShaderModule CreateShaderModuleFromWGSL(const wgpu::Device& device, const char* source) {
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
wgpu::ShaderModuleWGSLDescriptor wgslDesc;
wgslDesc.source = source;
wgpu::ShaderModuleDescriptor descriptor;
@@ -149,18 +71,6 @@ namespace utils {
return device.CreateShaderModule(&descriptor);
}
- std::vector<uint32_t> CompileGLSLToSpirv(SingleShaderStage stage, const char* source) {
- shaderc_shader_kind kind = ShadercShaderKind(stage);
-
- shaderc::Compiler* compiler = CompilerSingleton::Get();
- auto result = compiler->CompileGlslToSpv(source, strlen(source), kind, "myshader?");
- if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
- dawn::ErrorLog() << result.GetErrorMessage();
- return {};
- }
- return {result.cbegin(), result.cend()};
- }
-
wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
const void* data,
uint64_t size,
@@ -258,7 +168,7 @@ namespace utils {
descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = width;
descriptor.size.height = height;
- descriptor.size.depth = 1;
+ descriptor.size.depthOrArrayLayers = 1;
descriptor.sampleCount = 1;
descriptor.format = BasicRenderPass::kDefaultColorFormat;
descriptor.mipLevelCount = 1;
@@ -268,28 +178,28 @@ namespace utils {
return BasicRenderPass(width, height, color);
}
- wgpu::BufferCopyView CreateBufferCopyView(wgpu::Buffer buffer,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- wgpu::BufferCopyView bufferCopyView = {};
- bufferCopyView.buffer = buffer;
- bufferCopyView.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
+ wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ wgpu::ImageCopyBuffer imageCopyBuffer = {};
+ imageCopyBuffer.buffer = buffer;
+ imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
- return bufferCopyView;
+ return imageCopyBuffer;
}
- wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
- uint32_t mipLevel,
- wgpu::Origin3D origin,
- wgpu::TextureAspect aspect) {
- wgpu::TextureCopyView textureCopyView;
- textureCopyView.texture = texture;
- textureCopyView.mipLevel = mipLevel;
- textureCopyView.origin = origin;
- textureCopyView.aspect = aspect;
-
- return textureCopyView;
+ wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
+ uint32_t mipLevel,
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect) {
+ wgpu::ImageCopyTexture imageCopyTexture;
+ imageCopyTexture.texture = texture;
+ imageCopyTexture.mipLevel = mipLevel;
+ imageCopyTexture.origin = origin;
+ imageCopyTexture.aspect = aspect;
+
+ return imageCopyTexture;
}
wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index 86dbb4e1544..5230ebff782 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -30,13 +30,8 @@ namespace utils {
enum class SingleShaderStage { Vertex, Fragment, Compute };
- wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device,
- SingleShaderStage stage,
- const char* source);
wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
- wgpu::ShaderModule CreateShaderModuleFromWGSL(const wgpu::Device& device, const char* source);
-
- std::vector<uint32_t> CompileGLSLToSpirv(SingleShaderStage stage, const char* source);
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
const void* data,
@@ -50,11 +45,11 @@ namespace utils {
return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
}
- wgpu::BufferCopyView CreateBufferCopyView(wgpu::Buffer buffer,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
- wgpu::TextureCopyView CreateTextureCopyView(
+ wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+ wgpu::ImageCopyTexture CreateImageCopyTexture(
wgpu::Texture texture,
uint32_t level,
wgpu::Origin3D origin,
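After this change CreateShaderModule takes WGSL source directly and CreateShaderModuleFromASM assembles SPIR-V text through SPIRV-Tools, so callers no longer go through shaderc. A hedged example of the WGSL path, using the attribute spelling of this snapshot (later WGSL revisions changed it):

    #include "utils/WGPUHelpers.h"

    // Build a trivial vertex shader module from inline WGSL.
    wgpu::ShaderModule MakeTrivialVS(const wgpu::Device& device) {
        return utils::CreateShaderModule(device, R"(
            [[stage(vertex)]] fn main() -> [[builtin(position)]] vec4<f32> {
                return vec4<f32>(0.0, 0.0, 0.0, 1.0);
            })");
    }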
diff --git a/chromium/third_party/dawn/third_party/CMakeLists.txt b/chromium/third_party/dawn/third_party/CMakeLists.txt
index 1169999f76e..94e5d8360e1 100644
--- a/chromium/third_party/dawn/third_party/CMakeLists.txt
+++ b/chromium/third_party/dawn/third_party/CMakeLists.txt
@@ -38,30 +38,6 @@ if (NOT TARGET SPIRV-Tools)
add_subdirectory(${DAWN_SPIRV_TOOLS_DIR})
endif()
-if (NOT TARGET glslang)
- set(SKIP_GLSLANG_INSTALL ON CACHE BOOL "" FORCE)
- set(ENABLE_SPVREMAPPER OFF CACHE BOOL "" FORCE)
- set(ENABLE_GLSLANG_BINARIES OFF CACHE BOOL "" FORCE)
- set(ENABLE_CTEST OFF CACHE BOOL "" FORCE)
-
- message(STATUS "Dawn: using GLSLang at ${DAWN_GLSLANG_DIR}")
- add_subdirectory(${DAWN_GLSLANG_DIR})
-endif()
-
-if (NOT TARGET shaderc)
- set(SHADERC_SKIP_TESTS ON CACHE BOOL "" FORCE)
- set(SHADERC_SKIP_INSTALL ON CACHE BOOL "" FORCE)
-
- # Change the default value of SHADERC_ENABLE_SHARED_CRT to ON as that's what matches the
- # CMake defaults better.
- if(MSVC)
- set(SHADERC_ENABLE_SHARED_CRT ON CACHE BOOL "Use the shared CRT instead of the static CRT")
- endif()
-
- message(STATUS "Dawn: using shaderc at ${DAWN_SHADERC_DIR}")
- add_subdirectory(${DAWN_SHADERC_DIR})
-endif()
-
if (DAWN_BUILD_EXAMPLES)
if (NOT TARGET glfw)
set(GLFW_BUILD_DOCS OFF CACHE BOOL "" FORCE)
@@ -78,12 +54,9 @@ if (DAWN_BUILD_EXAMPLES)
endif()
endif()
-if (DAWN_ENABLE_WGSL)
- if (NOT TARGET libtint)
- set(TINT_BUILD_TESTS OFF CACHE BOOL "" FORCE)
- message(STATUS "Dawn: using Tint at ${DAWN_TINT_DIR}")
- add_subdirectory(${DAWN_TINT_DIR})
- endif()
+if (NOT TARGET libtint)
+ message(STATUS "Dawn: using Tint at ${DAWN_TINT_DIR}")
+ add_subdirectory(${DAWN_TINT_DIR})
endif()
# Header-only library for khrplatform.h